diff --git a/CHANGES.rst b/CHANGES.rst
index 6e5807da72..e8e3b3eeca 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -58,7 +58,7 @@ RELEASE FILE IN doc/releases
files that have frametype None (this prevents ``run_pypeit`` from crashing)
- Added a function ``check_spectrograph()`` (currently only defined for LRIS),
that checks (during ``pypeit_setup``) if the selected spectrograph is the
- corrected one for the data used.
+ correct one for the data used.
1.13.0 (2 June 2023)
diff --git a/doc/coadd3d.rst b/doc/coadd3d.rst
index d0b2454297..03640ca5e5 100644
--- a/doc/coadd3d.rst
+++ b/doc/coadd3d.rst
@@ -155,13 +155,13 @@ Sky Subtraction
The default behaviour of PypeIt is to subtract the model sky that is
derived from the science frame during the reduction. If you would like
-to turn off sky subtraction, set the following keyword argument:
+to turn off sky subtraction, set the following keyword argument (all lowercase):
.. code-block:: ini
[reduce]
[[cube]]
- skysub_frame = None
+ skysub_frame = none
If you would like to use a dedicated sky frame for sky subtraction
that is separate from the science frame, then you need to provide
@@ -330,7 +330,7 @@ plot a wavelength slice of the cube:
from matplotlib import pyplot as plt
from astropy.visualization import ZScaleInterval, ImageNormalize
- from pypeit.core.datacube import DataCube
+ from pypeit.coadd3d import DataCube
filename = "datacube.fits"
cube = DataCube.from_file(filename)
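+
+Continuing the snippet, a sketch of the plotting step might look like the
+following. Note that the ``flux`` attribute and its ordering (wavelength along
+the first axis) are assumptions here; check the ``DataCube`` documentation:
+
+.. code-block:: python
+
+    # Hedged sketch: assumes cube.flux is a 3D array with wavelength first
+    wave_slice = 1000  # illustrative wavelength-channel index
+    norm = ImageNormalize(cube.flux[wave_slice], interval=ZScaleInterval())
+    fig, ax = plt.subplots()
+    ax.imshow(cube.flux[wave_slice], origin='lower', norm=norm)
+    plt.show()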
diff --git a/doc/include/spectrographs_table.rst b/doc/include/spectrographs_table.rst
index a1f4674edc..77970d24d0 100644
--- a/doc/include/spectrographs_table.rst
+++ b/doc/include/spectrographs_table.rst
@@ -18,7 +18,7 @@ jwst_nirspec :class:`~pypeit.spectrographs.jwst_nirspec.JWSTNIRSpec
keck_deimos :class:`~pypeit.spectrographs.keck_deimos.KeckDEIMOSSpectrograph` KECK DEIMOS `Link `__ MultiSlit True True Supported gratings: 600ZD, 830G, 900ZD, 1200B, 1200G; see :doc:`deimos`
keck_esi :class:`~pypeit.spectrographs.keck_esi.KeckESISpectrograph` KECK ESI Echelle True False
keck_hires :class:`~pypeit.spectrographs.keck_hires.KECKHIRESSpectrograph` KECK HIRES `Link `__ Echelle False False
-keck_kcrm :class:`~pypeit.spectrographs.keck_kcwi.KeckKCRMSpectrograph` KECK KCRM `Link `__ IFU True False Supported setups: RM1, RM2, RH3; see :doc:`keck_kcwi`
+keck_kcrm :class:`~pypeit.spectrographs.keck_kcwi.KeckKCRMSpectrograph` KECK KCRM `Link `__ IFU True False Supported setups: RL, RM1, RM2, RH3; see :doc:`keck_kcwi`
keck_kcwi :class:`~pypeit.spectrographs.keck_kcwi.KeckKCWISpectrograph` KECK KCWI `Link `__ IFU True False Supported setups: BL, BM, BH2; see :doc:`keck_kcwi`
keck_lris_blue :class:`~pypeit.spectrographs.keck_lris.KeckLRISBSpectrograph` KECK LRISb `Link `__ MultiSlit True False Blue camera; Current FITS file format; used from May 2009, see :doc:`lris`
keck_lris_blue_orig :class:`~pypeit.spectrographs.keck_lris.KeckLRISBOrigSpectrograph` KECK LRISb `Link `__ MultiSlit True False Blue camera; Original FITS file format; used until April 2009; see :doc:`lris`
diff --git a/doc/pypeit_par.rst b/doc/pypeit_par.rst
index be70137ce7..50f606bdbe 100644
--- a/doc/pypeit_par.rst
+++ b/doc/pypeit_par.rst
@@ -578,13 +578,13 @@ FlexurePar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.FlexurePar`
=================== ========== ======================================================== ==================== ======================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
=================== ========== ======================================================== ==================== ======================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
``excessive_shift`` str ``crash``, ``set_to_zero``, ``continue``, ``use_median`` ``use_median`` Behavior when the measured spectral flexure shift is larger than ``spec_maxshift``. The options are: 'crash' - Raise an error and halt the data reduction; 'set_to_zero' - Set the flexure shift to zero and continue with the reduction; 'continue' - Use the large flexure value whilst issuing a warning; and 'use_median' - Use the median flexure shift among all the objects in the same slit (if more than one object is detected) or among all the other slits; if not available, the flexure correction will not be applied.
-``multi_min_SN`` int, float .. 1 Minimum S/N for analyzing sky spectrum for flexure
-``spec_maxshift`` int .. 20 Maximum allowed spectral flexure shift in pixels.
-``spec_method`` str ``boxcar``, ``slitcen``, ``skip`` ``skip`` Method used to correct for flexure. Use skip for no correction. If slitcen is used, the flexure correction is performed before the extraction of objects (not recommended). Options are: None, boxcar, slitcen, skip
-``spectrum`` str .. ``paranal_sky.fits`` Archive sky spectrum to be used for the flexure correction.
+``multi_min_SN`` int, float .. 1 Minimum S/N for analyzing sky spectrum for flexure
+``spec_maxshift`` int .. 20 Maximum allowed spectral flexure shift in pixels.
+``spec_method`` str ``boxcar``, ``slitcen``, ``skip`` ``skip`` Method used to correct for flexure. Use skip for no correction. If slitcen is used, the flexure correction is performed before the extraction of objects (not recommended). Options are: None, boxcar, slitcen, skip
+``spectrum`` str .. ``paranal_sky.fits`` Archive sky spectrum to be used for the flexure correction.
=================== ========== ======================================================== ==================== ======================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
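+
+As a minimal sketch, these keywords are set in a top-level ``[flexure]`` block
+of the PypeIt file; the values below are illustrative, not recommendations:
+
+.. code-block:: ini
+
+    [flexure]
+        spec_method = boxcar
+        spec_maxshift = 30
+        excessive_shift = use_median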
@@ -598,12 +598,12 @@ FluxCalibratePar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.FluxCalibratePar`
===================== ==== ======= =========== ============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
===================== ==== ======= =========== ============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
``extinct_correct``   bool ..      ..           The default behavior for atmospheric extinction corrections is that if the UVIS algorithm is used (which does not correct for telluric absorption) then an atmospheric extinction model is used to correct for extinction below 10,000A, whereas if the IR algorithm is used, then no extinction correction is applied since the atmosphere is modeled directly. To follow these defaults based on the algorithm this parameter should be set to ``extinct_correct=None``. If instead this parameter is set, it overrides this default behavior. In other words, it will force an extinction correction if ``extinct_correct=True``, and will not perform an extinction correction if ``extinct_correct=False``.
-``extinct_file`` str .. ``closest`` If ``extinct_file='closest'`` the code will select the PypeIt-included extinction file for the closest observatory (within 5 deg, geographic coordinates) to the telescope identified in ``std_file`` (see :ref:`extinction_correction` for the list of currently included files). If constructing a sesitivity function for a telescope not within 5 deg of a listed observatory, this parameter may be set to the name of one of the listed extinction files. Alternatively, a custom extinction file may be installed in the PypeIt cache using the ``pypeit_install_extinctfile`` script; this parameter may then be set to the name of the custom extinction file.
-``extrap_sens`` bool .. False If False (default), the code will crash if one tries to use sensfunc at wavelengths outside its defined domain. By changing the par['sensfunc']['extrap_blu'] and par['sensfunc']['extrap_red'] this domain can be extended. If True the code will blindly extrapolate.
-``use_archived_sens`` bool .. False Use an archived sensfunc to flux calibration
+``extinct_file`` str .. ``closest`` If ``extinct_file='closest'`` the code will select the PypeIt-included extinction file for the closest observatory (within 5 deg, geographic coordinates) to the telescope identified in ``std_file`` (see :ref:`extinction_correction` for the list of currently included files). If constructing a sensitivity function for a telescope not within 5 deg of a listed observatory, this parameter may be set to the name of one of the listed extinction files. Alternatively, a custom extinction file may be installed in the PypeIt cache using the ``pypeit_install_extinctfile`` script; this parameter may then be set to the name of the custom extinction file.
+``extrap_sens`` bool .. False If False (default), the code will crash if one tries to use sensfunc at wavelengths outside its defined domain. By changing the par['sensfunc']['extrap_blu'] and par['sensfunc']['extrap_red'] this domain can be extended. If True the code will blindly extrapolate.
+``use_archived_sens`` bool .. False Use an archived sensfunc for flux calibration
===================== ==== ======= =========== ============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
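+
+For example, forcing an extinction correction and allowing sensitivity-function
+extrapolation (illustrative values) would look like:
+
+.. code-block:: ini
+
+    [fluxcalib]
+        extinct_correct = True
+        extrap_sens = True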
@@ -643,13 +643,13 @@ ReducePar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.ReducePar`
============== ============================================ ======= ========================= =================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
============== ============================================ ======= ========================= =================================================================================
-``cube`` :class:`~pypeit.par.pypeitpar.CubePar` .. `CubePar Keywords`_ Parameters for cube generation algorithms
-``extraction`` :class:`~pypeit.par.pypeitpar.ExtractionPar` .. `ExtractionPar Keywords`_ Parameters for extraction algorithms
-``findobj`` :class:`~pypeit.par.pypeitpar.FindObjPar` .. `FindObjPar Keywords`_ Parameters for the find object and tracing algorithms
-``skysub`` :class:`~pypeit.par.pypeitpar.SkySubPar` .. `SkySubPar Keywords`_ Parameters for sky subtraction algorithms
-``slitmask`` :class:`~pypeit.par.pypeitpar.SlitMaskPar` .. `SlitMaskPar Keywords`_ Parameters for slitmask
+``cube`` :class:`~pypeit.par.pypeitpar.CubePar` .. `CubePar Keywords`_ Parameters for cube generation algorithms
+``extraction`` :class:`~pypeit.par.pypeitpar.ExtractionPar` .. `ExtractionPar Keywords`_ Parameters for extraction algorithms
+``findobj`` :class:`~pypeit.par.pypeitpar.FindObjPar` .. `FindObjPar Keywords`_ Parameters for the find object and tracing algorithms
+``skysub`` :class:`~pypeit.par.pypeitpar.SkySubPar` .. `SkySubPar Keywords`_ Parameters for sky subtraction algorithms
+``slitmask`` :class:`~pypeit.par.pypeitpar.SlitMaskPar` .. `SlitMaskPar Keywords`_ Parameters for slitmask
``trim_edge`` list .. 3, 3 Trim the slit by this number of pixels left/right when performing sky subtraction
============== ============================================ ======= ========================= =================================================================================
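+
+The parameter sets listed above are nested under ``[reduce]`` using double
+brackets, e.g. (illustrative values):
+
+.. code-block:: ini
+
+    [reduce]
+        trim_edge = 5, 5
+        [[skysub]]
+            no_poly = True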
@@ -664,32 +664,32 @@ CubePar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.CubePar`
==================== ===== ======= ============ ===========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
==================== ===== ======= ============ ===========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-``align`` bool .. False If set to True, the input frames will be spatially aligned by cross-correlating the whitelight images with either a reference image (see ``reference_image``) or the whitelight image that is generated using the first spec2d listed in the coadd3d file. Alternatively, the user can specify the offsets (i.e. Delta RA x cos(dec) and Delta Dec, both in arcsec) in the spec2d block of the coadd3d file. See the documentation for examples of this usage.
-``astrometric`` bool .. True If true, an astrometric correction will be applied using the alignment frames.
-``combine`` bool .. False If set to True, the input frames will be combined. Otherwise, a separate datacube will be generated for each input spec2d file, and will be saved as a spec3d file.
-``dec_max`` float .. .. Maximum DEC to use when generating the WCS. If None, the default is maximum DEC based on the WCS of all spaxels. Units should be degrees.
-``dec_min`` float .. .. Minimum DEC to use when generating the WCS. If None, the default is minimum DEC based on the WCS of all spaxels. Units should be degrees.
-``grating_corr`` bool .. True This option performs a small correction for the relative blaze function of all input frames that have (even slightly) different grating angles, or if you are flux calibrating your science data with a standard star that was observed with a slightly different setup.
+``align`` bool .. False If set to True, the input frames will be spatially aligned by cross-correlating the whitelight images with either a reference image (see ``reference_image``) or the whitelight image that is generated using the first spec2d listed in the coadd3d file. Alternatively, the user can specify the offsets (i.e. Delta RA x cos(dec) and Delta Dec, both in arcsec) in the spec2d block of the coadd3d file. See the documentation for examples of this usage.
+``astrometric`` bool .. True If True, an astrometric correction will be applied using the alignment frames.
+``combine`` bool .. False If set to True, the input frames will be combined. Otherwise, a separate datacube will be generated for each input spec2d file, and will be saved as a spec3d file.
+``dec_max`` float .. .. Maximum DEC to use when generating the WCS. If None, the default is maximum DEC based on the WCS of all spaxels. Units should be degrees.
+``dec_min`` float .. .. Minimum DEC to use when generating the WCS. If None, the default is minimum DEC based on the WCS of all spaxels. Units should be degrees.
+``grating_corr`` bool .. True This option performs a small correction for the relative blaze function of all input frames that have (even slightly) different grating angles, or if you are flux calibrating your science data with a standard star that was observed with a slightly different setup.
``method`` str .. ``subpixel`` What method should be used to generate the datacube. There are currently two options: (1) "subpixel" (default) - this algorithm divides each pixel in the spec2d frames into subpixels, and assigns each subpixel to a voxel of the datacube. Flux is conserved, but voxels are correlated, and the error spectrum does not account for covariance between adjacent voxels. See also, spec_subpixel and spat_subpixel. (2) "NGP" (nearest grid point) - this algorithm is effectively a 3D histogram. Flux is conserved, voxels are not correlated, however this option suffers the same downsides as any histogram; the choice of bin sizes can change how the datacube appears. This algorithm takes each pixel on the spec2d frame and puts the flux of this pixel into one voxel in the datacube. Depending on the binning used, some voxels may be empty (zero flux) while a neighboring voxel might contain the flux from two spec2d pixels. Note that all spec2d pixels that contribute to the same voxel are inverse variance weighted (e.g. if two pixels have the same variance, the voxel would be assigned the average flux of the two pixels).
-``output_filename`` str .. .. If combining multiple frames, this string sets the output filename of the combined datacube. If combine=False, the output filenames will be prefixed with ``spec3d_*``
-``ra_max`` float .. .. Maximum RA to use when generating the WCS. If None, the default is maximum RA based on the WCS of all spaxels. Units should be degrees.
-``ra_min`` float .. .. Minimum RA to use when generating the WCS. If None, the default is minimum RA based on the WCS of all spaxels. Units should be degrees.
-``reference_image`` str .. .. White light image of a previously combined datacube. The white light image will be used as a reference when calculating the offsets of the input spec2d files. Ideally, the reference image should have the same shape as the data to be combined (i.e. set the ra_min, ra_max etc. params so they are identical to the reference image).
-``relative_weights`` bool .. False If set to True, the combined frames will use a relative weighting scheme. This only works well if there is a common continuum source in the field of view of all input observations, and is generally only required if high relative precision is desired.
-``save_whitelight`` bool .. False Save a white light image of the combined datacube. The output filename will be given by the "output_filename" variable with a suffix "_whitelight". Note that the white light image collapses the flux along the wavelength axis, so some spaxels in the 2D white light image may have different wavelength ranges. To set the wavelength range, use the "whitelight_range" parameter. If combine=False, the individual spec3d files will have a suffix "_whitelight".
-``scale_corr`` str .. .. This option performs a small correction for the relative spectral illumination scale of different spec2D files. Specify the relative path+file to the spec2D file that you would like to use for the relative scaling. If you want to perform this correction, it is best to use the spec2d file with the highest S/N sky spectrum. You should choose the same frame for both the standards and science frames.
-``skysub_frame`` str .. ``image`` Set the sky subtraction to be implemented. The default behaviour is to subtract the sky using the model that is derived from each individual image (i.e. set this parameter to "image"). To turn off sky subtraction completely, set this parameter to "none" (all lowercase). Finally, if you want to use a different frame for the sky subtraction, specify the relative path+file to the spec2D file that you would like to use for the sky subtraction. The model fit to the sky of the specified frame will be used. Note, the sky and science frames do not need to have the same exposure time; the sky model will be scaled to the science frame based on the relative exposure time.
-``slit_spec`` bool .. True If the data use slits in one spatial direction, set this to True. If the data uses fibres for all spaxels, set this to False.
-``spat_subpixel`` int .. 5 When method=subpixel, spat_subpixel sets the subpixellation scale of each detector pixel in the spatial direction. The total number of subpixels in each pixel is given by spec_subpixel x spat_subpixel. The default option is to divide each spec2d pixel into 25 subpixels during datacube creation. See also, spec_subpixel.
-``spatial_delta`` float .. .. The spatial size of each spaxel to use when generating the WCS (in arcsec). If None, the default is set by the spectrograph file.
-``spec_subpixel`` int .. 5 When method=subpixel, spec_subpixel sets the subpixellation scale of each detector pixel in the spectral direction. The total number of subpixels in each pixel is given by spec_subpixel x spat_subpixel. The default option is to divide each spec2d pixel into 25 subpixels during datacube creation. See also, spat_subpixel.
-``standard_cube`` str .. .. Filename of a standard star datacube. This cube will be used to correct the relative scales of the slits, and to flux calibrate the science datacube.
-``wave_delta`` float .. .. The wavelength step to use when generating the WCS (in Angstroms). If None, the default is set by the wavelength solution.
-``wave_max`` float .. .. Maximum wavelength to use when generating the WCS. If None, the default is maximum wavelength based on the WCS of all spaxels. Units should be Angstroms.
-``wave_min`` float .. .. Minimum wavelength to use when generating the WCS. If None, the default is minimum wavelength based on the WCS of all spaxels. Units should be Angstroms.
-``whitelight_range`` list .. None, None A two element list specifying the wavelength range over which to generate the white light image. The first (second) element is the minimum (maximum) wavelength to use. If either of these elements are None, PypeIt will automatically use a wavelength range that ensures all spaxels have the same wavelength coverage. Note, if you are using a reference_image to align all frames, it is preferable to use the same white light wavelength range for all white light images. For example, you may wish to use an emission line map to register two frames.
+``output_filename`` str .. .. If combining multiple frames, this string sets the output filename of the combined datacube. If combine=False, the output filenames will be prefixed with ``spec3d_*``
+``ra_max`` float .. .. Maximum RA to use when generating the WCS. If None, the default is maximum RA based on the WCS of all spaxels. Units should be degrees.
+``ra_min`` float .. .. Minimum RA to use when generating the WCS. If None, the default is minimum RA based on the WCS of all spaxels. Units should be degrees.
+``reference_image`` str .. .. White light image of a previously combined datacube. The white light image will be used as a reference when calculating the offsets of the input spec2d files. Ideally, the reference image should have the same shape as the data to be combined (i.e. set the ra_min, ra_max etc. params so they are identical to the reference image).
+``relative_weights`` bool .. False If set to True, the combined frames will use a relative weighting scheme. This only works well if there is a common continuum source in the field of view of all input observations, and is generally only required if high relative precision is desired.
+``save_whitelight`` bool .. False Save a white light image of the combined datacube. The output filename will be given by the "output_filename" variable with a suffix "_whitelight". Note that the white light image collapses the flux along the wavelength axis, so some spaxels in the 2D white light image may have different wavelength ranges. To set the wavelength range, use the "whitelight_range" parameter. If combine=False, the individual spec3d files will have a suffix "_whitelight".
+``scale_corr`` str .. .. This option performs a small correction for the relative spectral illumination scale of different spec2D files. Specify the relative path+file to the spec2D file that you would like to use for the relative scaling. If you want to perform this correction, it is best to use the spec2d file with the highest S/N sky spectrum. You should choose the same frame for both the standards and science frames.
+``skysub_frame`` str .. ``image`` Set the sky subtraction to be implemented. The default behaviour is to subtract the sky using the model that is derived from each individual image (i.e. set this parameter to "image"). To turn off sky subtraction completely, set this parameter to "none" (all lowercase). Finally, if you want to use a different frame for the sky subtraction, specify the relative path+file to the spec2D file that you would like to use for the sky subtraction. The model fit to the sky of the specified frame will be used. Note, the sky and science frames do not need to have the same exposure time; the sky model will be scaled to the science frame based on the relative exposure time.
+``slit_spec`` bool .. True If the data uses slits in one spatial direction, set this to True. If the data uses fibres for all spaxels, set this to False.
+``spat_subpixel`` int .. 5 When method=subpixel, spat_subpixel sets the subpixellation scale of each detector pixel in the spatial direction. The total number of subpixels in each pixel is given by spec_subpixel x spat_subpixel. The default option is to divide each spec2d pixel into 25 subpixels during datacube creation. See also, spec_subpixel.
+``spatial_delta`` float .. .. The spatial size of each spaxel to use when generating the WCS (in arcsec). If None, the default is set by the spectrograph file.
+``spec_subpixel`` int .. 5 When method=subpixel, spec_subpixel sets the subpixellation scale of each detector pixel in the spectral direction. The total number of subpixels in each pixel is given by spec_subpixel x spat_subpixel. The default option is to divide each spec2d pixel into 25 subpixels during datacube creation. See also, spat_subpixel.
+``standard_cube`` str .. .. Filename of a standard star datacube. This cube will be used to correct the relative scales of the slits, and to flux calibrate the science datacube.
+``wave_delta`` float .. .. The wavelength step to use when generating the WCS (in Angstroms). If None, the default is set by the wavelength solution.
+``wave_max`` float .. .. Maximum wavelength to use when generating the WCS. If None, the default is maximum wavelength based on the WCS of all spaxels. Units should be Angstroms.
+``wave_min`` float .. .. Minimum wavelength to use when generating the WCS. If None, the default is minimum wavelength based on the WCS of all spaxels. Units should be Angstroms.
+``whitelight_range`` list .. None, None A two element list specifying the wavelength range over which to generate the white light image. The first (second) element is the minimum (maximum) wavelength to use. If either of these elements are None, PypeIt will automatically use a wavelength range that ensures all spaxels have the same wavelength coverage. Note, if you are using a reference_image to align all frames, it is preferable to use the same white light wavelength range for all white light images. For example, you may wish to use an emission line map to register two frames.
==================== ===== ======= ============ ===========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
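+
+A sketch of combining several spec2d files into a single datacube with a white
+light image (illustrative values):
+
+.. code-block:: ini
+
+    [reduce]
+        [[cube]]
+            combine = True
+            output_filename = combined_cube.fits
+            save_whitelight = True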
@@ -703,17 +703,17 @@ ExtractionPar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.ExtractionPar`
==================== ========== ======= ======= =============================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
==================== ========== ======= ======= =============================================================================================================================================================================================================================================================================================
-``boxcar_radius`` int, float .. 1.5 Boxcar radius in arcseconds used for boxcar extraction
+``boxcar_radius`` int, float .. 1.5 Boxcar radius in arcseconds used for boxcar extraction
``model_full_slit`` bool .. False If True local sky subtraction will be performed on the entire slit. If False, local sky subtraction will be applied to only a restricted region around each object. This should be set to True for either multislit observations using narrow slits or echelle observations with narrow slits
-``return_negative`` bool .. False If ``True`` the negative traces will be extracted and saved to disk
-``skip_extraction`` bool .. False Do not perform an object extraction
-``skip_optimal`` bool .. False Perform boxcar extraction only (i.e. skip Optimal and local skysub)
-``sn_gauss`` int, float .. 4.0 S/N threshold for performing the more sophisticated optimal extraction which performs a b-spline fit to the object profile. For S/N < sn_gauss the code will simply optimal extractwith a Gaussian with FWHM determined from the object finding.
-``std_prof_nsigma`` float .. 30.0 prof_nsigma parameter for Standard star extraction. Prevents undesired rejection. NOTE: Not consumed by the code at present.
-``use_2dmodel_mask`` bool .. True Mask pixels rejected during profile fitting when extracting.Turning this off may help with bright emission lines.
-``use_user_fwhm`` bool .. False Boolean indicating if PypeIt should use the FWHM provided by the user (``find_fwhm`` in `FindObjPar`) for the optimal extraction. If this parameter is ``False`` (default), PypeIt estimates the FWHM for each detected object, and uses ``find_fwhm`` as initial guess.
+``return_negative`` bool .. False If ``True`` the negative traces will be extracted and saved to disk
+``skip_extraction`` bool .. False Do not perform an object extraction
+``skip_optimal`` bool .. False Perform boxcar extraction only (i.e. skip Optimal and local skysub)
+``sn_gauss`` int, float .. 4.0 S/N threshold for performing the more sophisticated optimal extraction, which performs a b-spline fit to the object profile. For S/N < sn_gauss the code will simply optimally extract with a Gaussian with FWHM determined from the object finding.
+``std_prof_nsigma`` float .. 30.0 prof_nsigma parameter for Standard star extraction. Prevents undesired rejection. NOTE: Not consumed by the code at present.
+``use_2dmodel_mask`` bool .. True Mask pixels rejected during profile fitting when extracting. Turning this off may help with bright emission lines.
+``use_user_fwhm`` bool .. False Boolean indicating if PypeIt should use the FWHM provided by the user (``find_fwhm`` in `FindObjPar`) for the optimal extraction. If this parameter is ``False`` (default), PypeIt estimates the FWHM for each detected object, and uses ``find_fwhm`` as initial guess.
==================== ========== ======= ======= =============================================================================================================================================================================================================================================================================================
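+
+For instance, a boxcar-only extraction with a wider aperture (illustrative
+values) would be set as:
+
+.. code-block:: ini
+
+    [reduce]
+        [[extraction]]
+            boxcar_radius = 2.0
+            skip_optimal = True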
@@ -727,25 +727,25 @@ FindObjPar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.FindObjPar`
=========================== ========== ======= ======= =============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
=========================== ========== ======= ======= =============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-``ech_find_max_snr`` int, float .. 1.0 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than this value or satisfy the min_snr criteria described by the min_snr parameters. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept.
-``ech_find_min_snr`` int, float .. 0.3 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than ech_find_max_snr, value or they must have S/N > ech_find_min_snr on >= ech_find_nabove_min_snr orders. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept.
-``ech_find_nabove_min_snr`` int .. 2 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than ech_find_max_snr, value or they must have S/N > ech_find_min_snr on >= ech_find_nabove_min_snr orders. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept.
-``find_extrap_npoly`` int .. 3 Polynomial order used for trace extrapolation
-``find_fwhm`` int, float .. 5.0 Indicates roughly the fwhm of objects in pixels for object finding
-``find_maxdev`` int, float .. 2.0 Maximum deviation of pixels from polynomial fit to trace used to reject bad pixels in trace fitting.
-``find_min_max`` list .. .. It defines the minimum and maximum of your object in pixels in the spectral direction on the detector. It only used for object finding. This parameter is helpful if your object only has emission lines or at high redshift and the trace only shows in part of the detector.
-``find_negative`` bool .. .. Identify negative objects in object finding for spectra that are differenced. This is used to manually override the default behavior in PypeIt for object finding by setting this parameter to something other than None The default behavior is that PypeIt will search for negative object traces if background frames are present in the PypeIt file that are classified as "science" (i.e. via pypeit_setup -b, and setting bkg_id in the PypeIt file). If background frames are present that are classified as "sky", then PypeIt will NOT search for negative object traces. If one wishes to explicitly override this default behavior, set this parameter to True to find negative objects or False to ignore them.
-``find_trim_edge`` list .. 5, 5 Trim the slit by this number of pixels left/right before finding objects
+``ech_find_max_snr`` int, float .. 1.0 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than this value or satisfy the min_snr criteria described by the min_snr parameters. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept.
+``ech_find_min_snr`` int, float .. 0.3 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than the ech_find_max_snr value, or they must have S/N > ech_find_min_snr on >= ech_find_nabove_min_snr orders. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept.
+``ech_find_nabove_min_snr`` int .. 2 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than the ech_find_max_snr value, or they must have S/N > ech_find_min_snr on >= ech_find_nabove_min_snr orders. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept.
+``find_extrap_npoly`` int .. 3 Polynomial order used for trace extrapolation
+``find_fwhm`` int, float .. 5.0 Indicates roughly the fwhm of objects in pixels for object finding
+``find_maxdev`` int, float .. 2.0 Maximum deviation of pixels from polynomial fit to trace used to reject bad pixels in trace fitting.
+``find_min_max`` list .. .. It defines the minimum and maximum of your object in pixels in the spectral direction on the detector. It is only used for object finding. This parameter is helpful if your object only has emission lines or is at high redshift, such that the trace only shows in part of the detector.
+``find_negative`` bool .. .. Identify negative objects in object finding for spectra that are differenced. This is used to manually override the default behavior in PypeIt for object finding by setting this parameter to something other than None. The default behavior is that PypeIt will search for negative object traces if background frames are present in the PypeIt file that are classified as "science" (i.e. via pypeit_setup -b, and setting bkg_id in the PypeIt file). If background frames are present that are classified as "sky", then PypeIt will NOT search for negative object traces. If one wishes to explicitly override this default behavior, set this parameter to True to find negative objects or False to ignore them.
+``find_trim_edge`` list .. 5, 5 Trim the slit by this number of pixels left/right before finding objects
``maxnumber_sci`` int .. 10 Maximum number of objects to extract in a science frame. Use None for no limit. This parameter can be useful in situations where systematics lead to spurious extra objects. Setting this parameter means they will be trimmed. For multislit, maxnumber applies per slit, for echelle observations this applies per order. Note that objects on a slit/order impact the sky-modeling and so maxnumber should never be lower than the true number of detectable objects on your slit. For image differenced observations with positive and negative object traces, maxnumber applies to the number of positive (or negative) traces individually. In other words, if you had two positive objects and one negative object, then you would set maxnumber to be equal to two (not three). Note that if manually extracted apertures are explicitly requested, they do not count against this maxnumber. If more than maxnumber objects are detected, then the highest S/N ratio objects will be the ones that are kept. For multislit observations the choice here depends on the slit length. For echelle observations with short slits we set the default to be 1
-``maxnumber_std`` int .. 5 Maximum number of objects to extract in a standard star frame. Same functionality as maxnumber_sci documented above. For multislit observations the default here is 5, for echelle observations the default is 1
-``skip_final_global`` bool .. False If True, do not update initial sky to get global sky using updated noise model. This should be True for quicklook to save time. This should also be True for near-IR reductions which perform difference imaging, since there we fit sky-residuals rather than the sky itself, so there is no noise model to update.
-``skip_second_find`` bool .. False Only perform one round of object finding (mainly for quick_look)
-``skip_skysub`` bool .. False If True, do not sky subtract when performing object finding. This should be set to True for example when running on data that is already sky-subtracted. Note that for near-IR difference imaging one still wants to remove sky-residuals via sky-subtraction, and so this is typically set to False
-``snr_thresh`` int, float .. 10.0 S/N threshold for object finding in wavelength direction smashed image.
-``std_spec1d`` str .. .. A PypeIt spec1d file of a previously reduced standard star. The trace of the standard star spectrum is used as a crutch for tracing the object spectra, when a direct trace is not possible (i.e., faint sources). If provided, this overrides use of any standards included in your pypeit file; the standard exposures will still be reduced.
-``trace_npoly`` int .. 5 Order of legendre polynomial fits to object traces.
+``maxnumber_std`` int .. 5 Maximum number of objects to extract in a standard star frame. Same functionality as maxnumber_sci documented above. For multislit observations the default here is 5, for echelle observations the default is 1
+``skip_final_global`` bool .. False If True, do not update initial sky to get global sky using updated noise model. This should be True for quicklook to save time. This should also be True for near-IR reductions which perform difference imaging, since there we fit sky-residuals rather than the sky itself, so there is no noise model to update.
+``skip_second_find`` bool .. False Only perform one round of object finding (mainly for quick_look)
+``skip_skysub`` bool .. False If True, do not sky subtract when performing object finding. This should be set to True for example when running on data that is already sky-subtracted. Note that for near-IR difference imaging one still wants to remove sky-residuals via sky-subtraction, and so this is typically set to False
+``snr_thresh`` int, float .. 10.0 S/N threshold for object finding in wavelength direction smashed image.
+``std_spec1d`` str .. .. A PypeIt spec1d file of a previously reduced standard star. The trace of the standard star spectrum is used as a crutch for tracing the object spectra, when a direct trace is not possible (i.e., faint sources). If provided, this overrides use of any standards included in your pypeit file; the standard exposures will still be reduced.
+``trace_npoly`` int .. 5 Order of Legendre polynomial fits to object traces.
=========================== ========== ======= ======= =============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
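+
+As an illustrative sketch, lowering the detection threshold for faint or
+extended objects might look like:
+
+.. code-block:: ini
+
+    [reduce]
+        [[findobj]]
+            snr_thresh = 5.0
+            find_fwhm = 8.0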
@@ -759,17 +759,17 @@ SkySubPar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.SkySubPar`
=================== ========== ======= ======= ===================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
=================== ========== ======= ======= ===================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-``bspline_spacing`` int, float .. 0.6 Break-point spacing for the bspline sky subtraction fits.
-``global_sky_std`` bool .. True Global sky subtraction will be performed on standard stars. This should be turned off for example for near-IR reductions with narrow slits, since bright standards can fill the slit causing global sky-subtraction to fail. In these situations we go straight to local sky-subtraction since it is designed to deal with such situations
-``joint_fit`` bool .. False Perform a simultaneous joint fit to sky regions using all available slits. Currently, this parameter is only used for IFU data reduction. Note that the current implementation does not account for variations in the instrument FWHM in different slits. This will be addressed by Issue #1660.
-``local_maskwidth`` float .. 4.0 Initial width of the region in units of FWHM that will be used for local sky subtraction
-``mask_by_boxcar`` bool .. False In global sky evaluation, mask the sky region around the object by the boxcar radius (set in ExtractionPar).
-``max_mask_frac`` float .. 0.8 Maximum fraction of total pixels on a slit that can be masked by the input masks. If more than this threshold is masked the code will return zeros and throw a warning.
-``no_local_sky`` bool .. False If True, turn off local sky model evaluation, but do fit object profile and perform optimal extraction
-``no_poly`` bool .. False Turn off polynomial basis (Legendre) in global sky subtraction
-``sky_sigrej`` float .. 3.0 Rejection parameter for local sky subtraction
+``bspline_spacing`` int, float .. 0.6 Break-point spacing for the bspline sky subtraction fits.
+``global_sky_std`` bool .. True Global sky subtraction will be performed on standard stars. This should be turned off for example for near-IR reductions with narrow slits, since bright standards can fill the slit causing global sky-subtraction to fail. In these situations we go straight to local sky-subtraction since it is designed to deal with such situations
+``joint_fit`` bool .. False Perform a simultaneous joint fit to sky regions using all available slits. Currently, this parameter is only used for IFU data reduction. Note that the current implementation does not account for variations in the instrument FWHM in different slits. This will be addressed by Issue #1660.
+``local_maskwidth`` float .. 4.0 Initial width of the region in units of FWHM that will be used for local sky subtraction
+``mask_by_boxcar`` bool .. False In global sky evaluation, mask the sky region around the object by the boxcar radius (set in ExtractionPar).
+``max_mask_frac`` float .. 0.8 Maximum fraction of total pixels on a slit that can be masked by the input masks. If more than this threshold is masked the code will return zeros and throw a warning.
+``no_local_sky`` bool .. False If True, turn off local sky model evaluation, but do fit object profile and perform optimal extraction
+``no_poly`` bool .. False Turn off polynomial basis (Legendre) in global sky subtraction
+``sky_sigrej`` float .. 3.0 Rejection parameter for local sky subtraction
``user_regions`` str, list .. .. Provides a user-defined mask defining sky regions. By default, the sky regions are identified automatically. To specify sky regions for *all* slits, provide a comma separated list of percentages. For example, setting user_regions = :10,35:65,80: selects the first 10%, the inner 30%, and the final 20% of *all* slits as containing sky. Setting user_regions = user will attempt to load any SkyRegions files generated by the user via the pypeit_skysub_regions tool.
=================== ========== ======= ======= ===================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
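+
+For example, sky regions can be declared by hand with the percentage syntax
+described above (illustrative values):
+
+.. code-block:: ini
+
+    [reduce]
+        [[skysub]]
+            user_regions = :10,35:65,80:
+            bspline_spacing = 0.8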
@@ -784,18 +784,18 @@ SlitMaskPar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.SlitMaskPar`
=========================== ========== ======= ======= ======================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
=========================== ========== ======= ======= ======================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-``assign_obj`` bool .. False If SlitMask object was generated, assign RA,DEC,name to detected objects
-``bright_maskdef_id`` int .. .. `maskdef_id` (corresponding e.g., to `dSlitId` and `Slit_Number` in the DEIMOS/LRIS and MOSFIRE slitmask design, respectively) of a slit containing a bright object that will be used to compute the slitmask offset. This parameter is optional and is ignored if ``slitmask_offset`` is provided.
-``extract_missing_objs`` bool .. False Force extraction of undetected objects at the location expected from the slitmask design.
-``missing_objs_boxcar_rad`` int, float .. 1.0 Indicates the boxcar radius in arcsec for the force extraction of undetected objects.
+``assign_obj`` bool .. False If a SlitMask object was generated, assign RA, DEC, and name to detected objects
+``bright_maskdef_id`` int .. .. `maskdef_id` (corresponding e.g., to `dSlitId` and `Slit_Number` in the DEIMOS/LRIS and MOSFIRE slitmask design, respectively) of a slit containing a bright object that will be used to compute the slitmask offset. This parameter is optional and is ignored if ``slitmask_offset`` is provided.
+``extract_missing_objs`` bool .. False Force extraction of undetected objects at the location expected from the slitmask design.
+``missing_objs_boxcar_rad`` int, float ..      1.0     Indicates the boxcar radius in arcsec for the forced extraction of undetected objects.
``missing_objs_fwhm``       int, float ..      ..      Indicates the FWHM in arcsec for the forced extraction of undetected objects. PypeIt will try to determine the FWHM from the flux profile (by using ``missing_objs_fwhm`` as initial guess). If the FWHM cannot be determined, ``missing_objs_fwhm`` will be assumed. If you do not want PypeIt to try to determine the FWHM, set the parameter ``use_user_fwhm`` in ``ExtractionPar`` to True. If ``missing_objs_fwhm`` is ``None`` (which is the default) PypeIt will use the median FWHM of all the detected objects.
-``obj_toler`` int, float .. 1.0 If slitmask design information is provided, and slit matching is performed (``use_maskdesign = True`` in ``EdgeTracePar``), this parameter provides the desired tolerance (arcsec) to match sources to targeted objects
-``slitmask_offset`` int, float .. .. User-provided slitmask offset (pixels) from the position expected by the slitmask design. This is optional, and if set PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id``.
-``snr_thrshd`` int, float .. 50.0 Objects detected above this S/N threshold will be used to compute the slitmask offset. This is the default behaviour for DEIMOS unless ``slitmask_offset``, ``bright_maskdef_id`` or ``use_alignbox`` is set.
-``use_alignbox`` bool .. False Use stars in alignment boxes to compute the slitmask offset. If this is set to ``True`` PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id``
-``use_dither_offset`` bool .. False Use the dither offset recorded in the header of science frames as the value of the slitmask offset. This is currently only available for Keck MOSFIRE reduction and it is set as the default for this instrument. If set PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id``. However, it is ignored if ``slitmask_offset`` is provided.
+``obj_toler`` int, float .. 1.0 If slitmask design information is provided, and slit matching is performed (``use_maskdesign = True`` in ``EdgeTracePar``), this parameter provides the desired tolerance (arcsec) to match sources to targeted objects
+``slitmask_offset`` int, float .. .. User-provided slitmask offset (pixels) from the position expected by the slitmask design. This is optional, and if set PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id``.
+``snr_thrshd`` int, float .. 50.0 Objects detected above this S/N threshold will be used to compute the slitmask offset. This is the default behaviour for DEIMOS unless ``slitmask_offset``, ``bright_maskdef_id`` or ``use_alignbox`` is set.
+``use_alignbox`` bool .. False Use stars in alignment boxes to compute the slitmask offset. If this is set to ``True`` PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id``
+``use_dither_offset`` bool .. False Use the dither offset recorded in the header of science frames as the value of the slitmask offset. This is currently only available for Keck MOSFIRE reduction and it is set as the default for this instrument. If set PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id``. However, it is ignored if ``slitmask_offset`` is provided.
=========================== ========== ======= ======= ======================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
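+
+A rough sketch of setting these keywords programmatically (this assumes the
+table keys above map one-to-one onto the constructor arguments):
+
+.. code-block:: python
+
+    from pypeit.par.pypeitpar import SlitMaskPar
+
+    # Assign mask-design RA/DEC/names to detected objects and force
+    # extraction at the expected location of any undetected ones
+    par = SlitMaskPar(assign_obj=True, extract_missing_objs=True)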
@@ -809,12 +809,12 @@ FrameGroupPar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.FrameGroupPar`
============= =============================================== ============================================================================================================================================================ ============================ ===============================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
============= =============================================== ============================================================================================================================================================ ============================ ===============================================================================================================================================================================================================================================================
``exprng`` list .. None, None Used in identifying frames of this type. This sets the minimum and maximum allowed exposure times. There must be two items in the list. Use None to indicate no limit; i.e., to select exposures with any time greater than 30 sec, use exprng = [30, None].
-``frametype`` str ``align``, ``arc``, ``bias``, ``dark``, ``pinhole``, ``pixelflat``, ``illumflat``, ``lampoffflats``, ``science``, ``standard``, ``trace``, ``tilt``, ``sky`` ``science`` Frame type. Options are: align, arc, bias, dark, pinhole, pixelflat, illumflat, lampoffflats, science, standard, trace, tilt, sky
-``process`` :class:`~pypeit.par.pypeitpar.ProcessImagesPar` .. `ProcessImagesPar Keywords`_ Low level parameters used for basic image processing
-``useframe`` str .. .. A calibrations file to use if it exists.
+``frametype`` str ``align``, ``arc``, ``bias``, ``dark``, ``pinhole``, ``pixelflat``, ``illumflat``, ``lampoffflats``, ``science``, ``standard``, ``trace``, ``tilt``, ``sky`` ``science`` Frame type. Options are: align, arc, bias, dark, pinhole, pixelflat, illumflat, lampoffflats, science, standard, trace, tilt, sky
+``process`` :class:`~pypeit.par.pypeitpar.ProcessImagesPar` .. `ProcessImagesPar Keywords`_ Low level parameters used for basic image processing
+``useframe`` str .. .. A calibrations file to use if it exists.
============= =============================================== ============================================================================================================================================================ ============================ ===============================================================================================================================================================================================================================================================
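+
+For instance, a minimal sketch of typing science frames with an exposure-time
+cut (again assuming the keys above are the constructor keywords):
+
+.. code-block:: python
+
+    from pypeit.par.pypeitpar import FrameGroupPar
+
+    # Identify exposures longer than 30 sec (no upper limit) as science frames
+    par = FrameGroupPar(frametype='science', exprng=[30, None])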
@@ -828,39 +828,38 @@ ProcessImagesPar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.ProcessImagesPar`
======================== ========== ====================================== ========== ============================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
======================== ========== ====================================== ========== ============================================================================================================================================================================================================================================================================================================================================================
-``apply_gain`` bool .. True Convert the ADUs to electrons using the detector gain
-``clip`` bool .. True Perform sigma clipping when combining. Only used with combine=mean
-``comb_sigrej`` float .. .. Sigma-clipping level for when clip=True; Use None for automatic limit (recommended).
-``combine`` str ``median``, ``mean`` ``mean`` Method used to combine multiple frames. Options are: median, mean
+``apply_gain`` bool .. True Convert the ADUs to electrons using the detector gain
+``clip`` bool .. True Perform sigma clipping when combining. Only used with combine=mean
+``comb_sigrej`` float .. .. Sigma-clipping level for when clip=True; Use None for automatic limit (recommended).
+``combine`` str ``median``, ``mean`` ``mean`` Method used to combine multiple frames. Options are: median, mean
``dark_expscale``        bool       ..                                     False      If designated dark frames are used and have a different exposure time than the science frames, scale the counts by the ratio of the exposure times to adjust the dark counts for the difference in exposure time. WARNING: You should always take dark frames that have the same exposure time as your science frames, so use this option with care!
-``empirical_rn`` bool .. False If True, use the standard deviation in the overscan region to measure an empirical readnoise to use in the noise model.
-``grow`` int, float .. 1.5 Factor by which to expand regions with cosmic rays detected by the LA cosmics routine.
-``lamaxiter`` int .. 1 Maximum number of iterations for LA cosmics routine.
-``mask_cr`` bool .. False Identify CRs and mask them
-``n_lohi`` list .. 0, 0 Number of pixels to reject at the lowest and highest ends of the distribution; i.e., n_lohi = low, high. Use None for no limit.
-``noise_floor`` float .. 0.0 Impose a noise floor by adding the provided fraction of the bias- and dark-subtracted electron counts to the error budget. E.g., a value of 0.01 means that the S/N of the counts in the image will never be greater than 100.
-``objlim`` int, float .. 3.0 Object detection limit in LA cosmics routine
-``orient`` bool .. True Orient the raw image into the PypeIt frame
-``overscan_method`` str ``polynomial``, ``savgol``, ``median`` ``savgol`` Method used to fit the overscan. Options are: polynomial, savgol, median
-``overscan_par`` int, list .. 5, 65 Parameters for the overscan subtraction. For 'polynomial', set overcan_par = order, number of pixels, number of repeats ; for 'savgol', set overscan_par = order, window size ; for 'median', set overscan_par = None or omit the keyword.
-``rmcompact`` bool .. True Remove compact detections in LA cosmics routine
-``satpix`` str ``reject``, ``force``, ``nothing`` ``reject`` Handling of saturated pixels. Options are: reject, force, nothing
-``shot_noise`` bool .. True Use the bias- and dark-subtracted image to calculate and include electron count shot noise in the image processing error budget
-``sigclip`` int, float .. 4.5 Sigma level for rejection in LA cosmics routine
-``sigfrac`` int, float .. 0.3 Fraction for the lower clipping threshold in LA cosmics routine.
-``spat_flexure_correct`` bool .. False Correct slits, illumination flat, etc. for flexure
-``subtract_continuum`` bool .. False Subtract off the continuum level from an image. This parameter should only be set to True to combine arcs with multiple different lamps. For all other cases, this parameter should probably be False.
-``subtract_scattlight`` bool .. False Subtract off the scattered light from an image. This parameter should only be set to True for spectrographs that have dedicated methods to subtract scattered light. For all other cases, this parameter should be False.
-``trim`` bool .. True Trim the image to the detector supplied region
-``use_biasimage`` bool .. True Use a bias image. If True, one or more must be supplied in the PypeIt file.
-``use_darkimage`` bool .. False Subtract off a dark image. If True, one or more darks must be provided.
-``use_illumflat`` bool .. True Use the illumination flat to correct for the illumination profile of each slit.
-``use_overscan`` bool .. True Subtract off the overscan. Detector *must* have one or code will crash.
-``use_pattern`` bool .. False Subtract off a detector pattern. This pattern is assumed to be sinusoidal along one direction, with a frequency that is constant across the detector.
-``use_pixelflat`` bool .. True Use the pixel flat to make pixel-level corrections. A pixelflat image must be provied.
-``use_specillum`` bool .. False Use the relative spectral illumination profiles to correct the spectral illumination profile of each slit. This is primarily used for IFUs. To use this, you must set ``slit_illum_relative=True`` in the ``flatfield`` parameter set!
+``empirical_rn`` bool .. False If True, use the standard deviation in the overscan region to measure an empirical readnoise to use in the noise model.
+``grow`` int, float .. 1.5 Factor by which to expand regions with cosmic rays detected by the LA cosmics routine.
+``lamaxiter`` int .. 1 Maximum number of iterations for LA cosmics routine.
+``mask_cr`` bool .. False Identify CRs and mask them
+``n_lohi`` list .. 0, 0 Number of pixels to reject at the lowest and highest ends of the distribution; i.e., n_lohi = low, high. Use None for no limit.
+``noise_floor`` float .. 0.0 Impose a noise floor by adding the provided fraction of the bias- and dark-subtracted electron counts to the error budget. E.g., a value of 0.01 means that the S/N of the counts in the image will never be greater than 100.
+``objlim`` int, float .. 3.0 Object detection limit in LA cosmics routine
+``orient`` bool .. True Orient the raw image into the PypeIt frame
+``overscan_method`` str ``polynomial``, ``savgol``, ``median`` ``savgol`` Method used to fit the overscan. Options are: polynomial, savgol, median
+``overscan_par``         int, list  ..                                     5, 65      Parameters for the overscan subtraction. For 'polynomial', set overscan_par = order, number of pixels, number of repeats; for 'savgol', set overscan_par = order, window size; for 'median', set overscan_par = None or omit the keyword.
+``rmcompact`` bool .. True Remove compact detections in LA cosmics routine
+``satpix`` str ``reject``, ``force``, ``nothing`` ``reject`` Handling of saturated pixels. Options are: reject, force, nothing
+``shot_noise`` bool .. True Use the bias- and dark-subtracted image to calculate and include electron count shot noise in the image processing error budget
+``sigclip`` int, float .. 4.5 Sigma level for rejection in LA cosmics routine
+``sigfrac`` int, float .. 0.3 Fraction for the lower clipping threshold in LA cosmics routine.
+``spat_flexure_correct`` bool .. False Correct slits, illumination flat, etc. for flexure
+``subtract_continuum`` bool .. False Subtract off the continuum level from an image. This parameter should only be set to True to combine arcs with multiple different lamps. For all other cases, this parameter should probably be False.
+``trim`` bool .. True Trim the image to the detector supplied region
+``use_biasimage`` bool .. True Use a bias image. If True, one or more must be supplied in the PypeIt file.
+``use_darkimage`` bool .. False Subtract off a dark image. If True, one or more darks must be provided.
+``use_illumflat`` bool .. True Use the illumination flat to correct for the illumination profile of each slit.
+``use_overscan`` bool .. True Subtract off the overscan. Detector *must* have one or code will crash.
+``use_pattern`` bool .. False Subtract off a detector pattern. This pattern is assumed to be sinusoidal along one direction, with a frequency that is constant across the detector.
+``use_pixelflat``        bool       ..                                     True       Use the pixel flat to make pixel-level corrections. A pixelflat image must be provided.
+``use_specillum`` bool .. False Use the relative spectral illumination profiles to correct the spectral illumination profile of each slit. This is primarily used for IFUs. To use this, you must set ``slit_illum_relative=True`` in the ``flatfield`` parameter set!
======================== ========== ====================================== ========== ============================================================================================================================================================================================================================================================================================================================================================
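+
+A short sketch of adjusting a few of these processing knobs in Python
+(assuming a one-to-one mapping between table keys and constructor keywords):
+
+.. code-block:: python
+
+    from pypeit.par.pypeitpar import ProcessImagesPar
+
+    # Median-combine frames, mask cosmic rays, and skip bias subtraction
+    par = ProcessImagesPar(combine='median', mask_cr=True, use_biasimage=False)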
@@ -874,24 +873,24 @@ SensFuncPar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.SensFuncPar`
======================= ============================================== ================ =========================== ============================================================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
======================= ============================================== ================ =========================== ============================================================================================================================================================================================================================================================================================================================================================================================
-``IR`` :class:`~pypeit.par.pypeitpar.TelluricPar` .. `TelluricPar Keywords`_ Parameters for the IR sensfunc algorithm
-``UVIS`` :class:`~pypeit.par.pypeitpar.SensfuncUVISPar` .. `SensfuncUVISPar Keywords`_ Parameters for the UVIS sensfunc algorithm
+``IR`` :class:`~pypeit.par.pypeitpar.TelluricPar` .. `TelluricPar Keywords`_ Parameters for the IR sensfunc algorithm
+``UVIS`` :class:`~pypeit.par.pypeitpar.SensfuncUVISPar` .. `SensfuncUVISPar Keywords`_ Parameters for the UVIS sensfunc algorithm
``algorithm``           str                                            ``UVIS``, ``IR`` ``UVIS``                    Specify the algorithm for computing the sensitivity function. The options are: (1) UVIS = Should be used for data with :math:`\lambda < 7000` A. No detailed model of telluric absorption but corrects for atmospheric extinction. (2) IR = Should be used for data with :math:`\lambda > 7000` A. Performs joint fit for sensitivity function and telluric absorption using HITRAN models.
-``extrap_blu`` float .. 0.1 Fraction of minimum wavelength coverage to grow the wavelength coverage of the sensitivitity function in the blue direction (`i.e.`, if the standard star spectrum cuts off at ``wave_min``) the sensfunc will be extrapolated to cover down to (1.0 - ``extrap_blu``) * ``wave_min``
-``extrap_red`` float .. 0.1 Fraction of maximum wavelength coverage to grow the wavelength coverage of the sensitivitity function in the red direction (`i.e.`, if the standard star spectrumcuts off at ``wave_max``) the sensfunc will be extrapolated to cover up to (1.0 + ``extrap_red``) * ``wave_max``
-``flatfile`` str .. .. Flat field file to be used if the sensitivity function model will utilize the blaze function computed from a flat field file in the Calibrations directory, e.g.Calibrations/Flat_A_0_DET01.fits
-``hydrogen_mask_wid`` float .. 10.0 Mask width from line center for hydrogen recombination lines in Angstroms (total mask width is 2x this value).
-``mask_helium_lines`` bool .. False Mask certain ``HeII`` recombination lines prominent in O-type stars in the sensitivity function fit A region equal to 0.5 * ``hydrogen_mask_wid`` on either side of the line center is masked.
-``mask_hydrogen_lines`` bool .. True Mask hydrogen Balmer, Paschen, Brackett, and Pfund recombination lines in the sensitivity function fit. A region equal to ``hydrogen_mask_wid`` on either side of the line center is masked.
-``multi_spec_det`` list .. .. List of detectors (identified by their string name, like DET01) to splice together for multi-detector instruments (e.g. DEIMOS). It is assumed that there is *no* overlap in wavelength across detectors (might be ok if there is). If entered as a list of integers, they should be converted to the detector name. **Cannot be used with detector mosaics.**
-``polyorder`` int, list .. 5 Polynomial order for sensitivity function fitting
-``samp_fact`` float .. 1.5 Sampling factor to make the wavelength grid for sensitivity function finer or coarser. samp_fact > 1.0 oversamples (finer), samp_fact < 1.0 undersamples (coarser).
-``star_dec`` float .. .. DEC of the standard star. This will override values in the header (`i.e.`, if they are wrong or absent)
-``star_mag`` float .. .. Magnitude of the standard star (for near-IR mainly)
-``star_ra`` float .. .. RA of the standard star. This will override values in the header (`i.e.`, if they are wrong or absent)
-``star_type`` str .. .. Spectral type of the standard star (for near-IR mainly)
+``extrap_blu``          float                                          ..               0.1                         Fraction of minimum wavelength coverage to grow the wavelength coverage of the sensitivity function in the blue direction (`i.e.`, if the standard star spectrum cuts off at ``wave_min``) the sensfunc will be extrapolated to cover down to (1.0 - ``extrap_blu``) * ``wave_min``
+``extrap_red``          float                                          ..               0.1                         Fraction of maximum wavelength coverage to grow the wavelength coverage of the sensitivity function in the red direction (`i.e.`, if the standard star spectrum cuts off at ``wave_max``) the sensfunc will be extrapolated to cover up to (1.0 + ``extrap_red``) * ``wave_max``
+``flatfile``            str                                            ..               ..                          Flat field file to be used if the sensitivity function model will utilize the blaze function computed from a flat field file in the Calibrations directory, e.g. Calibrations/Flat_A_0_DET01.fits
+``hydrogen_mask_wid`` float .. 10.0 Mask width from line center for hydrogen recombination lines in Angstroms (total mask width is 2x this value).
+``mask_helium_lines``   bool                                           ..               False                       Mask certain ``HeII`` recombination lines prominent in O-type stars in the sensitivity function fit. A region equal to 0.5 * ``hydrogen_mask_wid`` on either side of the line center is masked.
+``mask_hydrogen_lines`` bool .. True Mask hydrogen Balmer, Paschen, Brackett, and Pfund recombination lines in the sensitivity function fit. A region equal to ``hydrogen_mask_wid`` on either side of the line center is masked.
+``multi_spec_det`` list .. .. List of detectors (identified by their string name, like DET01) to splice together for multi-detector instruments (e.g. DEIMOS). It is assumed that there is *no* overlap in wavelength across detectors (might be ok if there is). If entered as a list of integers, they should be converted to the detector name. **Cannot be used with detector mosaics.**
+``polyorder`` int, list .. 5 Polynomial order for sensitivity function fitting
+``samp_fact`` float .. 1.5 Sampling factor to make the wavelength grid for sensitivity function finer or coarser. samp_fact > 1.0 oversamples (finer), samp_fact < 1.0 undersamples (coarser).
+``star_dec`` float .. .. DEC of the standard star. This will override values in the header (`i.e.`, if they are wrong or absent)
+``star_mag`` float .. .. Magnitude of the standard star (for near-IR mainly)
+``star_ra`` float .. .. RA of the standard star. This will override values in the header (`i.e.`, if they are wrong or absent)
+``star_type`` str .. .. Spectral type of the standard star (for near-IR mainly)
======================= ============================================== ================ =========================== ============================================================================================================================================================================================================================================================================================================================================================================================
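+
+As an illustration only (the keyword names are taken from the table above):
+
+.. code-block:: python
+
+    from pypeit.par.pypeitpar import SensFuncPar
+
+    # Joint sensitivity-function + telluric fit for red (>7000A) data,
+    # with a higher-order polynomial
+    par = SensFuncPar(algorithm='IR', polyorder=7)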
@@ -905,20 +904,20 @@ SensfuncUVISPar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.SensfuncUVISPar`
==================== ========== ======= =========== =========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
==================== ========== ======= =========== =========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-``extinct_correct`` bool .. True If ``extinct_correct=True`` the code will use an atmospheric extinction model to extinction correct the data below 10000A. Note that this correction makes no sense if one is telluric correcting and this shold be set to False
+``extinct_correct``  bool       ..      True        If ``extinct_correct=True`` the code will use an atmospheric extinction model to extinction correct the data below 10000A. Note that this correction makes no sense if one is telluric correcting and this should be set to False
``extinct_file``     str        ..      ``closest`` If ``extinct_file='closest'`` the code will select the PypeIt-included extinction file for the closest observatory (within 5 deg, geographic coordinates) to the telescope identified in ``std_file`` (see :ref:`extinction_correction` for the list of currently included files). If constructing a sensitivity function for a telescope not within 5 deg of a listed observatory, this parameter may be set to the name of one of the listed extinction files. Alternatively, a custom extinction file may be installed in the PypeIt cache using the ``pypeit_install_extinctfile`` script; this parameter may then be set to the name of the custom extinction file.
-``nresln`` int, float .. 20 Parameter governing the spacing of the bspline breakpoints in terms of number of resolution elements.
-``polycorrect`` bool .. True Whether you want to correct the sensfunc with polynomial in the telluric and recombination line regions
-``polyfunc`` bool .. False Whether you want to use the polynomial fit as your final SENSFUNC
-``resolution`` int, float .. 3000.0 Expected resolution of the standard star spectrum. This should be measured from the data.
-``sensfunc`` str .. .. FITS file that contains or will contain the sensitivity function.
-``std_file`` str .. .. Standard star file to generate sensfunc
-``std_obj_id`` str, int .. .. Specifies object in spec1d file to use as standard. The brightest object found is used otherwise.
-``telluric`` bool .. False If ``telluric=True`` the code creates a synthetic standard star spectrum using the Kurucz models, the sens func is created setting nresln=1.5 it contains the correction for telluric lines.
-``telluric_correct`` bool .. False If ``telluric_correct=True`` the code will grab the sens_dict['telluric'] tag from the sensfunc dictionary and apply it to the data.
-``trans_thresh`` float .. 0.9 Parameter for selecting telluric regions which are masked. Locations below this transmission value are masked. If you have significant telluric absorption you should be using telluric.sensnfunc_telluric
+``nresln`` int, float .. 20 Parameter governing the spacing of the bspline breakpoints in terms of number of resolution elements.
+``polycorrect``      bool       ..      True        Whether you want to correct the sensfunc with a polynomial in the telluric and recombination line regions
+``polyfunc`` bool .. False Whether you want to use the polynomial fit as your final SENSFUNC
+``resolution`` int, float .. 3000.0 Expected resolution of the standard star spectrum. This should be measured from the data.
+``sensfunc`` str .. .. FITS file that contains or will contain the sensitivity function.
+``std_file`` str .. .. Standard star file to generate sensfunc
+``std_obj_id`` str, int .. .. Specifies object in spec1d file to use as standard. The brightest object found is used otherwise.
+``telluric``         bool       ..      False       If ``telluric=True`` the code creates a synthetic standard star spectrum using the Kurucz models; the sensfunc is created with nresln=1.5 and contains the correction for telluric lines.
+``telluric_correct`` bool .. False If ``telluric_correct=True`` the code will grab the sens_dict['telluric'] tag from the sensfunc dictionary and apply it to the data.
+``trans_thresh``     float      ..      0.9         Parameter for selecting telluric regions which are masked. Locations below this transmission value are masked. If you have significant telluric absorption you should be using telluric.sensfunc_telluric
==================== ========== ======= =========== =========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
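+
+A hedged example of the trade-off noted for ``extinct_correct`` above:
+
+.. code-block:: python
+
+    from pypeit.par.pypeitpar import SensfuncUVISPar
+
+    # If telluric-correcting, the extinction correction should be disabled
+    par = SensfuncUVISPar(extinct_correct=False, telluric_correct=True)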
@@ -932,45 +931,45 @@ TelluricPar Keywords
Class Instantiation: :class:`~pypeit.par.pypeitpar.TelluricPar`
======================= ================== ======= ========================== =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
+Key Type Options Default Description
======================= ================== ======= ========================== =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
-``bal_wv_min_max`` list, ndarray .. .. Min/max wavelength of broad absorption features. If there are several BAL features, the format for this mask is [wave_min_bal1, wave_max_bal1,wave_min_bal2, wave_max_bal2,...]. These masked pixels will be ignored during the fitting.
-``bounds_norm`` tuple .. (0.1, 3.0) Normalization bounds for scaling the initial object model.
-``delta_coeff_bounds`` tuple .. (-20.0, 20.0) Parameters setting the polynomial coefficient bounds for sensfunc optimization.
-``delta_redshift`` float .. 0.1 Range within the redshift can be varied for telluric fitting, i.e. the code performs a bounded optimization within the redshift +- delta_redshift
-``disp`` bool .. False Argument for scipy.optimize.differential_evolution which will display status messages to the screen indicating the status of the optimization. See documentation for telluric.Telluric for a description of the output and how to know if things are working well.
-``fit_wv_min_max`` list .. .. Pixels within this mask will be used during the fitting. The format is the same with bal_wv_min_max, but this mask is good pixel masks.
-``func`` str .. ``legendre`` Polynomial model function
-``lower`` int, float .. 3.0 Lower rejection threshold in units of sigma_corr*sigma, where sigma is the formal noise of the spectrum, and sigma_corr is an empirically determined correction to the formal error. The distribution of input chi (defined by chi = (data - model)/sigma) values is analyzed, and a correction factor to the formal error sigma_corr is returned which is multiplied into the formal errors. In this way, a rejection threshold of i.e. 3-sigma, will always correspond to roughly the same percentile. This renormalization is performed with coadd1d.renormalize_errors function, and guarantees that rejection is not too agressive in cases where the empirical errors determined from the chi-distribution differ significantly from the formal noise which is used to determine chi.
-``mask_lyman_a`` bool .. True Mask the blueward of Lyman-alpha line during the fitting?
-``maxiter`` int .. 2 Maximum number of iterations for the telluric + object model fitting. The code performs multiple iterations rejecting outliers at each step. The fit is then performed anew to the remaining good pixels. For this reason if you run with the disp=True option, you will see that the f(x) loss function gets progressively better during the iterations.
-``minmax_coeff_bounds`` tuple .. (-5.0, 5.0) Parameters setting the polynomial coefficient bounds for sensfunc optimization. Bounds are currently determined as follows. We compute an initial fit to the sensfunc in the :func:`~pypeit.core.telluric.init_sensfunc_model` function. That deterines a set of coefficients. The bounds are then determined according to: [(np.fmin(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][0], obj_params['minmax_coeff_bounds'][0]), np.fmax(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][1], obj_params['minmax_coeff_bounds'][1]))]
-``model`` str .. ``exp`` Types of polynomial model. Options are poly, square, exp corresponding to normal polynomial, squared polynomial, or exponentiated polynomial
-``npca`` int .. 8 Number of pca for the objmodel=qso qso PCA fit
-``objmodel`` str .. .. The object model to be used for telluric fitting. Currently the options are: qso, star, and poly
-``only_orders`` int, list, ndarray .. .. Order number, or list of order numbers if you only want to fit specific orders
-``pca_file`` str .. ``qso_pca_1200_3100.fits`` Fits file containing quasar PCA model. Needed for objmodel=qso. NOTE: This parameter no longer includes the full pathname to the Telluric Model file, but is just the filename of the model itself.
-``pca_lower`` int, float .. 1220.0 Minimum wavelength for the qso pca model
-``pca_upper`` int, float .. 3100.0 Maximum wavelength for the qso pca model
-``pix_shift_bounds`` tuple .. (-5.0, 5.0) Bounds for the pixel shift optimization in telluric model fit in units of pixels. The atmosphere will be allowed to shift within this range during the fit.
-``polish`` bool .. True If True then differential evolution will perform an additional optimizatino at the end to polish the best fit at the end, which can improve the optimization slightly. See scipy.optimize.differential_evolution for details.
-``polyorder`` int .. 3 Order of the polynomial model fit
-``popsize`` int .. 30 A multiplier for setting the total population size for the differential evolution optimization. See scipy.optimize.differential_evolution for details.
-``recombination`` int, float .. 0.7 The recombination constant for the differential evolution optimization. This should be in the range [0, 1]. See scipy.optimize.differential_evolution for details.
-``redshift`` int, float .. 0.0 The redshift for the object model. This is currently only used by objmodel=qso
-``resln_frac_bounds`` tuple .. (0.5, 1.5) Bounds for the resolution fit optimization which is part of the telluric model. This range is in units of the resln_guess, so the (0.5, 1.5) would bound the spectral resolution fit to be within the range bounds_resln = (0.5*resln_guess, 1.5*resln_guess)
-``resln_guess`` int, float .. .. A guess for the resolution of your spectrum expressed as lambda/dlambda. The resolution is fit explicitly as part of the telluric model fitting, but this guess helps determine the bounds for the optimization (see next). If not provided, the wavelength sampling of your spectrum will be used and the resolution calculated using a typical sampling of 3 spectral pixels per resolution element.
-``seed`` int .. 777 An initial seed for the differential evolution optimization, which is a random process. The default is a seed = 777 which will be used to generate a unique seed for every order. A specific seed is used because otherwise the random number generator will use the time for the seed, and the results will not be reproducible.
-``sn_clip`` int, float .. 30.0 This adds an error floor to the ivar, preventing too much rejection at high-S/N (`i.e.`, standard stars, bright objects) using the function utils.clip_ivar. A small erorr is added to the input ivar so that the output ivar_out will never give S/N greater than sn_clip. This prevents overly aggressive rejection in high S/N ratio spectra which neverthless differ at a level greater than the formal S/N due to the fact that our telluric models are only good to about 3%.
-``star_dec`` float .. .. Object declination in decimal deg
-``star_mag`` float, int .. .. AB magnitude in V band
-``star_ra`` float .. .. Object right-ascension in decimal deg
-``star_type`` str .. .. stellar type
+``bal_wv_min_max`` list, ndarray .. .. Min/max wavelength of broad absorption features. If there are several BAL features, the format for this mask is [wave_min_bal1, wave_max_bal1,wave_min_bal2, wave_max_bal2,...]. These masked pixels will be ignored during the fitting.
+``bounds_norm`` tuple .. (0.1, 3.0) Normalization bounds for scaling the initial object model.
+``delta_coeff_bounds`` tuple .. (-20.0, 20.0) Parameters setting the polynomial coefficient bounds for sensfunc optimization.
+``delta_redshift``      float              ..      0.1                        Range within which the redshift can be varied for telluric fitting, i.e. the code performs a bounded optimization within the redshift +- delta_redshift
+``disp`` bool .. False Argument for scipy.optimize.differential_evolution which will display status messages to the screen indicating the status of the optimization. See documentation for telluric.Telluric for a description of the output and how to know if things are working well.
+``fit_wv_min_max``      list               ..      ..                         Pixels within this mask will be used during the fitting. The format is the same as bal_wv_min_max, but this mask defines the good pixels.
+``func`` str .. ``legendre`` Polynomial model function
+``lower``               int, float         ..      3.0                        Lower rejection threshold in units of sigma_corr*sigma, where sigma is the formal noise of the spectrum, and sigma_corr is an empirically determined correction to the formal error. The distribution of input chi (defined by chi = (data - model)/sigma) values is analyzed, and a correction factor to the formal error sigma_corr is returned which is multiplied into the formal errors. In this way, a rejection threshold of, e.g., 3-sigma will always correspond to roughly the same percentile. This renormalization is performed with the coadd1d.renormalize_errors function, and guarantees that rejection is not too aggressive in cases where the empirical errors determined from the chi-distribution differ significantly from the formal noise which is used to determine chi.
+``mask_lyman_a``        bool               ..      True                       If True, mask the region blueward of the Lyman-alpha line during the fitting.
+``maxiter`` int .. 2 Maximum number of iterations for the telluric + object model fitting. The code performs multiple iterations rejecting outliers at each step. The fit is then performed anew to the remaining good pixels. For this reason if you run with the disp=True option, you will see that the f(x) loss function gets progressively better during the iterations.
+``minmax_coeff_bounds`` tuple              ..      (-5.0, 5.0)                Parameters setting the polynomial coefficient bounds for sensfunc optimization. Bounds are currently determined as follows. We compute an initial fit to the sensfunc in the :func:`~pypeit.core.telluric.init_sensfunc_model` function. That determines a set of coefficients. The bounds are then determined according to: [(np.fmin(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][0], obj_params['minmax_coeff_bounds'][0]), np.fmax(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][1], obj_params['minmax_coeff_bounds'][1]))]
+``model`` str .. ``exp`` Types of polynomial model. Options are poly, square, exp corresponding to normal polynomial, squared polynomial, or exponentiated polynomial
+``npca``                int                ..      8                          Number of PCA components for the objmodel=qso PCA fit
+``objmodel`` str .. .. The object model to be used for telluric fitting. Currently the options are: qso, star, and poly
+``only_orders`` int, list, ndarray .. .. Order number, or list of order numbers if you only want to fit specific orders
+``pca_file``            str                ..      ``qso_pca_1200_3100.fits`` FITS file containing the quasar PCA model. Needed for objmodel=qso. NOTE: This parameter no longer includes the full pathname to the Telluric Model file, but is just the filename of the model itself.
+``pca_lower`` int, float .. 1220.0 Minimum wavelength for the qso pca model
+``pca_upper`` int, float .. 3100.0 Maximum wavelength for the qso pca model
+``pix_shift_bounds`` tuple .. (-5.0, 5.0) Bounds for the pixel shift optimization in telluric model fit in units of pixels. The atmosphere will be allowed to shift within this range during the fit.
+``polish``              bool               ..      True                       If True then differential evolution will perform an additional optimization at the end to polish the best fit, which can improve the optimization slightly. See scipy.optimize.differential_evolution for details.
+``polyorder`` int .. 3 Order of the polynomial model fit
+``popsize`` int .. 30 A multiplier for setting the total population size for the differential evolution optimization. See scipy.optimize.differential_evolution for details.
+``recombination`` int, float .. 0.7 The recombination constant for the differential evolution optimization. This should be in the range [0, 1]. See scipy.optimize.differential_evolution for details.
+``redshift`` int, float .. 0.0 The redshift for the object model. This is currently only used by objmodel=qso
+``resln_frac_bounds`` tuple .. (0.5, 1.5) Bounds for the resolution fit optimization which is part of the telluric model. This range is in units of the resln_guess, so the (0.5, 1.5) would bound the spectral resolution fit to be within the range bounds_resln = (0.5*resln_guess, 1.5*resln_guess)
+``resln_guess`` int, float .. .. A guess for the resolution of your spectrum expressed as lambda/dlambda. The resolution is fit explicitly as part of the telluric model fitting, but this guess helps determine the bounds for the optimization (see next). If not provided, the wavelength sampling of your spectrum will be used and the resolution calculated using a typical sampling of 3 spectral pixels per resolution element.
+``seed`` int .. 777 An initial seed for the differential evolution optimization, which is a random process. The default is a seed = 777 which will be used to generate a unique seed for every order. A specific seed is used because otherwise the random number generator will use the time for the seed, and the results will not be reproducible.
+``sn_clip``             int, float         ..      30.0                       This adds an error floor to the ivar, preventing too much rejection at high-S/N (`i.e.`, standard stars, bright objects) using the function utils.clip_ivar. A small error is added to the input ivar so that the output ivar_out will never give S/N greater than sn_clip. This prevents overly aggressive rejection in high S/N ratio spectra which nevertheless differ at a level greater than the formal S/N due to the fact that our telluric models are only good to about 3%.
+``star_dec`` float .. .. Object declination in decimal deg
+``star_mag`` float, int .. .. AB magnitude in V band
+``star_ra`` float .. .. Object right-ascension in decimal deg
+``star_type`` str .. .. stellar type
``sticky`` bool .. True Sticky parameter for the utils.djs_reject algorithm for iterative model fit rejection. If set to True then points rejected from a previous iteration are kept rejected, in other words the bad pixel mask is the OR of all previous iterations and rejected pixels accumulate. If set to False, the bad pixel mask is the mask from the previous iteration, and if the model fit changes between iterations, points can alternate from being rejected to not rejected. At present this code only performs optimizations with differential evolution and experience shows that sticky needs to be True in order for these to converge. This is because the outliers can be so large that they dominate the loss function, and one never iteratively converges to a good model fit. In other words, the deformations in the model between iterations with sticky=False are too small to approach a reasonable fit.
-``telgridfile`` str .. .. File containing the telluric grid for the observatory in question. These grids are generated from HITRAN models for each observatory using nominal site parameters. They must be downloaded from the GoogleDrive and installed in your PypeIt installation via the pypeit_install_telluric script. NOTE: This parameter no longer includes the full pathname to the Telluric Grid file, but is just the filename of the grid itself.
-``tell_norm_thresh`` int, float .. 0.9 Threshold of telluric absorption region
-``tol`` float .. 0.001 Relative tolerance for converage of the differential evolution optimization. See scipy.optimize.differential_evolution for details.
-``upper`` int, float .. 3.0 Upper rejection threshold in units of sigma_corr*sigma, where sigma is the formal noise of the spectrum, and sigma_corr is an empirically determined correction to the formal error. See above for description.
+``telgridfile`` str .. .. File containing the telluric grid for the observatory in question. These grids are generated from HITRAN models for each observatory using nominal site parameters. They must be downloaded from the GoogleDrive and installed in your PypeIt installation via the pypeit_install_telluric script. NOTE: This parameter no longer includes the full pathname to the Telluric Grid file, but is just the filename of the grid itself.
+``tell_norm_thresh`` int, float .. 0.9 Threshold of telluric absorption region
+``tol``                 float              ..      0.001                      Relative tolerance for convergence of the differential evolution optimization. See scipy.optimize.differential_evolution for details.
+``upper`` int, float .. 3.0 Upper rejection threshold in units of sigma_corr*sigma, where sigma is the formal noise of the spectrum, and sigma_corr is an empirically determined correction to the formal error. See above for description.
======================= ================== ======= ========================== =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
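+
+One possible configuration, sketched with the keys above (the star parameters
+are placeholders):
+
+.. code-block:: python
+
+    from pypeit.par.pypeitpar import TelluricPar
+
+    # Fit a stellar object model and show the differential-evolution progress
+    par = TelluricPar(objmodel='star', star_type='A0', star_mag=8.0, disp=True)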
@@ -2915,7 +2914,6 @@ Alterations to the default parameters are:
satpix = nothing
use_pixelflat = False
use_illumflat = False
- subtract_scattlight = True
[[alignframe]]
[[[process]]]
satpix = nothing
@@ -2932,7 +2930,6 @@ Alterations to the default parameters are:
satpix = nothing
use_illumflat = False
use_pattern = True
- subtract_scattlight = True
[[lampoffflatsframe]]
[[[process]]]
satpix = nothing
@@ -2970,7 +2967,6 @@ Alterations to the default parameters are:
noise_floor = 0.01
use_specillum = True
use_pattern = True
- subtract_scattlight = True
[reduce]
[[skysub]]
no_poly = True
@@ -7468,4 +7464,3 @@ Alterations to the default parameters are:
mask_cr = True
use_overscan = False
noise_floor = 0.01
-
diff --git a/doc/releases/1.14.1dev.rst b/doc/releases/1.14.1dev.rst
index 62d74ffac3..53aae548b7 100644
--- a/doc/releases/1.14.1dev.rst
+++ b/doc/releases/1.14.1dev.rst
@@ -12,10 +12,13 @@ Functionality/Performance Improvements and Additions
- Started the development of instrument-specific scattered light removal. In this
release, we only model KCWI/KCRM scattered light.
+- Added support for Keck/KCRM RL data reduction.
Instrument-specific Updates
---------------------------
+- Keck/KCWI and Keck/KCRM: Turned on polynomial correction for sky subtraction.
+
Script Changes
--------------
@@ -25,14 +28,21 @@ Script Changes
Datamodel Changes
-----------------
+- A wavelength array is now stored for DataCube()
+
Under-the-hood Improvements
---------------------------
+- The CoAdd3D code has been refactored into a series of core modules and PypeIt-specific routines.
+
Bug Fixes
---------
- Fixed bug associated with finding more than one file with the same name (but
presumably different extensions).
+- Fixed differential atmospheric refraction (DAR) correction bug. This bug affected
+ datacubes combined using CoAdd3D(). Previously, the DAR was being computed, applied,
+ and then later overwritten. The new routine is faster and more accurate.
- Fixed a bug associated with an incorrect date for the transition to the Mark4
detector for Keck/LRIS RED.
diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py
new file mode 100644
index 0000000000..58b74265d3
--- /dev/null
+++ b/pypeit/coadd3d.py
@@ -0,0 +1,1381 @@
+"""
+Module containing routines used by 3D datacubes.
+
+.. include:: ../include/links.rst
+"""
+
+import os
+import copy
+import inspect
+
+from astropy import wcs, units
+from astropy.io import fits
+import erfa
+from scipy.interpolate import interp1d
+import numpy as np
+
+from pypeit import msgs
+from pypeit import alignframe, datamodel, flatfield, io, spec2dobj, utils
+from pypeit.core.flexure import calculate_image_phase
+from pypeit.core import datacube, extract, flux_calib, parse
+from pypeit.spectrographs.util import load_spectrograph
+
+from IPython import embed
+
+
+class DataCube(datamodel.DataContainer):
+ """
+ DataContainer to hold the products of a datacube
+
+ The datamodel attributes are:
+
+ .. include:: ../include/class_datamodel_datacube.rst
+
+ Args:
+ flux (`numpy.ndarray`_):
+ The science datacube (nwave, nspaxel_y, nspaxel_x)
+ sig (`numpy.ndarray`_):
+ The error datacube (nwave, nspaxel_y, nspaxel_x)
+ bpm (`numpy.ndarray`_):
+ The bad pixel mask of the datacube (nwave, nspaxel_y, nspaxel_x).
+ True values indicate a bad pixel
+ wave (`numpy.ndarray`_):
+ A 1D numpy array containing the wavelength array for convenience (nwave)
+ blaze_wave (`numpy.ndarray`_):
+ Wavelength array of the spectral blaze function
+ blaze_spec (`numpy.ndarray`_):
+ The spectral blaze function
+ sensfunc (`numpy.ndarray`_, None):
+ Sensitivity function (nwave,). Only saved if the data are fluxed.
+ PYP_SPEC (str):
+ Name of the PypeIt Spectrograph
+ fluxed (bool):
+ If the cube has been flux calibrated, this will be set to "True"
+
+ Attributes:
+ head0 (`astropy.io.fits.Header`_):
+ Primary header
+ filename (str):
+ Filename to use when loading from file
+ spect_meta (:obj:`dict`):
+ Parsed meta from the header
+ spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`):
+ Build from PYP_SPEC
+        _ivar (`numpy.ndarray`_):
+            Inverse variance cube, computed from ``sig`` on first access to :attr:`ivar`
+        _wcs (`astropy.wcs.WCS`_):
+            World coordinate system, built from ``head0`` on first access to :attr:`wcs`
+
+ """
+ version = '1.2.0'
+
+ datamodel = {'flux': dict(otype=np.ndarray, atype=np.floating,
+ descr='Flux datacube in units of counts/s/Ang/arcsec^2 or '
+ '10^-17 erg/s/cm^2/Ang/arcsec^2'),
+ 'sig': dict(otype=np.ndarray, atype=np.floating,
+ descr='Error datacube (matches units of flux)'),
+ 'bpm': dict(otype=np.ndarray, atype=np.uint8,
+ descr='Bad pixel mask of the datacube (0=good, 1=bad)'),
+ 'wave': dict(otype=np.ndarray, atype=np.floating,
+ descr='Wavelength of each slice in the spectral direction. '
+ 'The units are Angstroms.'),
+ 'blaze_wave': dict(otype=np.ndarray, atype=np.floating,
+ descr='Wavelength array of the spectral blaze function'),
+ 'blaze_spec': dict(otype=np.ndarray, atype=np.floating,
+ descr='The spectral blaze function'),
+ 'sensfunc': dict(otype=np.ndarray, atype=np.floating,
+ descr='Sensitivity function 10^-17 erg/(counts/cm^2)'),
+ 'PYP_SPEC': dict(otype=str, descr='PypeIt: Spectrograph name'),
+ 'fluxed': dict(otype=bool, descr='Boolean indicating if the datacube is fluxed.')}
+
+ internals = ['head0',
+ 'filename',
+ 'spectrograph',
+ 'spect_meta',
+ '_ivar', # This is set internally, and should be accessed with self.ivar
+ '_wcs' # This is set internally, and should be accessed with self.wcs
+ ]
+
+ def __init__(self, flux, sig, bpm, wave, PYP_SPEC, blaze_wave, blaze_spec, sensfunc=None,
+ fluxed=None):
+
+ args, _, _, values = inspect.getargvalues(inspect.currentframe())
+ _d = dict([(k, values[k]) for k in args[1:]])
+ # Setup the DataContainer
+ datamodel.DataContainer.__init__(self, d=_d)
+ # Initialise the internals
+ self._ivar = None
+ self._wcs = None
+
+ def _bundle(self):
+ """
+        Over-write default _bundle() method to write each array in the
+        datamodel to its own HDU extension
+
+ Returns:
+ :obj:`list`: A list of dictionaries, each list element is
+ written to its own fits extension. See the description
+ above.
+ """
+ d = []
+ # Rest of the datamodel
+ for key in self.keys():
+ # Skip Nones
+ if self[key] is None:
+ continue
+ # Array?
+ if self.datamodel[key]['otype'] == np.ndarray:
+ tmp = {}
+ if self.datamodel[key]['atype'] == np.floating:
+ tmp[key] = self[key].astype(np.float32)
+ else:
+ tmp[key] = self[key]
+ d.append(tmp)
+ else:
+ # Add to header of the primary image
+ d[0][key] = self[key]
+ # Return
+ return d
+
+ def to_file(self, ofile, primary_hdr=None, hdr=None, **kwargs):
+ """
+ Over-load :func:`~pypeit.datamodel.DataContainer.to_file`
+ to deal with the header
+
+ Args:
+ ofile (:obj:`str`):
+ Filename
+ primary_hdr (`astropy.io.fits.Header`_, optional):
+ Base primary header. Updated with new subheader keywords. If
+ None, initialized using :func:`~pypeit.io.initialize_header`.
+            hdr (`astropy.io.fits.Header`_, optional):
+                Baseline header to add to all returned HDUs
+ kwargs (dict):
+ Keywords passed directly to parent ``to_file`` function.
+
+ """
+ if primary_hdr is None:
+ primary_hdr = io.initialize_header()
+ # Build the header
+ if self.head0 is not None and self.PYP_SPEC is not None:
+ spectrograph = load_spectrograph(self.PYP_SPEC)
+ subheader = spectrograph.subheader_for_spec(self.head0, self.head0)
+ else:
+ subheader = {}
+ # Add em in
+ for key in subheader:
+ primary_hdr[key] = subheader[key]
+ # Do it
+ super().to_file(ofile, primary_hdr=primary_hdr, hdr=hdr, **kwargs)
+
+ @classmethod
+ def from_file(cls, ifile):
+ """
+ Override :func:`~pypeit.datamodel.DataContainer.from_file`
+ to deal with the header
+
+ Args:
+ ifile (:obj:`str`): Filename holding the object
+ """
+ with io.fits_open(ifile) as hdu:
+ # Read using the base class
+ self = super().from_hdu(hdu)
+ # Internals
+ self.filename = ifile
+ self.head0 = hdu[1].header # Actually use the first extension here, since it contains the WCS
+ # Meta
+ self.spectrograph = load_spectrograph(self.PYP_SPEC)
+ self.spect_meta = self.spectrograph.parse_spec_header(hdu[0].header)
+ self._ivar = None
+ self._wcs = None
+ return self
+
+ @property
+ def ivar(self):
+ """
+ Utility function to compute the inverse variance cube
+
+ Returns
+ -------
+ self._ivar : `numpy.ndarray`_
+ The inverse variance of the datacube. Note that self._ivar should
+ not be accessed directly, and you should only call self.ivar
+ """
+ if self._ivar is None:
+ self._ivar = utils.inverse(self.sig**2)
+ return self._ivar
+
+ @property
+ def wcs(self):
+ """
+ Utility function to provide the world coordinate system of the datacube
+
+ Returns
+ -------
+ self._wcs : `astropy.wcs.WCS`_
+ The WCS based on the stored header information. Note that self._wcs should
+ not be accessed directly, and you should only call self.wcs
+ """
+ if self._wcs is None:
+ self._wcs = wcs.WCS(self.head0)
+ return self._wcs
+
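+ # --------------------------------------------------------------------------
+ # Editor's note: a minimal usage sketch for the DataCube container above.
+ # The filename is hypothetical; the access pattern follows the class as
+ # written (``from_file()``, the lazy ``ivar``/``wcs`` properties, ``fluxed``).
+ #
+ # from pypeit.coadd3d import DataCube
+ # cube = DataCube.from_file("spec3d_example.fits") # hypothetical file
+ # print(cube.flux.shape) # (nwave, nspaxel_y, nspaxel_x)
+ # ivar = cube.ivar # computed lazily from cube.sig
+ # cube_wcs = cube.wcs # astropy WCS built from the stored header
+ # funits = "erg/s/cm^2/Ang/arcsec^2" if cube.fluxed else "counts/s/Ang/arcsec^2"
+ # --------------------------------------------------------------------------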
+
+class DARcorrection:
+ """
+ This class holds all of the functions needed to quickly compute the differential atmospheric refraction correction.
+ """
+ def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, wave_ref=4500.0):
+ """
+ Args:
+ airmass (:obj:`float`):
+ The airmass of the observations (unitless)
+ parangle (:obj:`float`):
+ The parallactic angle of the observations (units=radians, relative to North, towards East is positive)
+ pressure (:obj:`float`):
+ The atmospheric pressure during the observations in units of mbar (1 mbar = 1 hPa;
+ the code converts this value to hPa, as assumed by ``erfa.refco``).
+ Valid range is 100 - 1400 mbar (10 - 140 kPa).
+ temperature (:obj:`float`):
+ Temperature in degrees Celsius. Valid temperature range is -40 to
+ 100 degrees Celsius.
+ humidity (:obj:`float`):
+ The humidity during the observations (Expressed as a percentage, not a fraction!).
+ Valid range is 0 to 100.
+ cosdec (:obj:`float`):
+ Cosine of the target declination.
+ wave_ref (:obj:`float`, optional):
+ Reference wavelength (The DAR correction will be performed relative to this wavelength)
+ """
+ msgs.info("Preparing the parameters for the DAR correction")
+
+ # Get DAR parameters
+ self.airmass = airmass # unitless
+ self.parangle = parangle
+ self.pressure = pressure * units.mbar
+ self.temperature = temperature * units.deg_C
+ self.humidity = humidity/100.0
+ self.wave_ref = wave_ref*units.Angstrom
+ self.cosdec = cosdec
+
+ # Calculate the coefficients of the correction
+ self.refa, self.refb = erfa.refco(self.pressure.to_value(units.hPa), self.temperature.to_value(units.deg_C),
+ self.humidity, self.wave_ref.to_value(units.micron))
+
+ # Print out the DAR parameters
+ msgs.info("DAR correction parameters:" + msgs.newline() +
+ " Airmass = {0:.2f}".format(self.airmass) + msgs.newline() +
+ " Pressure = {0:.2f} mbar".format(self.pressure.to_value(units.mbar)) + msgs.newline() +
+ " Humidity = {0:.2f} %".format(self.humidity*100.0) + msgs.newline() +
+ " Temperature = {0:.2f} deg C".format(self.temperature.to_value(units.deg_C)) + msgs.newline() +
+ " Reference wavelength = {0:.2f} Angstrom".format(self.wave_ref.to_value(units.Angstrom)))
+
+ def calculate_dispersion(self, waves):
+ """ Calculate the total atmospheric dispersion relative to the reference wavelength
+
+ Parameters
+ ----------
+ waves : `numpy.ndarray`_
+ 1D array of wavelengths (units must be Angstroms)
+
+ Returns
+ -------
+ dar_full : `numpy.ndarray`_
+ The atmospheric dispersion (in degrees) for each input wavelength
+ """
+
+ # Calculate the zenith angle
+ z = np.arccos(1.0/self.airmass)
+
+ # Calculate the refraction coefficients at the input wavelengths
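+ # Editor's note (added for clarity): erfa.refco returns the (A, B)
+ # coefficients of the standard refraction model dZ = A*tan(z) + B*tan(z)**3,
+ # so the dispersion relative to wave_ref is the difference between the model
+ # evaluated at each wavelength and at the reference wavelength.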
+ cnsa, cnsb = erfa.refco(self.pressure.to_value(units.hPa), self.temperature.to_value(units.deg_C),
+ self.humidity, (waves*units.Angstrom).to_value(units.micron))
+ dar_full = np.rad2deg((self.refa-cnsa) * np.tan(z) + (self.refb-cnsb) * np.tan(z)**3)
+ return dar_full
+
+ def correction(self, waves):
+ """
+ Main routine that computes the DAR correction for both right ascension and declination.
+
+ Parameters
+ ----------
+ waves : `numpy.ndarray`_
+ 1D array of wavelengths (units must be Angstroms)
+
+ Returns
+ -------
+ ra_corr : `numpy.ndarray`_
+ The RA component of the atmospheric dispersion correction (in degrees) for each wavelength input.
+ dec_corr : `numpy.ndarray`_
+ The Dec component of the atmospheric dispersion correction (in degrees) for each wavelength input.
+ """
+ # Determine the correction angle
+ corr_ang = self.parangle - np.pi/2
+ # Calculate the full amount of refraction
+ dar_full = self.calculate_dispersion(waves)
+
+ # Calculate the correction in dec and RA for each detector pixel
+ # These numbers should be ADDED to the original RA and Dec values
+ ra_corr = (dar_full/self.cosdec)*np.cos(corr_ang)
+ dec_corr = -dar_full*np.sin(corr_ang)
+ return ra_corr, dec_corr
+
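+ # --------------------------------------------------------------------------
+ # Editor's note: an illustrative sketch of the DARcorrection class above.
+ # All numerical values are assumptions chosen purely for illustration.
+ #
+ # import numpy as np
+ # darcorr = DARcorrection(airmass=1.2, parangle=0.5, pressure=615.0, # mbar
+ # temperature=2.0, humidity=20.0,
+ # cosdec=np.cos(np.radians(30.0)))
+ # waves = np.linspace(3500.0, 5500.0, 100) # Angstroms
+ # ra_corr, dec_corr = darcorr.correction(waves)
+ # # ra_corr and dec_corr are in degrees, and are ADDED to the RA and Dec
+ # # of each detector pixel.
+ # --------------------------------------------------------------------------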
+
+class CoAdd3D:
+ """
+ Main routine to convert processed PypeIt spec2d frames into
+ DataCube (spec3d) files. This routine is only used for IFU
+ data reduction.
+
+ Algorithm steps are detailed in the coadd routine.
+ """
+ # Superclass factory method generates the subclass instance
+ @classmethod
+ def get_instance(cls, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offsets=None, dec_offsets=None,
+ spectrograph=None, det=1, overwrite=False, show=False, debug=False):
+ """
+ Instantiate the subclass appropriate for the provided spectrograph.
+
+ The class to instantiate must match the ``pypeline``
+ attribute of the provided ``spectrograph``, and must be a
+ subclass of :class:`CoAdd3D`; see the parent class
+ instantiation for parameter descriptions.
+
+ Returns:
+ :class:`CoAdd3D`: One of the subclasses with
+ :class:`CoAdd3D` as its base.
+ """
+
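+ # Editor's note: for example, a spectrograph whose ``pypeline`` attribute is
+ # 'SlicerIFU' resolves to the SlicerIFUCoAdd3D subclass defined below.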
+ return next(c for c in cls.__subclasses__()
+ if c.__name__ == (spectrograph.pypeline + 'CoAdd3D'))(
+ spec2dfiles, par, skysub_frame=skysub_frame, scale_corr=scale_corr, ra_offsets=ra_offsets,
+ dec_offsets=dec_offsets, spectrograph=spectrograph, det=det, overwrite=overwrite,
+ show=show, debug=debug)
+
+ def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offsets=None, dec_offsets=None,
+ spectrograph=None, det=None, overwrite=False, show=False, debug=False):
+ """
+
+ Args:
+ spec2dfiles (:obj:`list`):
+ List of all spec2D files
+ par (:class:`~pypeit.par.pypeitpar.PypeItPar`):
+ An instance of the parameter set. If None, assumes that detector 1
+ is the one reduced and uses the default reduction parameters for the
+ spectrograph (see
+ :func:`~pypeit.spectrographs.spectrograph.Spectrograph.default_pypeit_par`
+ for the relevant spectrograph class).
+ skysub_frame (:obj:`list`, optional):
+ If not None, this should be a list of frames to use for the sky subtraction of each individual
+ entry of spec2dfiles. It should be the same length as spec2dfiles.
+ scale_corr (:obj:`list`, optional):
+ If not None, this should be a list of relative scale correction options. It should be the
+ same length as spec2dfiles.
+ ra_offsets (:obj:`list`, optional):
+ If not None, this should be a list of relative RA offsets of each frame. It should be the
+ same length as spec2dfiles.
+ dec_offsets (:obj:`list`, optional):
+ If not None, this should be a list of relative Dec offsets of each frame. It should be the
+ same length as spec2dfiles.
+ spectrograph (:obj:`str`, :class:`~pypeit.spectrographs.spectrograph.Spectrograph`, optional):
+ The name or instance of the spectrograph used to obtain the data.
+ If None, this is pulled from the file header.
+ det (:obj:`int`, optional):
+ Detector index
+ overwrite (:obj:`bool`, optional):
+ Overwrite the output file, if it exists?
+ show (:obj:`bool`, optional):
+ Show results in ginga
+ debug (:obj:`bool`, optional):
+ Show QA for debugging.
+
+ """
+ # TODO :: Consider loading all calibrations into a single variable within the main CoAdd3D parent class.
+ self.spec2d = spec2dfiles
+ self.numfiles = len(spec2dfiles)
+ self.par = par
+ self.overwrite = overwrite
+ # Do some quick checks on the input options
+ if skysub_frame is not None:
+ if len(skysub_frame) != len(spec2dfiles):
+ msgs.error("The skysub_frame list must be the same length as the spec2dfiles list")
+ if scale_corr is not None:
+ if len(scale_corr) != len(spec2dfiles):
+ msgs.error("The scale_corr list must be the same length as the spec2dfiles list")
+ if ra_offsets is not None:
+ if len(ra_offsets) != len(spec2dfiles):
+ msgs.error("The ra_offsets list must be the same length as the spec2dfiles list")
+ if dec_offsets is not None:
+ if len(dec_offsets) != len(spec2dfiles):
+ msgs.error("The dec_offsets list must be the same length as the spec2dfiles list")
+ # Set the frame-specific options
+ self.skysub_frame = skysub_frame
+ self.scale_corr = scale_corr
+ self.ra_offsets = np.array(ra_offsets) if isinstance(ra_offsets, list) else ra_offsets
+ self.dec_offsets = np.array(dec_offsets) if isinstance(dec_offsets, list) else dec_offsets
+
+ # Check on Spectrograph input
+ if spectrograph is None:
+ with fits.open(spec2dfiles[0]) as hdu:
+ spectrograph = hdu[0].header['PYP_SPEC']
+
+ self.spec = load_spectrograph(spectrograph)
+ self.specname = self.spec.name
+
+ # Extract some parsets for simplicity
+ self.cubepar = self.par['reduce']['cube']
+ self.flatpar = self.par['calibrations']['flatfield']
+ self.senspar = self.par['sensfunc']
+
+ # Initialise arrays for storage
+ self.ifu_ra, self.ifu_dec = np.array([]), np.array([]) # The RA and Dec at the centre of the IFU, as stored in the header
+ self.all_ra, self.all_dec, self.all_wave = np.array([]), np.array([]), np.array([])
+ self.all_sci, self.all_ivar, self.all_idx, self.all_wghts = np.array([]), np.array([]), np.array([]), np.array([])
+ self.all_spatpos, self.all_specpos, self.all_spatid = np.array([], dtype=int), np.array([], dtype=int), np.array([], dtype=int)
+ self.all_tilts, self.all_slits, self.all_align = [], [], []
+ self.all_wcs, self.all_dar = [], []
+ self.weights = np.ones(self.numfiles) # Weights to use when combining cubes
+
+ self._dspat = None if self.cubepar['spatial_delta'] is None else self.cubepar['spatial_delta'] / 3600.0 # binning size on the sky (/3600 to convert to degrees)
+ self._dwv = self.cubepar['wave_delta'] # linear binning size in wavelength direction (in Angstroms)
+
+ # Extract some commonly used variables
+ self.method = self.cubepar['method']
+ self.combine = self.cubepar['combine']
+ self.align = self.cubepar['align']
+ # If there is only one frame being "combined" AND there's no reference image, then don't compute the translation.
+ if self.numfiles == 1 and self.cubepar["reference_image"] is None:
+ if self.align:
+ msgs.warn("Parameter 'align' should be False when there is only one frame and no reference image")
+ msgs.info("Setting 'align' to False")
+ self.align = False
+ if self.ra_offsets is not None:
+ if not self.align:
+ msgs.warn("When 'ra_offset' and 'dec_offset' are set, 'align' must be True.")
+ msgs.info("Setting 'align' to True")
+ self.align = True
+ # TODO :: The default behaviour (combine=False, align=False) produces a datacube that uses the instrument WCS
+ # It should be possible (and perhaps desirable) to do a spatial alignment (i.e. align=True), apply this to the
+ # RA,Dec values of each pixel, and then use the instrument WCS to save the output (or, just adjust the crval).
+ # At the moment, if the user wishes to spatially align the frames, a different WCS is generated.
+
+ # Determine what method is requested
+ self.spec_subpixel, self.spat_subpixel = 1, 1
+ if self.method == "subpixel":
+ msgs.info("Adopting the subpixel algorithm to generate the datacube.")
+ self.spec_subpixel, self.spat_subpixel = self.cubepar['spec_subpixel'], self.cubepar['spat_subpixel']
+ elif self.method == "ngp":
+ msgs.info("Adopting the nearest grid point (NGP) algorithm to generate the datacube.")
+ else:
+ msgs.error(f"The following datacube method is not allowed: {self.method}")
+
+ # Get the detector number and string representation
+ if det is None:
+ det = 1 if self.par['rdx']['detnum'] is None else self.par['rdx']['detnum']
+ self.detname = self.spec.get_det_name(det)
+
+ # Check if the output file exists
+ self.check_outputs()
+
+ # Check the reference cube and image exist, if requested
+ self.fluxcal = False
+ self.blaze_wave, self.blaze_spec = None, None
+ self.blaze_spline, self.flux_spline = None, None
+ self.flat_splines = dict() # A dictionary containing the splines of the flatfield
+ if self.cubepar['standard_cube'] is not None:
+ self.make_sensfunc()
+
+ # If a reference image has been set, check that it exists
+ if self.cubepar['reference_image'] is not None:
+ if not os.path.exists(self.cubepar['reference_image']):
+ msgs.error("Reference image does not exist:" + msgs.newline() + self.cubepar['reference_image'])
+
+ # Load the default scaleimg frame for the scale correction
+ self.scalecorr_default = "none"
+ self.relScaleImgDef = np.array([1])
+ self.set_default_scalecorr()
+
+ # Load the default sky frame to be used for sky subtraction
+ self.skysub_default = "image"
+ self.skyImgDef, self.skySclDef = None, None # This is the default behaviour (i.e. to use the "image" for the sky subtraction)
+ self.set_default_skysub()
+
+ def check_outputs(self):
+ """
+ Check if any of the intended output files already exist. This check should be done near the
+ beginning of the coaddition, to avoid any computation that won't be saved in the event that
+ files won't be overwritten.
+ """
+ if self.combine:
+ outfile = datacube.get_output_filename("", self.cubepar['output_filename'], self.combine)
+ out_whitelight = datacube.get_output_whitelight_filename(outfile)
+ if os.path.exists(outfile) and not self.overwrite:
+ msgs.error("Output filename already exists:"+msgs.newline()+outfile)
+ if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite:
+ msgs.error("Output filename already exists:"+msgs.newline()+out_whitelight)
+ else:
+ # Finally, if there's just one file, check if the output filename is given
+ if self.numfiles == 1 and self.cubepar['output_filename'] != "":
+ outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1)
+ out_whitelight = datacube.get_output_whitelight_filename(outfile)
+ if os.path.exists(outfile) and not self.overwrite:
+ msgs.error("Output filename already exists:" + msgs.newline() + outfile)
+ if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite:
+ msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight)
+ else:
+ for ff in range(self.numfiles):
+ outfile = datacube.get_output_filename(self.spec2d[ff], self.cubepar['output_filename'], self.combine, ff+1)
+ out_whitelight = datacube.get_output_whitelight_filename(outfile)
+ if os.path.exists(outfile) and not self.overwrite:
+ msgs.error("Output filename already exists:" + msgs.newline() + outfile)
+ if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite:
+ msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight)
+
+ def set_blaze_spline(self, wave_spl, spec_spl):
+ """
+ Generate a spline that represents the blaze function. This only needs to be done once,
+ because it is used as the reference blaze. It is only important if you are combining
+ frames that require a grating correction (i.e. have slightly different grating angles).
+
+ Args:
+ wave_spl (`numpy.ndarray`_):
+ 1D wavelength array where the blaze has been evaluated
+ spec_spl (`numpy.ndarray`_):
+ 1D array (same size as wave_spl), that represents the blaze function for each wavelength.
+ """
+ # Check if a reference blaze spline exists (either from a standard star if fluxing or from a previous
+ # exposure in this for loop)
+ if self.blaze_spline is None:
+ self.blaze_wave, self.blaze_spec = wave_spl, spec_spl
+ self.blaze_spline = interp1d(wave_spl, spec_spl, kind='linear',
+ bounds_error=False, fill_value="extrapolate")
+
+ def make_sensfunc(self):
+ """
+ Generate the sensitivity function to be used for the flux calibration.
+ """
+ self.fluxcal = True
+ # The first standard star cube is used as the reference blaze spline
+ if self.cubepar['grating_corr']:
+ # Load the blaze information
+ with fits.open(self.cubepar['standard_cube']) as stdcube:
+ # If a reference blaze spline has not been set, do that now.
+ self.set_blaze_spline(stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data)
+ # Generate a spline representation of the sensitivity function
+ self.flux_spline = datacube.make_sensfunc(self.cubepar['standard_cube'], self.senspar,
+ blaze_wave=self.blaze_wave, blaze_spline=self.blaze_spline,
+ grating_corr=self.cubepar['grating_corr'])
+
+ def set_default_scalecorr(self):
+ """
+ Set the default mode to use for relative spectral scale correction.
+ """
+ if self.cubepar['scale_corr'] is not None:
+ if self.cubepar['scale_corr'] == "image":
+ msgs.info("The default relative spectral illumination correction will use the science image")
+ self.scalecorr_default = "image"
+ else:
+ msgs.info("Loading default scale image for relative spectral illumination correction:" +
+ msgs.newline() + self.cubepar['scale_corr'])
+ try:
+ spec2DObj = spec2dobj.Spec2DObj.from_file(self.cubepar['scale_corr'], self.detname)
+ except Exception as e:
+ msgs.warn(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}')
+ msgs.warn("Could not load scaleimg from spec2d file:" + msgs.newline() +
+ self.cubepar['scale_corr'] + msgs.newline() +
+ "scale correction will not be performed unless you have specified the correct" + msgs.newline() +
+ "scale_corr file in the spec2d block")
+ self.cubepar['scale_corr'] = None
+ self.scalecorr_default = "none"
+ else:
+ self.relScaleImgDef = spec2DObj.scaleimg
+ self.scalecorr_default = self.cubepar['scale_corr']
+
+ def get_current_scalecorr(self, spec2DObj, scalecorr=None):
+ """
+ Determine the scale correction that should be used to correct
+ for the relative spectral scaling of the science frame
+
+ Args:
+ spec2DObj (:class:`~pypeit.spec2dobj.Spec2DObj`):
+ 2D PypeIt spectra object.
+ scalecorr (:obj:`str`, optional):
+ A string that describes what mode should be used for the relative scale correction. The
+ allowed values are:
+
+ * default: Use the default value, as defined in self.set_default_scalecorr()
+ * image: Use the relative scale that was derived from the science frame
+ * none: Do not perform relative scale correction
+
+ Returns:
+ A tuple (this_scalecorr, relScaleImg) where this_scalecorr is a :obj:`str` that describes the
+ scale correction mode to be used (see the scalecorr description) and relScaleImg is a `numpy.ndarray`_
+ (2D, same shape as science frame) containing the relative spectral scaling to apply to the science frame.
+ """
+ this_scalecorr = self.scalecorr_default
+ relScaleImg = self.relScaleImgDef.copy()
+ if scalecorr is not None:
+ if scalecorr.lower() == 'default':
+ if self.scalecorr_default == "image":
+ relScaleImg = spec2DObj.scaleimg
+ this_scalecorr = "image" # Use the current spec2d for the relative spectral illumination scaling
+ else:
+ this_scalecorr = self.scalecorr_default # Use the default value for the scale correction
+ elif scalecorr.lower() == 'image':
+ relScaleImg = spec2DObj.scaleimg
+ this_scalecorr = "image" # Use the current spec2d for the relative spectral illumination scaling
+ elif scalecorr.lower() == 'none':
+ relScaleImg = np.array([1])
+ this_scalecorr = "none" # Don't do relative spectral illumination scaling
+ else:
+ # Load a user-specified frame for the relative scale correction
+ msgs.info("Loading the following frame for the relative spectral illumination correction:" +
+ msgs.newline() + scalecorr)
+ try:
+ spec2DObj_scl = spec2dobj.Spec2DObj.from_file(scalecorr, self.detname)
+ except Exception as e:
+ msgs.warn(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}')
+ msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + scalecorr)
+ else:
+ relScaleImg = spec2DObj_scl.scaleimg
+ this_scalecorr = scalecorr
+ if this_scalecorr == "none":
+ msgs.info("Relative spectral illumination correction will not be performed.")
+ else:
+ msgs.info("Using the following frame for the relative spectral illumination correction:" +
+ msgs.newline() + this_scalecorr)
+ # Return the scaling correction for this frame
+ return this_scalecorr, relScaleImg
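+ # Editor's note: illustrative behaviour of the method above, grounded in the
+ # code: scalecorr='none' returns ("none", np.array([1])), which disables the
+ # relative spectral illumination scaling for this frame, while
+ # scalecorr='image' returns the scale image stored in the input spec2DObj.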
+
+ def set_default_skysub(self):
+ """
+ Set the default mode to use for sky subtraction.
+ """
+ if self.cubepar['skysub_frame'] in [None, 'none', '', 'None']:
+ self.skysub_default = "none"
+ self.skyImgDef = np.array([0.0]) # Do not perform sky subtraction
+ self.skySclDef = np.array([0.0]) # Do not perform sky subtraction
+ elif self.cubepar['skysub_frame'] == "image":
+ msgs.info("The sky model in the spec2d science frames will be used for sky subtraction" + msgs.newline() +
+ "(unless specific skysub frames have been specified)")
+ self.skysub_default = "image"
+ else:
+ msgs.info("Loading default image for sky subtraction:" +
+ msgs.newline() + self.cubepar['skysub_frame'])
+ try:
+ spec2DObj = spec2dobj.Spec2DObj.from_file(self.cubepar['skysub_frame'], self.detname)
+ skysub_exptime = self.spec.get_meta_value([spec2DObj.head0], 'exptime')
+ except Exception:
+ msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + self.cubepar['skysub_frame'])
+ else:
+ self.skysub_default = self.cubepar['skysub_frame']
+ self.skyImgDef = spec2DObj.sciimg / skysub_exptime # Sky counts/second
+ # self.skyImgDef = spec2DObj.skymodel/skysub_exptime # Sky counts/second
+ self.skySclDef = spec2DObj.scaleimg
+
+ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None):
+ """
+ Determine the sky frame that should be used to subtract from the science frame
+
+ Args:
+ spec2DObj (:class:`~pypeit.spec2dobj.Spec2DObj`):
+ 2D PypeIt spectra object.
+ exptime (:obj:`float`):
+ The exposure time of the science frame (in seconds)
+ opts_skysub (:obj:`str`, optional):
+ A string that describes what mode should be used for the sky subtraction. The
+ allowed values are:
+
+ * default: Use the default value, as defined in self.set_default_skysub()
+ * image: Use the sky model derived from the science frame
+ * none: Do not perform sky subtraction
+
+ Returns:
+ A tuple (this_skysub, skyImg, skyScl) where this_skysub is a :obj:`str` that describes the sky subtraction
+ mode to be used (see opts_skysub description), skyImg is a `numpy.ndarray`_ (2D, same shape as science
+ frame) containing the sky frame to be subtracted from the science frame, and skyScl is a `numpy.ndarray`_
+ (2D, same shape as science frame) containing the relative spectral scaling that has been applied to the
+ returned sky frame.
+ """
+ this_skysub = self.skysub_default
+ if self.skysub_default == "image":
+ skyImg = spec2DObj.skymodel
+ skyScl = spec2DObj.scaleimg
+ else:
+ skyImg = self.skyImgDef.copy() * exptime
+ skyScl = self.skySclDef.copy()
+ # See if there's any changes from the default behaviour
+ if opts_skysub is not None:
+ if opts_skysub.lower() == 'default':
+ if self.skysub_default == "image":
+ skyImg = spec2DObj.skymodel
+ skyScl = spec2DObj.scaleimg
+ this_skysub = "image" # Use the current spec2d for sky subtraction
+ else:
+ skyImg = self.skyImgDef.copy() * exptime
+ skyScl = self.skySclDef.copy() # The relative scale image does not scale with the exposure time
+ this_skysub = self.skysub_default # Use the global value for sky subtraction
+ elif opts_skysub.lower() == 'image':
+ skyImg = spec2DObj.skymodel
+ skyScl = spec2DObj.scaleimg
+ this_skysub = "image" # Use the current spec2d for sky subtraction
+ elif opts_skysub.lower() == 'none':
+ skyImg = np.array([0.0])
+ skyScl = np.array([1.0])
+ this_skysub = "none" # Don't do sky subtraction
+ else:
+ # Load a user specified frame for sky subtraction
+ msgs.info("Loading skysub frame:" + msgs.newline() + opts_skysub)
+ try:
+ spec2DObj_sky = spec2dobj.Spec2DObj.from_file(opts_skysub, self.detname)
+ skysub_exptime = self.spec.get_meta_value([spec2DObj_sky.head0], 'exptime')
+ except Exception:
+ msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + opts_skysub)
+ skyImg = spec2DObj_sky.sciimg * exptime / skysub_exptime # Sky counts
+ skyScl = spec2DObj_sky.scaleimg
+ this_skysub = opts_skysub # User specified spec2d for sky subtraction
+ if this_skysub == "none":
+ msgs.info("Sky subtraction will not be performed.")
+ else:
+ msgs.info("Using the following frame for sky subtraction:" + msgs.newline() + this_skysub)
+ # Return the skysub params for this frame
+ return this_skysub, skyImg, skyScl
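+ # Editor's note: illustrative behaviour of the method above, grounded in the
+ # code: opts_skysub='none' returns ("none", np.array([0.0]), np.array([1.0])),
+ # i.e. nothing is subtracted, while opts_skysub='image' returns the sky model
+ # and scale image stored in the input spec2DObj.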
+
+ def add_grating_corr(self, flatfile, waveimg, slits, spat_flexure=None):
+ """
+ Calculate the relative spectral sensitivity correction due to grating shifts between the
+ input frames.
+
+ Parameters
+ ----------
+ flatfile : :obj:`str`
+ Unique path of a flatfield frame used to calculate the relative spectral sensitivity
+ of the corresponding science frame.
+ waveimg : `numpy.ndarray`_
+ 2D image (same shape as the science frame) indicating the wavelength of each detector pixel.
+ slits : :class:`~pypeit.slittrace.SlitTraceSet`
+ Class containing information about the slits
+ spat_flexure : :obj:`float`, optional
+ Spatial flexure in pixels
+ """
+ if flatfile not in self.flat_splines.keys():
+ msgs.info("Calculating relative sensitivity for grating correction")
+ # Check if the Flat file exists
+ if not os.path.exists(flatfile):
+ msgs.error("Grating correction requested, but the following file does not exist:" + msgs.newline() + flatfile)
+ # Load the Flat file
+ flatimages = flatfield.FlatImages.from_file(flatfile)
+ total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', initial=True, spat_flexure=spat_flexure) * \
+ flatimages.fit2illumflat(slits, finecorr=True, frametype='illum', initial=True, spat_flexure=spat_flexure)
+ flatframe = flatimages.pixelflat_raw / total_illum
+ if flatimages.pixelflat_spec_illum is None:
+ # Calculate the relative scale
+ scale_model = flatfield.illum_profile_spectral(flatframe, waveimg, slits,
+ slit_illum_ref_idx=self.flatpar['slit_illum_ref_idx'],
+ model=None,
+ skymask=None, trim=self.flatpar['slit_trim'],
+ flexure=spat_flexure,
+ smooth_npix=self.flatpar['slit_illum_smooth_npix'])
+ else:
+ msgs.info("Using relative spectral illumination from FlatImages")
+ scale_model = flatimages.pixelflat_spec_illum
+ # Extract a quick spectrum of the flatfield
+ wave_spl, spec_spl = extract.extract_hist_spectrum(waveimg, flatframe*utils.inverse(scale_model),
+ gpm=waveimg != 0, bins=slits.nspec)
+ # Store the result
+ self.flat_splines[flatfile] = interp1d(wave_spl, spec_spl, kind='linear', bounds_error=False, fill_value="extrapolate")
+ self.flat_splines[flatfile + "_wave"] = wave_spl.copy()
+ # Finally, if a reference blaze spline has not been set, do that now.
+ self.set_blaze_spline(wave_spl, spec_spl)
+
+ def run(self):
+ """
+ Main entry routine to set the order of operations to coadd the data. For specific
+ details of this procedure, see the child routines.
+ """
+ msgs.bug("This routine should be overridden by child classes.")
+ msgs.error("Cannot proceed without coding the run() routine.")
+
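+ # --------------------------------------------------------------------------
+ # Editor's note: a minimal usage sketch for the CoAdd3D factory above. The
+ # file list is hypothetical, and the parameter set shown is just the
+ # spectrograph default; a real reduction would build ``par`` from the
+ # user-supplied coadd3d file.
+ #
+ # from pypeit.spectrographs.util import load_spectrograph
+ # spec = load_spectrograph('keck_kcwi')
+ # par = spec.default_pypeit_par()
+ # coadd = CoAdd3D.get_instance(['spec2d_file1.fits', 'spec2d_file2.fits'], par,
+ # spectrograph=spec, det=1, overwrite=True)
+ # coadd.run()
+ # --------------------------------------------------------------------------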
+
+class SlicerIFUCoAdd3D(CoAdd3D):
+ """
+ Child of CoAdd3D for SlicerIFU data reduction. For documentation, see the CoAdd3D parent class above.
+
+ This child class performs the series of datacube-creation steps that are specific to
+ slicer-based IFUs, including the following:
+
+ Data preparation:
+
+ * Loads individual spec2d files
+ * If requested, subtract the sky (either from a dedicated sky frame, or use the sky model stored in the science spec2d file)
+ * The sky regions near the spectral edges of the slits are masked
+ * Apply a relative spectral illumination correction (scalecorr) that registers all input frames to a common spectral illumination scale.
+ * Generate a WCS of each individual frame, and calculate the RA and DEC of each individual detector pixel
+ * Calculate the astrometric correction that is needed to align spatial positions along the slices
+ * Compute the differential atmospheric refraction correction
+ * Apply the extinction correction
+ * Apply a grating correction (gratcorr) - This corrects for the relative spectral efficiency of combining data taken with multiple different grating angles
+ * Flux calibrate
+
+ Data cube generation:
+
+ * If frames are not being combined, individual data cubes are generated and saved as a DataCube object. A white light image is also produced, if requested
+ * If frames are being aligned and/or combined, the following steps are followed:
+ - The output voxel sampling is computed (this must be consistent for all frames)
+ - Frames are aligned (either by user-specified offsets, or by cross-correlating white light images)
+ - The relative weight of each detector pixel is computed
+ - If frames are not being combined, an individual DataCube will be generated for each frame
+ - If frames are being combined, a single DataCube will be generated.
+ - White light images are also produced, if requested.
+
+ """
+ def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offsets=None, dec_offsets=None,
+ spectrograph=None, det=1, overwrite=False, show=False, debug=False):
+ super().__init__(spec2dfiles, par, skysub_frame=skysub_frame, scale_corr=scale_corr, ra_offsets=ra_offsets,
+ dec_offsets=dec_offsets, spectrograph=spectrograph, det=det, overwrite=overwrite,
+ show=show, debug=debug)
+ self.mnmx_wv = None # Will be used to store the minimum and maximum wavelengths of every slit and frame.
+ self._spatscale = np.zeros((self.numfiles, 2)) # index 0, 1 = pixel scale, slicer scale
+ self._specscale = np.zeros(self.numfiles)
+ # Loop through all of the frames, load the data, and save datacubes if no combining is required
+ self.load()
+
+ def get_alignments(self, spec2DObj, slits, spat_flexure=None):
+ """
+ Generate and return the spline interpolation fitting functions to be used for
+ the alignment frames, as part of the astrometric correction.
+
+ Parameters
+ ----------
+ spec2DObj : :class:`~pypeit.spec2dobj.Spec2DObj`
+ 2D PypeIt spectra object.
+ slits : :class:`~pypeit.slittrace.SlitTraceSet`
+ Class containing information about the slits
+ spat_flexure : :obj:`float`, optional
+ Spatial flexure in pixels
+
+ Returns
+ -------
+ alignSplines : :class:`~pypeit.alignframe.AlignmentSplines`
+ Alignment splines used for the astrometric correction
+ """
+ # Loading the alignments frame for these data
+ alignments = None
+ if self.cubepar['astrometric']:
+ key = alignframe.Alignments.calib_type.upper()
+ if key in spec2DObj.calibs:
+ alignfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key])
+ if os.path.exists(alignfile) and self.cubepar['astrometric']:
+ msgs.info("Loading alignments")
+ alignments = alignframe.Alignments.from_file(alignfile)
+ else:
+ msgs.warn('Processed alignment frame not recorded or not found!')
+ msgs.info("Using slit edges for astrometric transform")
+ else:
+ msgs.info("Using slit edges for astrometric transform")
+ # If nothing better was provided, use the slit edges
+ if alignments is None:
+ left, right, _ = slits.select_edges(initial=True, flexure=spat_flexure)
+ locations = [0.0, 1.0]
+ traces = np.append(left[:, None, :], right[:, None, :], axis=1)
+ else:
+ locations = self.par['calibrations']['alignment']['locations']
+ traces = alignments.traces
+ msgs.info("Generating alignment splines")
+ return alignframe.AlignmentSplines(traces, locations, spec2DObj.tilts)
+
+ def load(self):
+ """
+ This is the main function that loads in the data, and performs several frame-specific corrections.
+ If the user does not wish to align or combine the individual datacubes, then this routine will also
+ produce a spec3d file, which is a DataCube representation of a PypeIt spec2d frame for SlicerIFU data.
+
+ This function should be called in the __init__ method, and initialises multiple variables. The variables
+ initialised by this function include:
+
+ * self.ifu_ra - The RA of the IFU pointing
+ * self.ifu_dec - The Dec of the IFU pointing
+ * self.mnmx_wv - The minimum and maximum wavelengths of every slit and frame.
+ * self._spatscale - The native spatial scales of all spec2d frames.
+ * self._specscale - The native spectral scales of all spec2d frames.
+ * self.weights - Weights to use when combining cubes
+ * self.flat_splines - Spline representations of the blaze function (based on the illumflat).
+ * self.blaze_spline - Spline representation of the reference blaze function
+ * self.blaze_wave - Wavelength array used to construct the reference blaze function
+ * self.blaze_spec - Spectrum used to construct the reference blaze function
+
+ As well as the primary arrays that store the pixel information for multiple spec2d frames, including:
+
+ * self.all_ra
+ * self.all_dec
+ * self.all_wave
+ * self.all_sci
+ * self.all_ivar
+ * self.all_idx
+ * self.all_wghts
+ * self.all_spatpos
+ * self.all_specpos
+ * self.all_spatid
+ * self.all_tilts
+ * self.all_slits
+ * self.all_align
+ * self.all_dar
+ * self.all_wcs
+ """
+ # Load all spec2d files and prepare the data for making a datacube
+ for ff, fil in enumerate(self.spec2d):
+ # Load it up
+ msgs.info("Loading PypeIt spec2d frame:" + msgs.newline() + fil)
+ spec2DObj = spec2dobj.Spec2DObj.from_file(fil, self.detname)
+ detector = spec2DObj.detector
+ spat_flexure = None # spec2DObj.sci_spat_flexure
+
+ # Load the header
+ hdr0 = spec2DObj.head0
+ self.ifu_ra = np.append(self.ifu_ra, self.spec.compound_meta([hdr0], 'ra'))
+ self.ifu_dec = np.append(self.ifu_dec, self.spec.compound_meta([hdr0], 'dec'))
+
+ # Get the exposure time
+ exptime = self.spec.compound_meta([hdr0], 'exptime')
+
+ # Initialise the slit edges
+ msgs.info("Constructing slit image")
+ slits = spec2DObj.slits
+ slitid_img_init = slits.slit_img(pad=0, initial=True, flexure=spat_flexure)
+ slits_left, slits_right, _ = slits.select_edges(initial=True, flexure=spat_flexure)
+
+ # The order of operations below proceeds as follows:
+ # (1) Get science image
+ # (2) Subtract sky (note, if a joint fit has been performed, the relative scale correction is applied in the reduction!)
+ # (3) Apply relative scale correction to both science and ivar
+
+ # Set the default behaviour if a global skysub frame has been specified
+ this_skysub, skyImg, skyScl = self.get_current_skysub(spec2DObj, exptime,
+ opts_skysub=self.skysub_frame[ff])
+
+ # Load the relative scale image, if something other than the default has been provided
+ this_scalecorr, relScaleImg = self.get_current_scalecorr(spec2DObj,
+ scalecorr=self.scale_corr[ff])
+ # Prepare the relative scaling factors
+ relSclSky = skyScl / spec2DObj.scaleimg # This factor ensures the sky has the same relative scaling as the science frame
+ relScale = spec2DObj.scaleimg / relScaleImg # This factor is applied to the sky subtracted science frame
+
+ # Extract the relevant information from the spec2d file
+ sciImg = (spec2DObj.sciimg - skyImg * relSclSky) * relScale # Subtract sky and apply relative illumination
+ ivar = spec2DObj.ivarraw / relScale ** 2
+ waveimg = spec2DObj.waveimg
+ bpmmask = spec2DObj.bpmmask
+
+ # Mask the edges of the spectrum where the sky model is bad
+ sky_is_good = datacube.make_good_skymask(slitid_img_init, spec2DObj.tilts)
+
+ # TODO :: Really need to write some detailed information in the docs about all of the various corrections that can optionally be applied
+
+ # TODO :: Include a flexure correction from the sky frame? Note, you cannot use the waveimg from a sky frame,
+ # since the heliocentric correction may have been applied to the sky frame. Need to recalculate waveimg using
+ # the slitshifts from a skyimage, and then apply the vel_corr from the science image.
+
+ wnonzero = (waveimg != 0.0)
+ if not np.any(wnonzero):
+ msgs.error("The wavelength image contains only zeros - You need to check the data reduction.")
+ wave0 = waveimg[wnonzero].min()
+ # Calculate the delta wave in every pixel on the slit
+ waveimp = np.roll(waveimg, 1, axis=0)
+ waveimn = np.roll(waveimg, -1, axis=0)
+ dwaveimg = np.zeros_like(waveimg)
+ # Pixels where this pixel and the previous spectral pixel are both on the slit
+ wnz = np.where((waveimg != 0) & (waveimp != 0))
+ dwaveimg[wnz] = np.abs(waveimg[wnz] - waveimp[wnz])
+ # Pixels where the previous spectral pixel is off the slit - use the next pixel instead
+ wnz = np.where((waveimg != 0) & (waveimp == 0))
+ dwaveimg[wnz] = np.abs(waveimg[wnz] - waveimn[wnz])
+ # Endpoints of the spectral direction (the rolled arrays wrap around here)
+ dwaveimg[0, :] = np.abs(waveimg[0, :] - waveimn[0, :])
+ dwaveimg[-1, :] = np.abs(waveimg[-1, :] - waveimp[-1, :])
+ dwv = np.median(dwaveimg[dwaveimg != 0.0]) if self.cubepar['wave_delta'] is None else self.cubepar['wave_delta']
+
+ msgs.info("Using wavelength solution: wave0={0:.3f}, dispersion={1:.3f} Angstrom/pixel".format(wave0, dwv))
+
+ # Obtain the minimum and maximum wavelength of all slits
+ if self.mnmx_wv is None:
+ self.mnmx_wv = np.zeros((len(self.spec2d), slits.nslits, 2))
+ for slit_idx, slit_spat in enumerate(slits.spat_id):
+ onslit_init = (slitid_img_init == slit_spat)
+ self.mnmx_wv[ff, slit_idx, 0] = np.min(waveimg[onslit_init])
+ self.mnmx_wv[ff, slit_idx, 1] = np.max(waveimg[onslit_init])
+
+ # Find the largest spatial scale of all images being combined
+ # TODO :: probably need to put this in the DetectorContainer
+ pxscl = detector.platescale * parse.parse_binning(detector.binning)[1] / 3600.0 # This is degrees/pixel
+ slscl = self.spec.get_meta_value([spec2DObj.head0], 'slitwid')
+ self._spatscale[ff, 0] = pxscl
+ self._spatscale[ff, 1] = slscl
+ self._specscale[ff] = dwv
+
+ # If the spatial scale has been set by the user, check that it doesn't exceed the pixel or slicer scales
+ if self._dspat is not None:
+ if pxscl > self._dspat:
+ msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the pixel scale ({1:f} arcsec)".format(
+ 3600.0 * self._dspat, 3600.0 * pxscl))
+ if slscl > self._dspat:
+ msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format(
+ 3600.0 * self._dspat, 3600.0 * slscl))
+
+ # Construct a good pixel mask
+ # TODO: This should use the mask function to figure out which elements are masked.
+ onslit_gpm = (slitid_img_init > 0) & (bpmmask.mask == 0) & sky_is_good
+
+ # Grab the WCS of this frame
+ self.all_wcs.append(self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0, dwv))
+
+ # Generate the alignment splines, and then retrieve images of the RA and Dec of every pixel,
+ # and the number of spatial pixels in each slit
+ alignSplines = self.get_alignments(spec2DObj, slits, spat_flexure=spat_flexure)
+ raimg, decimg, minmax = slits.get_radec_image(self.all_wcs[ff], alignSplines, spec2DObj.tilts,
+ initial=True, flexure=spat_flexure)
+
+ # Get copies of arrays to be saved
+ ra_ext = raimg[onslit_gpm]
+ dec_ext = decimg[onslit_gpm]
+ wave_ext = waveimg[onslit_gpm]
+ flux_ext = sciImg[onslit_gpm]
+ ivar_ext = ivar[onslit_gpm]
+ dwav_ext = dwaveimg[onslit_gpm]
+
+ # From here on out, work in sorted wavelengths
+ wvsrt = np.argsort(wave_ext)
+ wave_sort = wave_ext[wvsrt]
+ dwav_sort = dwav_ext[wvsrt]
+ ra_sort = ra_ext[wvsrt]
+ dec_sort = dec_ext[wvsrt]
+ # Here's an array to get back to the original ordering
+ resrt = np.argsort(wvsrt)
+
+ # Compute the DAR correction
+ cosdec = np.cos(np.mean(dec_sort) * np.pi / 180.0)
+ airmass = self.spec.get_meta_value([spec2DObj.head0], 'airmass') # unitless
+ parangle = self.spec.get_meta_value([spec2DObj.head0], 'parangle')
+ pressure = self.spec.get_meta_value([spec2DObj.head0], 'pressure') # units are mbar (hPa), as assumed by DARcorrection
+ temperature = self.spec.get_meta_value([spec2DObj.head0], 'temperature') # units are degrees C
+ humidity = self.spec.get_meta_value([spec2DObj.head0], 'humidity') # Expressed as a percentage (not a fraction!)
+ darcorr = DARcorrection(airmass, parangle, pressure, temperature, humidity, cosdec)
+
+ # Perform extinction correction
+ msgs.info("Applying extinction correction")
+ longitude = self.spec.telescope['longitude']
+ latitude = self.spec.telescope['latitude']
+ extinct = flux_calib.load_extinction_data(longitude, latitude, self.senspar['UVIS']['extinct_file'])
+ # extinction_correction requires the wavelength is sorted
+ extcorr_sort = flux_calib.extinction_correction(wave_sort * units.AA, airmass, extinct)
+
+ # Correct for sensitivity as a function of grating angle
+ # (this assumes the spectrum of the flatfield lamp has the same shape for all setups)
+ gratcorr_sort = 1.0
+ if self.cubepar['grating_corr']:
+ # Load the flatfield file
+ key = flatfield.FlatImages.calib_type.upper()
+ if key not in spec2DObj.calibs:
+ msgs.error('Processed flat calibration file not recorded by spec2d file!')
+ flatfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key])
+ # Setup the grating correction
+ self.add_grating_corr(flatfile, waveimg, slits, spat_flexure=spat_flexure)
+ # Calculate the grating correction
+ gratcorr_sort = datacube.correct_grating_shift(wave_sort, self.flat_splines[flatfile + "_wave"],
+ self.flat_splines[flatfile],
+ self.blaze_wave, self.blaze_spline)
+ # Sensitivity function
+ sensfunc_sort = 1.0
+ if self.fluxcal:
+ msgs.info("Calculating the sensitivity function")
+ sensfunc_sort = self.flux_spline(wave_sort)
+ # Fold the exposure time, grating correction, and sensitivity function into a single correction factor
+ extcorr_sort *= sensfunc_sort / (exptime * gratcorr_sort)
+ # Correct for extinction
+ flux_sort = flux_ext[wvsrt] * extcorr_sort
+ ivar_sort = ivar_ext[wvsrt] / extcorr_sort ** 2
+
+ # Convert units to Counts/s/Ang/arcsec2
+ # Slicer sampling * spatial pixel sampling
+ sl_deg = np.sqrt(self.all_wcs[ff].wcs.cd[0, 0] ** 2 + self.all_wcs[ff].wcs.cd[1, 0] ** 2)
+ px_deg = np.sqrt(self.all_wcs[ff].wcs.cd[1, 1] ** 2 + self.all_wcs[ff].wcs.cd[0, 1] ** 2)
+ scl_units = dwav_sort * (3600.0 * sl_deg) * (3600.0 * px_deg)
+ flux_sort /= scl_units
+ ivar_sort *= scl_units ** 2
+
+ # Calculate the weights relative to the zeroth cube
+ self.weights[ff] = 1.0 # All frames are currently weighted equally (an exposure time or S/N based weight could be used instead)
+
+ # Get the spatial and spectral positions, and the slit ID, of every good pixel
+ this_specpos, this_spatpos = np.where(onslit_gpm)
+ this_spatid = slitid_img_init[onslit_gpm]
+
+ # If individual frames are to be output without aligning them,
+ # there's no need to store information, just make the cubes now
+ numpix = ra_sort.size
+ if not self.combine and not self.align:
+ # Get the output filename
+ if self.numfiles == 1 and self.cubepar['output_filename'] != "":
+ outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1)
+ else:
+ outfile = datacube.get_output_filename(fil, self.cubepar['output_filename'], self.combine, ff + 1)
+ # Get the coordinate bounds
+ slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True))))
+ numwav = int((np.max(waveimg) - wave0) / dwv)
+ bins = self.spec.get_datacube_bins(slitlength, minmax, numwav)
+ # Generate the output WCS for the datacube
+ tmp_crval_wv = (self.all_wcs[ff].wcs.crval[2] * self.all_wcs[ff].wcs.cunit[2]).to(units.Angstrom).value
+ tmp_cd_wv = (self.all_wcs[ff].wcs.cd[2,2] * self.all_wcs[ff].wcs.cunit[2]).to(units.Angstrom).value
+ crval_wv = self.cubepar['wave_min'] if self.cubepar['wave_min'] is not None else tmp_crval_wv
+ cd_wv = self.cubepar['wave_delta'] if self.cubepar['wave_delta'] is not None else tmp_cd_wv
+ output_wcs = self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, crval_wv, cd_wv)
+ # Set the wavelength range of the white light image.
+ wl_wvrng = None
+ if self.cubepar['save_whitelight']:
+ wl_wvrng = datacube.get_whitelight_range(np.max(self.mnmx_wv[ff, :, 0]),
+ np.min(self.mnmx_wv[ff, :, 1]),
+ self.cubepar['whitelight_range'])
+ # Make the datacube
+ if self.method in ['subpixel', 'ngp']:
+ # Generate the datacube
+ flxcube, sigcube, bpmcube, wave = \
+ datacube.generate_cube_subpixel(outfile, output_wcs, ra_sort[resrt], dec_sort[resrt], wave_sort[resrt],
+ flux_sort[resrt], ivar_sort[resrt], np.ones(numpix),
+ this_spatpos, this_specpos, this_spatid,
+ spec2DObj.tilts, slits, alignSplines, darcorr, bins, all_idx=None,
+ overwrite=self.overwrite, whitelight_range=wl_wvrng,
+ spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel)
+ # Prepare the header
+ hdr = output_wcs.to_header()
+ if self.fluxcal:
+ hdr['FLUXUNIT'] = (flux_calib.PYPEIT_FLUX_SCALE, "Flux units -- erg/s/cm^2/Angstrom/arcsec^2")
+ else:
+ hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2")
+ # Write out the datacube
+ msgs.info("Saving datacube as: {0:s}".format(outfile))
+ final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec,
+ sensfunc=None, fluxed=self.fluxcal)
+ final_cube.to_file(outfile, hdr=hdr, overwrite=self.overwrite)
+ # No need to proceed and store arrays - we are writing individual datacubes
+ continue
+
+ # Store the information if we are combining multiple frames
+ self.all_ra = np.append(self.all_ra, ra_sort[resrt])
+ self.all_dec = np.append(self.all_dec, dec_sort[resrt])
+ self.all_wave = np.append(self.all_wave, wave_sort[resrt])
+ self.all_sci = np.append(self.all_sci, flux_sort[resrt])
+ self.all_ivar = np.append(self.all_ivar, ivar_sort[resrt].copy())
+ self.all_idx = np.append(self.all_idx, ff * np.ones(numpix))
+ self.all_wghts = np.append(self.all_wghts, self.weights[ff] * np.ones(numpix) / self.weights[0])
+ self.all_spatpos = np.append(self.all_spatpos, this_spatpos)
+ self.all_specpos = np.append(self.all_specpos, this_specpos)
+ self.all_spatid = np.append(self.all_spatid, this_spatid)
+ self.all_tilts.append(spec2DObj.tilts)
+ self.all_slits.append(slits)
+ self.all_align.append(alignSplines)
+ self.all_dar.append(darcorr)
+
+ def run_align(self):
+ """
+ This routine aligns multiple cubes by using manual input offsets or by cross-correlating white light images.
+
+ Returns:
+ `numpy.ndarray`_: A new set of RA values that have been aligned
+ `numpy.ndarray`_: A new set of Dec values that have been aligned
+ """
+ # Grab cos(dec) for convenience
+ cosdec = np.cos(np.mean(self.all_dec) * np.pi / 180.0)
+ # Register spatial offsets between all frames
+ if self.ra_offsets is not None:
+ # Calculate the offsets
+ new_ra, new_dec = datacube.align_user_offsets(self.all_ra, self.all_dec, self.all_idx,
+ self.ifu_ra, self.ifu_dec,
+ self.ra_offsets, self.dec_offsets)
+ else:
+ new_ra, new_dec = self.all_ra.copy(), self.all_dec.copy()
+ # Find the wavelength range where all frames overlap
+ min_wl, max_wl = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]), # The max blue wavelength
+ np.min(self.mnmx_wv[:, :, 1]), # The min red wavelength
+ self.cubepar['whitelight_range']) # The user-specified values (if any)
+ # Get the good whitelight pixels
+ ww, wavediff = datacube.get_whitelight_pixels(self.all_wave, min_wl, max_wl)
+ # Iterate over white light image generation and spatial shifting
+ numiter = 2
+ for dd in range(numiter):
+ msgs.info(f"Iterating on spatial translation - ITERATION #{dd+1}/{numiter}")
+ ref_idx = None # Don't use an index - This is the default behaviour when a reference image is supplied
+ # Generate the WCS
+ image_wcs, voxedge, reference_image = \
+ datacube.create_wcs(new_ra[ww], new_dec[ww], self.all_wave[ww], self._dspat, wavediff,
+ ra_min=self.cubepar['ra_min'], ra_max=self.cubepar['ra_max'],
+ dec_min=self.cubepar['dec_min'], dec_max=self.cubepar['dec_max'],
+ wave_min=self.cubepar['wave_min'], wave_max=self.cubepar['wave_max'],
+ reference=self.cubepar['reference_image'], collapse=True, equinox=2000.0,
+ specname=self.specname)
+ if voxedge[2].size != 2:
+ msgs.error("Spectral range for WCS is incorrect for white light image")
+
+ wl_imgs = datacube.generate_image_subpixel(image_wcs, new_ra[ww], new_dec[ww], self.all_wave[ww],
+ self.all_sci[ww], self.all_ivar[ww], self.all_wghts[ww],
+ self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww],
+ self.all_tilts, self.all_slits, self.all_align, self.all_dar,
+ voxedge, all_idx=self.all_idx[ww],
+ spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel)
+ if reference_image is None:
+ # ref_idx will be the index of the cube with the highest S/N
+ ref_idx = np.argmax(self.weights)
+ reference_image = wl_imgs[:, :, ref_idx].copy()
+ msgs.info("Calculating spatial translation of each cube relative to cube #{0:d})".format(ref_idx+1))
+ else:
+ msgs.info("Calculating the spatial translation of each cube relative to user-defined 'reference_image'")
+
+ # Calculate the image offsets relative to the reference image
+ for ff in range(self.numfiles):
+ # Calculate the shift
+ ra_shift, dec_shift = calculate_image_phase(reference_image.copy(), wl_imgs[:, :, ff], maskval=0.0)
+ # Convert pixel shift to degrees shift
+ ra_shift *= self._dspat/cosdec
+ dec_shift *= self._dspat
+ msgs.info("Spatial shift of cube #{0:d}:".format(ff + 1) + msgs.newline() +
+ "RA, DEC (arcsec) = {0:+0.3f} E, {1:+0.3f} N".format(ra_shift*3600.0, dec_shift*3600.0))
+ # Apply the shift
+ new_ra[self.all_idx == ff] += ra_shift
+ new_dec[self.all_idx == ff] += dec_shift
+ return new_ra, new_dec
+
+ def compute_weights(self):
+ """
+ Compute the relative weights to apply to pixels that are collected into the voxels of the output DataCubes
+
+ Returns:
+ `numpy.ndarray`_: The individual pixel weights for each detector pixel, in every frame.
+ """
+ # Calculate the relative spectral weights of all pixels
+ return np.ones_like(self.all_sci) if self.numfiles == 1 else \
+ datacube.compute_weights_frompix(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar,
+ self.all_idx, self._dspat, self._dwv, self.mnmx_wv, self.all_wghts,
+ self.all_spatpos, self.all_specpos, self.all_spatid,
+ self.all_tilts, self.all_slits, self.all_align, self.all_dar,
+ ra_min=self.cubepar['ra_min'], ra_max=self.cubepar['ra_max'],
+ dec_min=self.cubepar['dec_min'], dec_max=self.cubepar['dec_max'],
+ wave_min=self.cubepar['wave_min'], wave_max=self.cubepar['wave_max'],
+ relative_weights=self.cubepar['relative_weights'],
+ whitelight_range=self.cubepar['whitelight_range'],
+ reference_image=self.cubepar['reference_image'],
+ specname=self.specname)
+
+ def run(self):
+ """
+ This is the main routine called to convert PypeIt spec2d files into PypeIt DataCube objects. It is specific
+ to the SlicerIFU data.
+
+ First the data are loaded and several corrections are made. These include:
+
+ * A sky frame or model is subtracted from the science data, and the relative spectral illumination
+ of different slices is corrected.
+ * A mask of good pixels is identified
+ * A common spaxel scale is determined, and the astrometric correction is derived
+ * An RA and Dec image is created for each pixel.
+ * Based on atmospheric conditions, a differential atmospheric refraction correction is applied.
+ * Extinction correction
+ * Flux calibration (optional - this calibration is only applied if a standard star cube is supplied)
+
+ If the input frames will not be combined (combine=False) and will not be aligned (align=False), then
+ each individual spec2d file is converted into a spec3d file (i.e. a PypeIt DataCube object). These fits
+ files can be loaded/viewed in other software packages to display or combine multiple datacubes into a
+ single datacube. However, note that different software packages use combination algorithms that may not
+ conserve flux, or may produce covariance between adjacent voxels.
+
+ If the user wishes to either spatially align multiple exposures (align=True) or combine multiple
+ exposures (combine=True), then the next set of operations include:
+
+ * Generate white light images of each individual cube (according to a user-specified wavelength range)
+ * Align multiple frames if align=True (either manually by user input, or automatically by cross-correlation)
+ * Create the output WCS, and apply the flux calibration to the data
+ * Generate individual datacubes (combine=False) or one master datacube containing all exposures (combine=True).
+ Note, there are several algorithms used to combine multiple frames. Refer to the subpixellate() routine for
+ more details about the combination options.
+ """
+ # No need to continue if we are not combining nor aligning frames
+ if not self.combine and not self.align:
+ return
+
+ # If the user is aligning or combining, the spatial scale of the output cubes needs to be consistent.
+ # Set the spatial and spectral scales of the output datacube
+ self._dspat, self._dwv = datacube.set_voxel_sampling(self._spatscale, self._specscale,
+ dspat=self._dspat, dwv=self._dwv)
+
+ # Align the frames
+ if self.align:
+ self.all_ra, self.all_dec = self.run_align()
+
+ # Compute the relative weights on the spectra
+ self.all_wghts = self.compute_weights()
+
+ # Generate the WCS, and the voxel edges
+ cube_wcs, vox_edges, _ = \
+ datacube.create_wcs(self.all_ra, self.all_dec, self.all_wave, self._dspat, self._dwv,
+ ra_min=self.cubepar['ra_min'], ra_max=self.cubepar['ra_max'],
+ dec_min=self.cubepar['dec_min'], dec_max=self.cubepar['dec_max'],
+ wave_min=self.cubepar['wave_min'], wave_max=self.cubepar['wave_max'],
+ reference=self.cubepar['reference_image'], collapse=False, equinox=2000.0,
+ specname=self.specname)
+
+ sensfunc = None
+ if self.flux_spline is not None:
+ # Get wavelength of each pixel
+ numwav = vox_edges[2].size - 1
+ wcs_scale = (1.0 * cube_wcs.spectral.wcs.cunit[0]).to(units.Angstrom).value # Ensures the WCS is in Angstroms
+ senswave = wcs_scale * cube_wcs.spectral.wcs_pix2world(np.arange(numwav), 0)[0]
+ sensfunc = self.flux_spline(senswave)
+
+ # Generate a datacube
+ if self.method in ['subpixel', 'ngp']:
+ # Generate the datacube
+ wl_wvrng = None
+ if self.cubepar['save_whitelight']:
+ wl_wvrng = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]),
+ np.min(self.mnmx_wv[:, :, 1]),
+ self.cubepar['whitelight_range'])
+ if self.combine:
+ outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1)
+ # Generate the datacube
+ flxcube, sigcube, bpmcube, wave = \
+ datacube.generate_cube_subpixel(outfile, cube_wcs, self.all_ra, self.all_dec, self.all_wave,
+ self.all_sci, self.all_ivar, np.ones(self.all_wghts.size),
+ self.all_spatpos, self.all_specpos, self.all_spatid,
+ self.all_tilts, self.all_slits, self.all_align, self.all_dar, vox_edges,
+ all_idx=self.all_idx, overwrite=self.overwrite, whitelight_range=wl_wvrng,
+ spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel)
+ # Prepare the header
+ hdr = cube_wcs.to_header()
+ if self.fluxcal:
+ hdr['FLUXUNIT'] = (flux_calib.PYPEIT_FLUX_SCALE, "Flux units -- erg/s/cm^2/Angstrom/arcsec^2")
+ else:
+ hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2")
+ # Write out the datacube
+ msgs.info("Saving datacube as: {0:s}".format(outfile))
+ final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec,
+ sensfunc=sensfunc, fluxed=self.fluxcal)
+ final_cube.to_file(outfile, hdr=hdr, overwrite=self.overwrite)
+ else:
+ for ff in range(self.numfiles):
+ outfile = datacube.get_output_filename("", self.cubepar['output_filename'], False, ff+1)
+ ww = np.where(self.all_idx == ff)
+ # Generate the datacube
+ flxcube, sigcube, bpmcube, wave = \
+ datacube.generate_cube_subpixel(outfile, cube_wcs, self.all_ra[ww], self.all_dec[ww], self.all_wave[ww],
+ self.all_sci[ww], self.all_ivar[ww], np.ones(ww[0].size),
+ self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww],
+ self.all_tilts[ff], self.all_slits[ff], self.all_align[ff], self.all_dar[ff], vox_edges,
+ all_idx=self.all_idx[ww], overwrite=self.overwrite, whitelight_range=wl_wvrng,
+ spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel)
+ # Prepare the header
+ hdr = cube_wcs.to_header()
+ if self.fluxcal:
+ hdr['FLUXUNIT'] = (flux_calib.PYPEIT_FLUX_SCALE, "Flux units -- erg/s/cm^2/Angstrom/arcsec^2")
+ else:
+ hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2")
+ # Write out the datacube
+ msgs.info("Saving datacube as: {0:s}".format(outfile))
+ final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec,
+ sensfunc=sensfunc, fluxed=self.fluxcal)
+ final_cube.to_file(outfile, hdr=hdr, overwrite=self.overwrite)
diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py
index 8cf86650b6..a7391b1c50 100644
--- a/pypeit/core/datacube.py
+++ b/pypeit/core/datacube.py
@@ -5,8 +5,6 @@
"""
import os
-import copy
-import inspect
from astropy import wcs, units
from astropy.coordinates import AltAz, SkyCoord
@@ -16,179 +14,98 @@
import numpy as np
from pypeit import msgs
-from pypeit import alignframe, datamodel, flatfield, io, specobj, spec2dobj, utils
-from pypeit.core.flexure import calculate_image_phase
-from pypeit.core import coadd, extract, findobj_skymask, flux_calib, parse, skysub
-from pypeit.core.procimg import grow_mask
-from pypeit.spectrographs.util import load_spectrograph
+from pypeit import utils
+from pypeit.core import coadd, flux_calib
# Use a fast histogram for speed!
-try:
- from fast_histogram import histogramdd
-except ImportError:
- histogramdd = None
+from fast_histogram import histogramdd
from IPython import embed
-class DataCube(datamodel.DataContainer):
+def gaussian2D(tup, intflux, xo, yo, sigma_x, sigma_y, theta, offset):
"""
- DataContainer to hold the products of a datacube
+ Fit a 2D Gaussian function to an image.
- The datamodel attributes are:
+ Args:
+ tup (:obj:`tuple`):
+ A two element tuple containing the x and y coordinates of each pixel
+ in the image
+ intflux (float):
+ The Integrated flux of the 2D Gaussian
+ xo (float):
+ The centre of the Gaussian along the x-coordinate when z=0
+ yo (float):
+ The centre of the Gaussian along the y-coordinate when z=0
+ sigma_x (float):
+ The standard deviation in the x-direction
+ sigma_y (float):
+ The standard deviation in the y-direction
+ theta (float):
+ The orientation angle of the 2D Gaussian
+ offset (float):
+ Constant offset
- .. include:: ../include/class_datamodel_datacube.rst
+ Returns:
+ `numpy.ndarray`_: The 2D Gaussian evaluated at the coordinate (x, y)
+ """
+ # Extract the (x, y, z) coordinates of each pixel from the tuple
+ (x, y) = tup
+ # Ensure these are floating point
+ xo = float(xo)
+ yo = float(yo)
+ # Account for a rotated 2D Gaussian
+ a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
+ b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
+ c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
+ # Normalise so that the integrated flux is a parameter, instead of the amplitude
+ norm = 1/(2*np.pi*np.sqrt(a*c-b*b))
+ gtwod = offset + norm*intflux*np.exp(-(a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
+ return gtwod.ravel()
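A minimal sketch evaluating the ``gaussian2D`` model defined above on a small grid; the grid size and parameter values are illustrative assumptions.

.. code-block:: python

    import numpy as np
    from pypeit.core.datacube import gaussian2D

    # Coordinate grid matching the (x, y) tuple expected by gaussian2D
    x, y = np.meshgrid(np.arange(50), np.arange(50), indexing='ij')
    # intflux=100, centred at (25, 20), sigma_x=3, sigma_y=2, theta=0.5 rad, offset=0
    model = gaussian2D((x, y), 100.0, 25.0, 20.0, 3.0, 2.0, 0.5, 0.0).reshape(x.shape)
    # The peak of the evaluated model lands at the requested centre
    print(np.unravel_index(np.argmax(model), model.shape))  # (25, 20)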
- Args:
- flux (`numpy.ndarray`_):
- The science datacube (nwave, nspaxel_y, nspaxel_x)
- sig (`numpy.ndarray`_):
- The error datacube (nwave, nspaxel_y, nspaxel_x)
- bpm (`numpy.ndarray`_):
- The bad pixel mask of the datacube (nwave, nspaxel_y, nspaxel_x).
- True values indicate a bad pixel
- blaze_wave (`numpy.ndarray`_):
- Wavelength array of the spectral blaze function
- blaze_spec (`numpy.ndarray`_):
- The spectral blaze function
- sensfunc (`numpy.ndarray`_, None):
- Sensitivity function (nwave,). Only saved if the data are fluxed.
- PYP_SPEC (str):
- Name of the PypeIt Spectrograph
- fluxed (bool):
- If the cube has been flux calibrated, this will be set to "True"
-
- Attributes:
- head0 (`astropy.io.fits.Header`_):
- Primary header
- filename (str):
- Filename to use when loading from file
- spect_meta (:obj:`dict`):
- Parsed meta from the header
- spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`):
- Build from PYP_SPEC
+def fitGaussian2D(image, norm=False):
"""
- version = '1.1.0'
-
- datamodel = {'flux': dict(otype=np.ndarray, atype=np.floating,
- descr='Flux datacube in units of counts/s/Ang/arcsec^2 or '
- '10^-17 erg/s/cm^2/Ang/arcsec^2'),
- 'sig': dict(otype=np.ndarray, atype=np.floating,
- descr='Error datacube (matches units of flux)'),
- 'bpm': dict(otype=np.ndarray, atype=np.uint8,
- descr='Bad pixel mask of the datacube (0=good, 1=bad)'),
- 'blaze_wave': dict(otype=np.ndarray, atype=np.floating,
- descr='Wavelength array of the spectral blaze function'),
- 'blaze_spec': dict(otype=np.ndarray, atype=np.floating,
- descr='The spectral blaze function'),
- 'sensfunc': dict(otype=np.ndarray, atype=np.floating,
- descr='Sensitivity function 10^-17 erg/(counts/cm^2)'),
- 'PYP_SPEC': dict(otype=str, descr='PypeIt: Spectrograph name'),
- 'fluxed': dict(otype=bool, descr='Boolean indicating if the datacube is fluxed.')}
-
- internals = ['head0',
- 'filename',
- 'spectrograph',
- 'spect_meta'
- ]
-
- def __init__(self, flux, sig, bpm, PYP_SPEC, blaze_wave, blaze_spec, sensfunc=None,
- fluxed=None):
-
- args, _, _, values = inspect.getargvalues(inspect.currentframe())
- _d = dict([(k, values[k]) for k in args[1:]])
- # Setup the DataContainer
- datamodel.DataContainer.__init__(self, d=_d)
-
- def _bundle(self):
- """
- Over-write default _bundle() method to separate the DetectorContainer
- into its own HDU
-
- Returns:
- :obj:`list`: A list of dictionaries, each list element is
- written to its own fits extension. See the description
- above.
- """
- d = []
- # Rest of the datamodel
- for key in self.keys():
- # Skip Nones
- if self[key] is None:
- continue
- # Array?
- if self.datamodel[key]['otype'] == np.ndarray:
- tmp = {}
- if self.datamodel[key]['atype'] == np.floating:
- tmp[key] = self[key].astype(np.float32)
- else:
- tmp[key] = self[key]
- d.append(tmp)
- else:
- # Add to header of the primary image
- d[0][key] = self[key]
- # Return
- return d
-
- def to_file(self, ofile, primary_hdr=None, hdr=None, **kwargs):
- """
- Over-load :func:`~pypeit.datamodel.DataContainer.to_file`
- to deal with the header
-
- Args:
- ofile (:obj:`str`):
- Filename
- primary_hdr (`astropy.io.fits.Header`_, optional):
- Base primary header. Updated with new subheader keywords. If
- None, initialized using :func:`~pypeit.io.initialize_header`.
- wcs (`astropy.io.fits.Header`_, optional):
- The World Coordinate System, represented by a fits header
- kwargs (dict):
- Keywords passed directly to parent ``to_file`` function.
-
- """
- if primary_hdr is None:
- primary_hdr = io.initialize_header()
- # Build the header
- if self.head0 is not None and self.PYP_SPEC is not None:
- spectrograph = load_spectrograph(self.PYP_SPEC)
- subheader = spectrograph.subheader_for_spec(self.head0, self.head0)
- else:
- subheader = {}
- # Add em in
- for key in subheader:
- primary_hdr[key] = subheader[key]
- # Do it
- super(DataCube, self).to_file(ofile, primary_hdr=primary_hdr, hdr=hdr, **kwargs)
-
- @classmethod
- def from_file(cls, ifile):
- """
- Over-load :func:`~pypeit.datamodel.DataContainer.from_file`
- to deal with the header
-
- Args:
- ifile (str): Filename holding the object
- """
- with io.fits_open(ifile) as hdu:
- # Read using the base class
- self = super().from_hdu(hdu)
- # Internals
- self.filename = ifile
- self.head0 = hdu[1].header # Actually use the first extension here, since it contains the WCS
- # Meta
- self.spectrograph = load_spectrograph(self.PYP_SPEC)
- self.spect_meta = self.spectrograph.parse_spec_header(hdu[0].header)
- return self
-
- @property
- def ivar(self):
- return utils.inverse(self.sig**2)
-
- @property
- def wcs(self):
- return wcs.WCS(self.head0)
+ Fit a 2D Gaussian to an input image. It is recommended that the input image
+ is scaled to a maximum value that is ~1, so that all fit parameters are of
+ the same order of magnitude. Set norm=True if you do not care about the
+ amplitude or integrated flux. Otherwise, make sure you scale the image by
+ a known value prior to passing it into this function.
+
+ Parameters
+ ----------
+ image : `numpy.ndarray`_
+ A 2D input image
+ norm : bool, optional
+ If True, the input image will be normalised to the maximum value
+ of the input image.
+
+ Returns
+ -------
+ popt : `numpy.ndarray`_
+ The optimum parameters of the Gaussian in the following order: Integrated
+ flux, x center, y center, sigma_x, sigma_y, theta, offset. See
+ :func:`~pypeit.core.datacube.gaussian2D` for a more detailed description
+ of the model.
+ pcov : `numpy.ndarray`_
+ Corresponding covariance matrix
+ """
+ # Normalise if requested
+ wlscl = np.max(image) if norm else 1
+ # Setup the coordinates
+ x = np.linspace(0, image.shape[0] - 1, image.shape[0])
+ y = np.linspace(0, image.shape[1] - 1, image.shape[1])
+ xx, yy = np.meshgrid(x, y, indexing='ij')
+ # Setup the fitting params
+ idx_max = [image.shape[0]/2, image.shape[1]/2] # Just use the centre of the image as the best guess
+ #idx_max = np.unravel_index(np.argmax(image), image.shape)
+ initial_guess = (1, idx_max[0], idx_max[1], 2, 2, 0, 0)
+ bounds = ([0, 0, 0, 0.5, 0.5, -np.pi, -np.inf],
+ [np.inf, image.shape[0], image.shape[1], image.shape[0], image.shape[1], np.pi, np.inf])
+ # Perform the fit
+ popt, pcov = opt.curve_fit(gaussian2D, (xx, yy), image.ravel() / wlscl, bounds=bounds, p0=initial_guess)
+ # Return the fitting results
+ return popt, pcov
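A short sketch of ``fitGaussian2D`` recovering the parameters of a synthetic source; the noise level and truth values are assumptions for illustration. Per the docstring, the image is normalised (``norm=True``) so the fit parameters stay of order unity.

.. code-block:: python

    import numpy as np
    from pypeit.core.datacube import gaussian2D, fitGaussian2D

    rng = np.random.default_rng(0)
    x, y = np.meshgrid(np.arange(40), np.arange(40), indexing='ij')
    truth = (50.0, 18.0, 22.0, 2.5, 1.8, 0.3, 0.0)  # intflux, xo, yo, sx, sy, theta, offset
    image = gaussian2D((x, y), *truth).reshape(x.shape)
    image += rng.normal(scale=0.01 * image.max(), size=image.shape)
    popt, pcov = fitGaussian2D(image, norm=True)
    print(popt[1:3])  # fitted centroid, close to (18, 22)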
def dar_fitfunc(radec, coord_ra, coord_dec, datfit, wave, obstime, location, pressure,
@@ -231,80 +148,6 @@ def dar_fitfunc(radec, coord_ra, coord_dec, datfit, wave, obstime, location, pre
return np.sum((np.array([coord_altaz.alt.value, coord_altaz.az.value])-datfit)**2)
-def correct_dar(wave_arr, coord, obstime, location, pressure, temperature, rel_humidity,
- wave_ref=None, numgrid=10):
- """
- Apply a differental atmospheric refraction correction to the
- input ra/dec.
-
- This implementation is based on ERFA, which is called through
- astropy.
-
- .. todo::
- There's probably going to be issues when the RA angle is
- either side of RA=0.
-
- Parameters
- ----------
- wave_arr : `numpy.ndarray`_
- wavelengths to obtain ra and dec offsets
- coord : `astropy.coordinates.SkyCoord`_
- ra, dec positions at the centre of the field
- obstime : `astropy.time.Time`_
- time at the midpoint of observation
- location : `astropy.coordinates.EarthLocation`_
- observatory location
- pressure : :obj:`float`
- Outside pressure at `location`
- temperature : :obj:`float`
- Outside ambient air temperature at `location`
- rel_humidity : :obj:`float`
- Outside relative humidity at `location`. This should be between 0 to 1.
- wave_ref : :obj:`float`
- Reference wavelength (The DAR correction will be performed relative to this wavelength)
- numgrid : :obj:`int`
- Number of grid points to evaluate the DAR correction.
-
- Returns
- -------
- ra_diff : `numpy.ndarray`_
- Relative RA shift at each wavelength given by ``wave_arr``
- dec_diff : `numpy.ndarray`_
- Relative DEC shift at each wavelength given by ``wave_arr``
- """
- msgs.info("Performing differential atmospheric refraction correction")
- if wave_ref is None:
- wave_ref = 0.5*(wave_arr.min() + wave_arr.max())
-
- # First create the reference frame and wavelength grid
- coord_altaz = coord.transform_to(AltAz(obstime=obstime, location=location))
- wave_grid = np.linspace(wave_arr.min(), wave_arr.max(), numgrid) * units.AA
- # Prepare the fit
- ra_grid, dec_grid = np.zeros(numgrid), np.zeros(numgrid)
- datfit = np.array([coord_altaz.alt.value, coord_altaz.az.value])
- # Loop through all wavelengths
- for ww in range(numgrid):
- # Fit the differential
- args = (coord.ra.value, coord.dec.value, datfit, wave_grid[ww], obstime, location, pressure, temperature, rel_humidity)
- #b_popt, b_pcov = opt.curve_fit(dar_fitfunc, tmp, datfit, p0=(0.0, 0.0))
- res_lsq = opt.least_squares(dar_fitfunc, [0.0, 0.0], args=args, xtol=1.0e-10, ftol=None, gtol=None)
- if not res_lsq.success:
- msgs.warn("DAR correction failed")
- # Store the result
- ra_grid[ww] = res_lsq.x[0]
- dec_grid[ww] = res_lsq.x[1]
-
- # Generate spline of differentials
- spl_ra = interp1d(wave_grid, ra_grid, kind='cubic')
- spl_dec = interp1d(wave_grid, dec_grid, kind='cubic')
-
- # Evaluate the differentials at the input wave_arr
- ra_diff = spl_ra(wave_arr) - spl_ra(wave_ref)
- dec_diff = spl_dec(wave_arr) - spl_dec(wave_ref)
-
- return ra_diff, dec_diff
-
-
def correct_grating_shift(wave_eval, wave_curr, spl_curr, wave_ref, spl_ref, order=2):
"""
Using spline representations of the blaze profile, calculate the grating
@@ -343,143 +186,7 @@ def correct_grating_shift(wave_eval, wave_curr, spl_curr, wave_ref, spl_ref, ord
return grat_corr
-def gaussian2D_cube(tup, intflux, xo, yo, dxdz, dydz, sigma_x, sigma_y, theta, offset):
- """
- Fit a 2D Gaussian function to a datacube. This function assumes that each
- wavelength slice of the datacube is well-fit by a 2D Gaussian. The centre of
- the Gaussian is allowed to vary linearly as a function of wavelength.
-
- .. note::
-
- The integrated flux does not vary with wavelength.
-
- Args:
- tup (:obj:`tuple`):
- A three element tuple containing the x, y, and z locations of each
- pixel in the cube
- intflux (float):
- The Integrated flux of the Gaussian
- xo (float):
- The centre of the Gaussian along the x-coordinate when z=0
- yo (float):
- The centre of the Gaussian along the y-coordinate when z=0
- dxdz (float):
- The change of xo with increasing z
- dydz (float):
- The change of yo with increasing z
- sigma_x (float):
- The standard deviation in the x-direction
- sigma_y (float):
- The standard deviation in the y-direction
- theta (float):
- The orientation angle of the 2D Gaussian
- offset (float):
- Constant offset
-
- Returns:
- `numpy.ndarray`_: The 2D Gaussian evaluated at the coordinate (x, y, z)
- """
- # Extract the (x, y, z) coordinates of each pixel from the tuple
- (x, y, z) = tup
- # Calculate the centre of the Gaussian for each z coordinate
- xo = float(xo) + z*dxdz
- yo = float(yo) + z*dydz
- # Account for a rotated 2D Gaussian
- a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
- b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
- c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
- # Normalise so that the integrated flux is a parameter, instead of the amplitude
- norm = 1/(2*np.pi*np.sqrt(a*c-b*b))
- gtwod = offset + norm*intflux*np.exp(-(a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
- return gtwod.ravel()
-
-
-def gaussian2D(tup, intflux, xo, yo, sigma_x, sigma_y, theta, offset):
- """
- Fit a 2D Gaussian function to an image.
-
- Args:
- tup (:obj:`tuple`):
- A two element tuple containing the x and y coordinates of each pixel
- in the image
- intflux (float):
- The Integrated flux of the 2D Gaussian
- xo (float):
- The centre of the Gaussian along the x-coordinate when z=0
- yo (float):
- The centre of the Gaussian along the y-coordinate when z=0
- sigma_x (float):
- The standard deviation in the x-direction
- sigma_y (float):
- The standard deviation in the y-direction
- theta (float):
- The orientation angle of the 2D Gaussian
- offset (float):
- Constant offset
-
- Returns:
- `numpy.ndarray`_: The 2D Gaussian evaluated at the coordinate (x, y)
- """
- # Extract the (x, y, z) coordinates of each pixel from the tuple
- (x, y) = tup
- # Ensure these are floating point
- xo = float(xo)
- yo = float(yo)
- # Account for a rotated 2D Gaussian
- a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
- b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
- c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
- # Normalise so that the integrated flux is a parameter, instead of the amplitude
- norm = 1/(2*np.pi*np.sqrt(a*c-b*b))
- gtwod = offset + norm*intflux*np.exp(-(a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
- return gtwod.ravel()
-
-
-def fitGaussian2D(image, norm=False):
- """
- Fit a 2D Gaussian to an input image. It is recommended that the input image
- is scaled to a maximum value that is ~1, so that all fit parameters are of
- the same order of magnitude. Set norm=True if you do not care about the
- amplitude or integrated flux. Otherwise, make sure you scale the image by
- a known value prior to passing it into this function.
-
- Parameters
- ----------
- image : `numpy.ndarray`_
- A 2D input image
- norm : bool, optional
- If True, the input image will be normalised to the maximum value
- of the input image.
-
- Returns
- -------
- popt : `numpy.ndarray`_
- The optimum parameters of the Gaussian in the following order: Integrated
- flux, x center, y center, sigma_x, sigma_y, theta, offset. See
- :func:`~pypeit.core.datacube.gaussian2D` for a more detailed description
- of the model.
- pcov : `numpy.ndarray`_
- Corresponding covariance matrix
- """
- # Normalise if requested
- wlscl = np.max(image) if norm else 1
- # Setup the coordinates
- x = np.linspace(0, image.shape[0] - 1, image.shape[0])
- y = np.linspace(0, image.shape[1] - 1, image.shape[1])
- xx, yy = np.meshgrid(x, y, indexing='ij')
- # Setup the fitting params
- idx_max = [image.shape[0]/2, image.shape[1]/2] # Just use the centre of the image as the best guess
- #idx_max = np.unravel_index(np.argmax(image), image.shape)
- initial_guess = (1, idx_max[0], idx_max[1], 2, 2, 0, 0)
- bounds = ([0, 0, 0, 0.5, 0.5, -np.pi, -np.inf],
- [np.inf, image.shape[0], image.shape[1], image.shape[0], image.shape[1], np.pi, np.inf])
- # Perform the fit
- popt, pcov = opt.curve_fit(gaussian2D, (xx, yy), image.ravel() / wlscl, bounds=bounds, p0=initial_guess)
- # Return the fitting results
- return popt, pcov
-
-
-def extract_standard_spec(stdcube, subpixel=20, method='boxcar'):
+def extract_standard_spec(stdcube, subpixel=20):
"""
Extract a spectrum of a standard star from a datacube
@@ -489,9 +196,6 @@ def extract_standard_spec(stdcube, subpixel=20, method='boxcar'):
An HDU list of fits files
subpixel : int
Number of pixels to subpixelate spectrum when creating mask
- method : str
- Method used to extract standard star spectrum. Currently, only 'boxcar'
- is supported
Returns
-------
@@ -512,8 +216,9 @@ def extract_standard_spec(stdcube, subpixel=20, method='boxcar'):
# Setup the WCS
stdwcs = wcs.WCS(stdcube['FLUX'].header)
- wcs_wav = stdwcs.wcs_pix2world(np.vstack((np.zeros(numwave), np.zeros(numwave), np.arange(numwave))).T, 0)
- wave = wcs_wav[:, 2] * 1.0E10 * units.AA
+
+ wcs_scale = (1.0 * stdwcs.spectral.wcs.cunit[0]).to(units.Angstrom).value # Ensures the WCS is in Angstroms
+ wave = wcs_scale * stdwcs.spectral.wcs_pix2world(np.arange(numwave), 0)[0]
# Generate a whitelight image, and fit a 2D Gaussian to estimate centroid and width
wl_img = make_whitelight_fromcube(flxcube)
@@ -551,106 +256,25 @@ def extract_standard_spec(stdcube, subpixel=20, method='boxcar'):
flxcube -= skyspec.reshape((1, 1, numwave))
# Subtract the residual sky from the whitelight image
- sky_val = np.sum(wl_img[:,:,np.newaxis] * smask) / np.sum(smask)
+ sky_val = np.sum(wl_img[:, :, np.newaxis] * smask) / np.sum(smask)
wl_img -= sky_val
- if method == 'boxcar':
- msgs.info("Extracting a boxcar spectrum of datacube")
- # Construct an image that contains the fraction of flux included in the
- # boxcar extraction at each wavelength interval
- norm_flux = wl_img[:,:,np.newaxis] * mask
- norm_flux /= np.sum(norm_flux)
- # Extract boxcar
- cntmask = np.logical_not(bpmcube) * mask # Good pixels within the masked region around the standard star
- flxscl = (norm_flux * cntmask).sum(0).sum(0) # This accounts for the flux that is missing due to masked pixels
- scimask = flxcube * cntmask
- varmask = varcube * cntmask**2
- nrmcnt = utils.inverse(flxscl)
- box_flux = scimask.sum(0).sum(0) * nrmcnt
- box_var = varmask.sum(0).sum(0) * nrmcnt**2
- box_gpm = flxscl > 1/3 # Good pixels are those where at least one-third of the standard star flux is measured
- # Setup the return values
- ret_flux, ret_var, ret_gpm = box_flux, box_var, box_gpm
- elif method == 'gauss2d':
- msgs.error("Use method=boxcar... this method has not been thoroughly tested")
- # Generate a mask
- fitmask = np.logical_not(bpmcube) * mask
- # Setup the coordinates
- x = np.linspace(0, flxcube.shape[0] - 1, flxcube.shape[0])
- y = np.linspace(0, flxcube.shape[1] - 1, flxcube.shape[1])
- z = np.linspace(0, flxcube.shape[2] - 1, flxcube.shape[2])
- xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
- # Normalise the flux in each wavelength channel
- scispec = (flxcube * fitmask).sum(0).sum(0).reshape((1, 1, flxcube.shape[2]))
- cntspec = fitmask.sum(0).sum(0).reshape((1, 1, flxcube.shape[2]))
- # These operations are all inverted, because we need to divide flxcube by scispec
- cntspec *= utils.inverse(scispec)
- cubefit = flxcube * cntspec
- cubesigfit = np.sqrt(varcube) * cntspec
- # Setup the fit params
- ww = np.where(fitmask)
- initial_guess = (1, idx_max[0], idx_max[1], 0.0, 0.0, 2, 2, 0, 0)
- bounds = ([-np.inf, 0, 0, -np.inf, -np.inf, 0.5, 0.5, -np.pi, -np.inf],
- [np.inf,wl_img.shape[0],wl_img.shape[1],np.inf, np.inf, wl_img.shape[0],wl_img.shape[0],np.pi,np.inf])
- msgs.info("Fitting a 2D Gaussian to the datacube")
- popt, pcov = opt.curve_fit(gaussian2D_cube, (xx[ww], yy[ww], zz[ww]), cubefit[ww],
- sigma=cubesigfit[ww], bounds=bounds, p0=initial_guess)
- # Subtract off the best-fitting continuum
- popt[-1] = 0
- # Generate the best-fitting model to be used as an optimal profile
- model = gaussian2D_cube((xx, yy, zz), *popt).reshape(flxcube.shape)
- numim = flxcube.shape[0]*flxcube.shape[1]
-
- # Optimally extract
- msgs.info("Optimally extracting...")
- sciimg = (flxcube*mask).reshape((numim, numwave)).T
- ivar = utils.inverse((varcube*mask**2).reshape((numim, numwave)).T)
- optmask = fitmask.reshape((numim, numwave)).T
- waveimg = np.ones((numwave, numim)) # Just a dummy array - not needed
- skyimg = np.zeros((numwave, numim)) # Just a dummy array - not needed
- thismask = np.ones((numwave, numim)) # Just a dummy array - not needed
- oprof = model.reshape((numim, numwave)).T
- sobj = specobj.SpecObj('IFU', 'DET01', SLITID=0)
- extract.extract_optimal(sciimg, ivar, optmask, waveimg, skyimg, thismask, oprof, sobj)
- opt_flux, opt_var, opt_gpm = sobj.OPT_COUNTS, sobj.OPT_COUNTS_SIG**2, sobj.OPT_MASK
- # Setup the return values
- ret_flux, ret_var, ret_gpm = opt_flux, opt_var, opt_gpm
- elif method == 'optimal':
- msgs.error("Use method=boxcar... this method has not been thoroughly tested")
- # First do a boxcar along one dimension
- msgs.info("Collapsing datacube to a 2D image")
- omask = mask+smask
- idx_sum = 0
- cntmask = np.logical_not(bpmcube) * omask
- scimask = flxcube * cntmask
- varmask = varcube * cntmask**2
- cnt_spec = cntmask.sum(idx_sum) * utils.inverse(omask.sum(idx_sum))
- nrmcnt = utils.inverse(cnt_spec)
- box_sciimg = scimask.sum(idx_sum) * nrmcnt
- box_scivar = varmask.sum(idx_sum) * nrmcnt**2
- box_sciivar = utils.inverse(box_scivar)
- # Transpose for optimal
- box_sciimg = box_sciimg.T
- box_sciivar = box_sciivar.T
-
- # Prepare for optimal
- msgs.info("Starting optimal extraction")
- thismask = np.ones(box_sciimg.shape, dtype=bool)
- nspec, nspat = thismask.shape[0], thismask.shape[1]
- slit_left = np.zeros(nspec)
- slit_right = np.ones(nspec)*(nspat-1)
- tilts = np.outer(np.linspace(0.0,1.0,nspec), np.ones(nspat))
- waveimg = np.outer(wave.value, np.ones(nspat))
- global_sky = np.zeros_like(box_sciimg)
- # Find objects and then extract
- sobj = findobj_skymask.objs_in_slit(box_sciimg, thismask, slit_left, slit_right)
- skysub.local_skysub_extract(box_sciimg, box_sciivar, tilts, waveimg, global_sky, thismask, slit_left,
- slit_right, sobj, model_noise=False)
- opt_flux, opt_var, opt_gpm = sobj.OPT_COUNTS[0,:], sobj.OPT_COUNTS_SIG[0,:]**2, sobj.OPT_MASK[0,:]
- # Setup the return values
- ret_flux, ret_var, ret_gpm = opt_flux, opt_var, opt_gpm
- else:
- msgs.error("Unknown extraction method: ", method)
+ msgs.info("Extracting a boxcar spectrum of datacube")
+ # Construct an image that contains the fraction of flux included in the
+ # boxcar extraction at each wavelength interval
+ norm_flux = wl_img[:,:,np.newaxis] * mask
+ norm_flux /= np.sum(norm_flux)
+ # Extract boxcar
+ cntmask = np.logical_not(bpmcube) * mask # Good pixels within the masked region around the standard star
+ flxscl = (norm_flux * cntmask).sum(0).sum(0) # This accounts for the flux that is missing due to masked pixels
+ scimask = flxcube * cntmask
+ varmask = varcube * cntmask**2
+ nrmcnt = utils.inverse(flxscl)
+ box_flux = scimask.sum(0).sum(0) * nrmcnt
+ box_var = varmask.sum(0).sum(0) * nrmcnt**2
+ box_gpm = flxscl > 1/3 # Good pixels are those where at least one-third of the standard star flux is measured
+ # Setup the return values
+ ret_flux, ret_var, ret_gpm = box_flux, box_var, box_gpm
# Convert from counts/s/Ang/arcsec**2 to counts/s/Ang
arcsecSQ = 3600.0*3600.0*(stdwcs.wcs.cdelt[0]*stdwcs.wcs.cdelt[1])
@@ -660,6 +284,71 @@ def extract_standard_spec(stdcube, subpixel=20, method='boxcar'):
return wave, ret_flux, utils.inverse(ret_var), ret_gpm
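The extraction above reads the wavelength axis through astropy's spectral sub-WCS and rescales by the stored ``cunit``, so the result is in Angstroms whatever unit the cube was written in. A self-contained sketch of that pattern, using a toy WCS whose keywords are assumptions for illustration:

.. code-block:: python

    import numpy as np
    from astropy import wcs, units

    # Toy 3D WCS whose spectral axis is stored in metres
    w = wcs.WCS(naxis=3)
    w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'WAVE']
    w.wcs.cunit = ['deg', 'deg', 'm']
    w.wcs.crpix = [1.0, 1.0, 1.0]
    w.wcs.crval = [180.0, 30.0, 4000.0e-10]   # 4000 Angstrom reference
    w.wcs.cdelt = [-1e-4, 1e-4, 0.5e-10]      # 0.5 Angstrom per channel

    numwave = 10
    wcs_scale = (1.0 * w.spectral.wcs.cunit[0]).to(units.Angstrom).value
    wave = wcs_scale * w.spectral.wcs_pix2world(np.arange(numwave), 0)[0]
    print(wave)  # 4000.0, 4000.5, ..., in Angstroms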
+def make_sensfunc(ss_file, senspar, blaze_wave=None, blaze_spline=None, grating_corr=False):
+ """
+ Generate the sensitivity function from a standard star DataCube.
+
+ Args:
+        ss_file (:obj:`str`):
+            The relative path and filename of the standard star datacube. It should be in
+            FITS format and, for full functionality, should ideally be a :class:`~pypeit.coadd3d.DataCube`.
+ senspar (:class:`~pypeit.par.pypeitpar.SensFuncPar`):
+ The parameters required for the sensitivity function computation.
+ blaze_wave (`numpy.ndarray`_, optional):
+ Wavelength array used to construct blaze_spline
+ blaze_spline (`scipy.interpolate.interp1d`_, optional):
+ Spline representation of the reference blaze function (based on the illumflat).
+        grating_corr (:obj:`bool`, optional):
+ If a grating correction should be performed, set this variable to True.
+
+ Returns:
+        `scipy.interpolate.interp1d`_: A linear interpolation of the sensitivity
+        function as a function of wavelength (in Angstroms).
+ """
+ # Check if the standard star datacube exists
+ if not os.path.exists(ss_file):
+ msgs.error("Standard cube does not exist:" + msgs.newline() + ss_file)
+ msgs.info(f"Loading standard star cube: {ss_file:s}")
+ # Load the standard star cube and retrieve its RA + DEC
+ stdcube = fits.open(ss_file)
+ star_ra, star_dec = stdcube[1].header['CRVAL1'], stdcube[1].header['CRVAL2']
+
+ # Extract a spectrum of the standard star
+ wave, Nlam_star, Nlam_ivar_star, gpm_star = extract_standard_spec(stdcube)
+
+ # Extract the information about the blaze
+ if grating_corr:
+ blaze_wave_curr, blaze_spec_curr = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data
+ blaze_spline_curr = interp1d(blaze_wave_curr, blaze_spec_curr,
+ kind='linear', bounds_error=False, fill_value="extrapolate")
+ # Perform a grating correction
+ grat_corr = correct_grating_shift(wave, blaze_wave_curr, blaze_spline_curr, blaze_wave, blaze_spline)
+ # Apply the grating correction to the standard star spectrum
+ Nlam_star /= grat_corr
+ Nlam_ivar_star *= grat_corr ** 2
+
+    # Read in some information about the standard star
+ std_dict = flux_calib.get_standard_spectrum(star_type=senspar['star_type'],
+ star_mag=senspar['star_mag'],
+ ra=star_ra, dec=star_dec)
+ # Calculate the sensitivity curve
+ # TODO :: This needs to be addressed... unify flux calibration into the main PypeIt routines.
+ msgs.warn("Datacubes are currently flux-calibrated using the UVIS algorithm... this will be deprecated soon")
+ zeropoint_data, zeropoint_data_gpm, zeropoint_fit, zeropoint_fit_gpm = \
+ flux_calib.fit_zeropoint(wave, Nlam_star, Nlam_ivar_star, gpm_star, std_dict,
+ mask_hydrogen_lines=senspar['mask_hydrogen_lines'],
+ mask_helium_lines=senspar['mask_helium_lines'],
+ hydrogen_mask_wid=senspar['hydrogen_mask_wid'],
+ nresln=senspar['UVIS']['nresln'],
+ resolution=senspar['UVIS']['resolution'],
+ trans_thresh=senspar['UVIS']['trans_thresh'],
+ polyorder=senspar['polyorder'],
+ polycorrect=senspar['UVIS']['polycorrect'],
+ polyfunc=senspar['UVIS']['polyfunc'])
+ wgd = np.where(zeropoint_fit_gpm)
+ sens = np.power(10.0, -0.4 * (zeropoint_fit[wgd] - flux_calib.ZP_UNIT_CONST)) / np.square(wave[wgd])
+ return interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate")
+
+
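A hedged sketch of how ``make_sensfunc`` might be called. The filename is hypothetical, and pulling ``senspar`` from the spectrograph's default parameter set is an assumption for illustration; ``star_type`` and ``star_mag`` are set explicitly because the function passes them to ``get_standard_spectrum``.

.. code-block:: python

    from pypeit.core.datacube import make_sensfunc
    from pypeit.spectrographs.util import load_spectrograph

    spec = load_spectrograph('keck_kcwi')
    senspar = spec.default_pypeit_par()['sensfunc']  # assumed source of SensFuncPar
    senspar['star_type'] = 'A0'
    senspar['star_mag'] = 8.0
    # Returns a scipy interp1d of sensitivity versus wavelength (Angstroms)
    flux_spline = make_sensfunc('spec3d_StandardStar.fits', senspar)  # hypothetical file
    sens_at_4500 = flux_spline(4500.0)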
def make_good_skymask(slitimg, tilts):
"""
Mask the spectral edges of each slit (i.e. the pixels near the ends of the
@@ -695,6 +384,49 @@ def make_good_skymask(slitimg, tilts):
return gpm
+def get_output_filename(fil, par_outfile, combine, idx=1):
+ """
+ Get the output filename of a datacube, given the input
+
+ Args:
+ fil (str):
+ The spec2d filename.
+ par_outfile (str):
+ The user-specified output filename (see cubepar['output_filename'])
+ combine (bool):
+ Should the input frames be combined into a single datacube?
+ idx (int, optional):
+ Index of filename to be saved. Required if combine=False.
+
+ Returns:
+ str: The output filename to use.
+ """
+ if combine:
+ if par_outfile == '':
+ par_outfile = 'datacube.fits'
+        # Check if we need to append an extension
+ return par_outfile if '.fits' in par_outfile else f'{par_outfile}.fits'
+ if par_outfile == '':
+ return fil.replace('spec2d_', 'spec3d_')
+    # Finally, if nothing else, use the output filename as a prefix, and a numerical suffix
+ return os.path.splitext(par_outfile)[0] + f'_{idx:03}.fits'
+
+
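The three naming branches implemented above, shown as a minimal sketch:

.. code-block:: python

    from pypeit.core.datacube import get_output_filename

    # Combining frames: the user-supplied name is used, with '.fits' appended if needed
    get_output_filename('', 'mycube', True)                    # -> 'mycube.fits'
    # Not combining, no user name: derived from the spec2d filename
    get_output_filename('spec2d_frame01.fits', '', False, 0)   # -> 'spec3d_frame01.fits'
    # Not combining, user name given: the name becomes a prefix with a numerical suffix
    get_output_filename('', 'mycube.fits', False, 2)           # -> 'mycube_002.fits'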
+def get_output_whitelight_filename(outfile):
+ """
+ Given the output filename of a datacube, create an appropriate whitelight
+ fits file name
+
+ Args:
+ outfile (str):
+ The output filename used for the datacube.
+
+ Returns:
+        str: The output filename to use for the whitelight image.
+ """
+ return os.path.splitext(outfile)[0] + "_whitelight.fits"
+
+
def get_whitelight_pixels(all_wave, min_wl, max_wl):
"""
Determine which pixels are included within the specified wavelength range
@@ -781,7 +513,7 @@ def make_whitelight_fromcube(cube, wave=None, wavemin=None, wavemax=None):
reduce the wavelength range.
Returns:
- `numpy.ndarray`_: Whitelight image of the input cube.
+ A whitelight image of the input cube (of type `numpy.ndarray`_).
"""
# Make a wavelength cut, if requested
cutcube = cube.copy()
@@ -829,120 +561,171 @@ def load_imageWCS(filename, ext=0):
return image, imgwcs
-def make_whitelight_frompixels(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat,
- all_ivar=None, whitelightWCS=None, numra=None, numdec=None, trim=1):
+def align_user_offsets(all_ra, all_dec, all_idx, ifu_ra, ifu_dec, ra_offset, dec_offset):
"""
- Generate a whitelight image using the individual pixels of every input frame
+ Align the RA and DEC of all input frames, and then
+ manually shift the cubes based on user-provided offsets.
+ The offsets should be specified in arcseconds, and the
+ ra_offset should include the cos(dec) factor.
Args:
all_ra (`numpy.ndarray`_):
- 1D flattened array containing the RA values of each pixel from all
- spec2d files
+ A 1D array containing the RA values of each detector pixel of every frame.
all_dec (`numpy.ndarray`_):
- 1D flattened array containing the DEC values of each pixel from all
- spec2d files
- all_wave (`numpy.ndarray`_):
- 1D flattened array containing the wavelength values of each pixel
- from all spec2d files
- all_sci (`numpy.ndarray`_):
- 1D flattened array containing the counts of each pixel from all
- spec2d files
- all_wghts (`numpy.ndarray`_):
- 1D flattened array containing the weights attributed to each pixel
- from all spec2d files
+ A 1D array containing the Dec values of each detector pixel of every frame.
+ Same size as all_ra.
all_idx (`numpy.ndarray`_):
- 1D flattened array containing an integer identifier indicating which
- spec2d file each pixel originates from. For example, a 0 would
- indicate that a pixel originates from the first spec2d frame listed
- in the input file. a 1 would indicate that this pixel originates
- from the second spec2d file, and so forth.
- dspat (float):
- The size of each spaxel on the sky (in degrees)
- all_ivar (`numpy.ndarray`_, optional):
- 1D flattened array containing of the inverse variance of each pixel
- from all spec2d files. If provided, inverse variance images will be
- calculated and returned for each white light image.
- whitelightWCS (`astropy.wcs.WCS`_, optional):
- The WCS of a reference white light image. If supplied, you must also
- supply numra and numdec.
- numra (int, optional):
- Number of RA spaxels in the reference white light image
- numdec (int, optional):
- Number of DEC spaxels in the reference white light image
- trim (int, optional):
- Number of pixels to grow around a masked region
+ A 1D array containing an ID value for each detector frame (0-indexed).
+ Same size as all_ra.
+        ifu_ra (`numpy.ndarray`_):
+            An array of RA values of the IFU (one value per frame)
+        ifu_dec (`numpy.ndarray`_):
+            An array of Dec values of the IFU (one value per frame)
+        ra_offset (`numpy.ndarray`_):
+            An array of RA offsets to be applied to the input pixel values (one value per frame).
+            Note, the ra_offset MUST contain the cos(dec) factor. This is the number of arcseconds
+            on the sky that represents the telescope offset.
+        dec_offset (`numpy.ndarray`_):
+            An array of Dec offsets to be applied to the input pixel values (one value per frame).
Returns:
- tuple: two 3D arrays will be returned, each of shape [N, M, numfiles],
- where N and M are the spatial dimensions of the combined white light
- images. The first array is a white light image, and the second array is
- the corresponding inverse variance image. If all_ivar is None, this will
- be an empty array.
+ A tuple containing a new set of RA and Dec values that have been aligned. Both arrays
+ are of type `numpy.ndarray`_.
"""
- # Determine number of files
- numfiles = np.unique(all_idx).size
+    # First, translate all coordinates to the coordinates of the first frame.
+    # Note: cos(dec) is not needed here; this step just overrides the IFU coordinate centre of each frame.
+    # The cos(dec) factor must be supplied by the user and included in the input ra_offset values.
+ ref_shift_ra = ifu_ra[0] - ifu_ra
+ ref_shift_dec = ifu_dec[0] - ifu_dec
+ numfiles = ra_offset.size
+ for ff in range(numfiles):
+ # Apply the shift
+ all_ra[all_idx == ff] += ref_shift_ra[ff] + ra_offset[ff] / 3600.0
+ all_dec[all_idx == ff] += ref_shift_dec[ff] + dec_offset[ff] / 3600.0
+ msgs.info("Spatial shift of cube #{0:d}:".format(ff + 1) + msgs.newline() +
+ "RA, DEC (arcsec) = {0:+0.3f} E, {1:+0.3f} N".format(ra_offset[ff], dec_offset[ff]))
+ return all_ra, all_dec
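A toy sketch of ``align_user_offsets`` with two frames; all coordinate values are illustrative. As noted above, ``ra_offset`` must already include the cos(dec) factor.

.. code-block:: python

    import numpy as np
    from pypeit.core.datacube import align_user_offsets

    all_ra = np.array([180.0, 180.0001, 180.0000, 180.0001])
    all_dec = np.array([30.0, 30.0, 30.0001, 30.0001])
    all_idx = np.array([0, 0, 1, 1])        # two frames, two pixels each
    ifu_ra = np.array([180.0, 180.00005])   # header pointing of each frame
    ifu_dec = np.array([30.0, 30.0])
    ra_off = np.array([0.0, 1.0])           # arcsec, including cos(dec)
    dec_off = np.array([0.0, -0.5])         # arcsec
    new_ra, new_dec = align_user_offsets(all_ra, all_dec, all_idx,
                                         ifu_ra, ifu_dec, ra_off, dec_off)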
- if whitelightWCS is None:
- # Generate a 2D WCS to register all frames
- coord_min = [np.min(all_ra), np.min(all_dec), np.min(all_wave)]
- coord_dlt = [dspat, dspat, np.max(all_wave) - np.min(all_wave)]
- whitelightWCS = generate_WCS(coord_min, coord_dlt)
- # Generate coordinates
- cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0)
- numra = 1+int((np.max(all_ra) - np.min(all_ra)) * cosdec / dspat)
- numdec = 1+int((np.max(all_dec) - np.min(all_dec)) / dspat)
- else:
- # If a WCS is supplied, the numra and numdec must be specified
- if (numra is None) or (numdec is None):
- msgs.error("A WCS has been supplied to make_whitelight." + msgs.newline() +
- "numra and numdec must also be specified")
- xbins = np.arange(1 + numra) - 1
- ybins = np.arange(1 + numdec) - 1
- spec_bins = np.arange(2) - 1
- bins = (xbins, ybins, spec_bins)
+def set_voxel_sampling(spatscale, specscale, dspat=None, dwv=None):
+ """
+ This function checks if the spatial and spectral scales of all frames are consistent.
+ If the user has not specified either the spatial or spectral scales, they will be set here.
- whitelight_Imgs = np.zeros((numra, numdec, numfiles))
- whitelight_ivar = np.zeros((numra, numdec, numfiles))
- for ff in range(numfiles):
- msgs.info("Generating white light image of frame {0:d}/{1:d}".format(ff + 1, numfiles))
- ww = (all_idx == ff)
- # Make the cube
- pix_coord = whitelightWCS.wcs_world2pix(np.vstack((all_ra[ww], all_dec[ww], all_wave[ww] * 1.0E-10)).T, 0)
- wlcube, edges = np.histogramdd(pix_coord, bins=bins, weights=all_sci[ww] * all_wghts[ww])
- norm, edges = np.histogramdd(pix_coord, bins=bins, weights=all_wghts[ww])
- nrmCube = (norm > 0) / (norm + (norm == 0))
- whtlght = (wlcube * nrmCube)[:, :, 0]
- # Create a mask of good pixels (trim the edges)
- gpm = grow_mask(whtlght == 0, trim) == 0 # A good pixel = 1
- whtlght *= gpm
- # Set the masked regions to the minimum value
- minval = np.min(whtlght[gpm == 1])
- whtlght[gpm == 0] = minval
- # Store the white light image
- whitelight_Imgs[:, :, ff] = whtlght.copy()
- # Now operate on the inverse variance image
- if all_ivar is not None:
- ivar_img, _ = np.histogramdd(pix_coord, bins=bins, weights=all_ivar[ww])
- ivar_img = ivar_img[:, :, 0]
- ivar_img *= gpm
- minval = np.min(ivar_img[gpm == 1])
- ivar_img[gpm == 0] = minval
- whitelight_ivar[:, :, ff] = ivar_img.copy()
- return whitelight_Imgs, whitelight_ivar, whitelightWCS
-
-
-def create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, equinox=2000.0,
- specname="PYP_SPEC"):
+ Parameters
+ ----------
+ spatscale : `numpy.ndarray`_
+ 2D array, shape is (N, 2), listing the native spatial scales of N spec2d frames.
+ spatscale[:,0] refers to the spatial pixel scale of each frame
+ spatscale[:,1] refers to the slicer scale of each frame
+ Each element of the array must be in degrees
+ specscale : `numpy.ndarray`_
+ 1D array listing the native spectral scales of multiple frames. The length of this array should be equal
+ to the number of frames you are using. Each element of the array must be in Angstrom
+    dspat : :obj:`float`, optional
+        Spatial scale to use as the voxel spatial sampling. If None, a new value will be derived based on the inputs
+    dwv : :obj:`float`, optional
+        Spectral scale to use as the voxel spectral sampling. If None, a new value will be derived based on the inputs
+
+ Returns
+ -------
+ _dspat : :obj:`float`
+ Spatial sampling
+ _dwv : :obj:`float`
+ Wavelength sampling
+ """
+ # Make sure all frames have consistent pixel scales
+ ratio = (spatscale[:, 0] - spatscale[0, 0]) / spatscale[0, 0]
+ if np.any(np.abs(ratio) > 1E-4):
+ msgs.warn("The pixel scales of all input frames are not the same!")
+ spatstr = ", ".join(["{0:.6f}".format(ss) for ss in spatscale[:,0]*3600.0])
+ msgs.info("Pixel scales of all input frames:" + msgs.newline() + spatstr + "arcseconds")
+ # Make sure all frames have consistent slicer scales
+ ratio = (spatscale[:, 1] - spatscale[0, 1]) / spatscale[0, 1]
+ if np.any(np.abs(ratio) > 1E-4):
+ msgs.warn("The slicer scales of all input frames are not the same!")
+ spatstr = ", ".join(["{0:.6f}".format(ss) for ss in spatscale[:,1]*3600.0])
+ msgs.info("Slicer scales of all input frames:" + msgs.newline() + spatstr + "arcseconds")
+ # Make sure all frames have consistent wavelength sampling
+ ratio = (specscale - specscale[0]) / specscale[0]
+ if np.any(np.abs(ratio) > 1E-2):
+ msgs.warn("The wavelength samplings of the input frames are not the same!")
+ specstr = ", ".join(["{0:.6f}".format(ss) for ss in specscale])
+ msgs.info("Wavelength samplings of all input frames:" + msgs.newline() + specstr + "Angstrom")
+
+ # If the user has not specified the spatial scale, then set it appropriately now to the largest spatial scale
+ _dspat = np.max(spatscale) if dspat is None else dspat
+ msgs.info("Adopting a square pixel spatial scale of {0:f} arcsec".format(3600.0 * _dspat))
+ # If the user has not specified the spectral sampling, then set it now to the largest value
+ _dwv = np.max(specscale) if dwv is None else dwv
+ msgs.info("Adopting a wavelength sampling of {0:f} Angstrom".format(_dwv))
+ return _dspat, _dwv
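A minimal sketch of ``set_voxel_sampling`` for two frames with matching scales; the KCWI-like pixel and slicer scales are assumptions for illustration.

.. code-block:: python

    import numpy as np
    from pypeit.core.datacube import set_voxel_sampling

    arcsec = 1.0 / 3600.0  # spatial scales must be supplied in degrees
    spatscale = np.array([[0.29 * arcsec, 0.68 * arcsec],
                          [0.29 * arcsec, 0.68 * arcsec]])  # (pixel, slicer) per frame
    specscale = np.array([0.6, 0.6])  # Angstroms per frame
    dspat, dwv = set_voxel_sampling(spatscale, specscale)
    # dspat defaults to the largest spatial scale (the slicer scale here);
    # dwv defaults to the largest spectral scale (0.6 Angstrom)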
+
+
+def wcs_bounds(all_ra, all_dec, all_wave, ra_min=None, ra_max=None, dec_min=None, dec_max=None, wave_min=None, wave_max=None):
+ """
+    Calculate the bounds of the WCS, based on user-specified parameters or
+    the extremities of the data.
+
+ Parameters
+ ----------
+ all_ra : `numpy.ndarray`_
+ 1D flattened array containing the RA values of each pixel from all
+ spec2d files
+ all_dec : `numpy.ndarray`_
+ 1D flattened array containing the DEC values of each pixel from all
+ spec2d files
+ all_wave : `numpy.ndarray`_
+ 1D flattened array containing the wavelength values of each pixel from
+ all spec2d files
+ ra_min : :obj:`float`, optional
+ Minimum RA of the WCS
+ ra_max : :obj:`float`, optional
+ Maximum RA of the WCS
+ dec_min : :obj:`float`, optional
+ Minimum Dec of the WCS
+ dec_max : :obj:`float`, optional
+        Maximum Dec of the WCS
+    wave_min : :obj:`float`, optional
+        Minimum wavelength of the WCS
+    wave_max : :obj:`float`, optional
+        Maximum wavelength of the WCS
+
+ Returns
+ -------
+ _ra_min : :obj:`float`
+ Minimum RA of the WCS
+ _ra_max : :obj:`float`
+ Maximum RA of the WCS
+ _dec_min : :obj:`float`
+ Minimum Dec of the WCS
+ _dec_max : :obj:`float`
+        Maximum Dec of the WCS
+    _wav_min : :obj:`float`
+        Minimum wavelength of the WCS
+    _wav_max : :obj:`float`
+        Maximum wavelength of the WCS
+ """
+ # Setup the cube ranges
+ _ra_min = ra_min if ra_min is not None else np.min(all_ra)
+ _ra_max = ra_max if ra_max is not None else np.max(all_ra)
+ _dec_min = dec_min if dec_min is not None else np.min(all_dec)
+ _dec_max = dec_max if dec_max is not None else np.max(all_dec)
+ _wav_min = wave_min if wave_min is not None else np.min(all_wave)
+ _wav_max = wave_max if wave_max is not None else np.max(all_wave)
+ return _ra_min, _ra_max, _dec_min, _dec_max, _wav_min, _wav_max
+
+
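A one-line sketch of ``wcs_bounds``: any user-specified bound overrides the corresponding data extremum.

.. code-block:: python

    import numpy as np
    from pypeit.core.datacube import wcs_bounds

    ra = np.array([180.0, 180.001])
    dec = np.array([30.0, 30.001])
    wave = np.array([4000.0, 5500.0])
    print(wcs_bounds(ra, dec, wave, wave_min=4100.0))
    # -> (180.0, 180.001, 30.0, 30.001, 4100.0, 5500.0)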
+def create_wcs(all_ra, all_dec, all_wave, dspat, dwave,
+ ra_min=None, ra_max=None, dec_min=None, dec_max=None, wave_min=None, wave_max=None,
+ reference=None, collapse=False, equinox=2000.0, specname="PYP_SPEC"):
"""
Create a WCS and the expected edges of the voxels, based on user-specified
parameters or the extremities of the data.
Parameters
----------
- cubepar : :class:`~pypeit.par.pypeitpar.CubePar`
- An instance of the CubePar parameter set, contained parameters of the
- datacube reduction
all_ra : `numpy.ndarray`_
1D flattened array containing the RA values of each pixel from all
spec2d files
@@ -955,8 +738,22 @@ def create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, e
dspat : float
Spatial size of each square voxel (in arcsec). The default is to use the
values in cubepar.
- dwv : float
+ dwave : float
Linear wavelength step of each voxel (in Angstroms)
+ ra_min : float, optional
+ Minimum RA of the WCS (degrees)
+ ra_max : float, optional
+ Maximum RA of the WCS (degrees)
+ dec_min : float, optional
+ Minimum Dec of the WCS (degrees)
+ dec_max : float, optional
+ Maximum Dec of the WCS (degrees)
+    wave_min : float, optional
+        Minimum wavelength of the WCS (Angstroms)
+    wave_max : float, optional
+        Maximum wavelength of the WCS (Angstroms)
+ reference : str, optional
+ Filename of a fits file that contains a WCS in the Primary HDU.
collapse : bool, optional
If True, the spectral dimension will be collapsed to a single channel
(primarily for white light images)
@@ -979,35 +776,31 @@ def create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, e
cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0)
# Setup the cube ranges
- reference_image = None # The default behaviour is that the reference image is not used
- ra_min = cubepar['ra_min'] if cubepar['ra_min'] is not None else np.min(all_ra)
- ra_max = cubepar['ra_max'] if cubepar['ra_max'] is not None else np.max(all_ra)
- dec_min = cubepar['dec_min'] if cubepar['dec_min'] is not None else np.min(all_dec)
- dec_max = cubepar['dec_max'] if cubepar['dec_max'] is not None else np.max(all_dec)
- wav_min = cubepar['wave_min'] if cubepar['wave_min'] is not None else np.min(all_wave)
- wav_max = cubepar['wave_max'] if cubepar['wave_max'] is not None else np.max(all_wave)
- dwave = cubepar['wave_delta'] if cubepar['wave_delta'] is not None else dwv
+ _ra_min, _ra_max, _dec_min, _dec_max, _wav_min, _wav_max = \
+ wcs_bounds(all_ra, all_dec, all_wave, ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max,
+ wave_min=wave_min, wave_max=wave_max)
# Number of voxels in each dimension
- numra = int((ra_max-ra_min) * cosdec / dspat)
- numdec = int((dec_max-dec_min)/dspat)
- numwav = int(np.round((wav_max-wav_min)/dwave))
+ numra = int((_ra_max - _ra_min) * cosdec / dspat)
+ numdec = int((_dec_max - _dec_min) / dspat)
+ numwav = int(np.round((_wav_max - _wav_min) / dwave))
# If a white light WCS is being generated, make sure there's only 1 wavelength bin
if collapse:
- wav_min = np.min(all_wave)
- wav_max = np.max(all_wave)
- dwave = wav_max - wav_min
+ _wav_min = np.min(all_wave)
+ _wav_max = np.max(all_wave)
+ dwave = _wav_max - _wav_min
numwav = 1
# Generate a master WCS to register all frames
- coord_min = [ra_min, dec_min, wav_min]
+ coord_min = [_ra_min, _dec_min, _wav_min]
coord_dlt = [dspat, dspat, dwave]
# If a reference image is being used and a white light image is requested (collapse=True) update the celestial parts
- if cubepar["reference_image"] is not None:
+ reference_image = None
+ if reference is not None:
# Load the requested reference image
- reference_image, imgwcs = load_imageWCS(cubepar["reference_image"])
+ reference_image, imgwcs = load_imageWCS(reference)
# Update the celestial WCS
coord_min[:2] = imgwcs.wcs.crval
coord_dlt[:2] = imgwcs.wcs.cdelt
@@ -1018,15 +811,15 @@ def create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, e
msgs.newline() + "Parameters of the WCS:" +
msgs.newline() + "RA min = {0:f}".format(coord_min[0]) +
msgs.newline() + "DEC min = {0:f}".format(coord_min[1]) +
- msgs.newline() + "WAVE min, max = {0:f}, {1:f}".format(wav_min, wav_max) +
- msgs.newline() + "Spaxel size = {0:f} arcsec".format(3600.0*dspat) +
+ msgs.newline() + "WAVE min, max = {0:f}, {1:f}".format(_wav_min, _wav_max) +
+ msgs.newline() + "Spaxel size = {0:f} arcsec".format(3600.0 * dspat) +
msgs.newline() + "Wavelength step = {0:f} A".format(dwave) +
msgs.newline() + "-" * 40)
# Generate the output binning
- xbins = np.arange(1+numra)-0.5
- ybins = np.arange(1+numdec)-0.5
- spec_bins = np.arange(1+numwav)-0.5
+ xbins = np.arange(1 + numra) - 0.5
+ ybins = np.arange(1 + numdec) - 0.5
+ spec_bins = np.arange(1 + numwav) - 0.5
voxedges = (xbins, ybins, spec_bins)
return cubewcs, voxedges, reference_image
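A hedged usage sketch of ``create_wcs`` with toy inputs; the coordinate values and sampling are assumptions chosen only to produce a small, valid WCS.

.. code-block:: python

    import numpy as np
    from pypeit.core.datacube import create_wcs

    rng = np.random.default_rng(1)
    all_ra = 180.0 + 1.0e-3 * rng.random(100)
    all_dec = 30.0 + 1.0e-3 * rng.random(100)
    all_wave = np.linspace(4000.0, 5000.0, 100)
    dspat = 0.3 / 3600.0   # 0.3 arcsec voxels, in degrees
    dwave = 0.5            # 0.5 Angstrom spectral channels
    cube_wcs, voxedges, ref_img = create_wcs(all_ra, all_dec, all_wave,
                                             dspat, dwave, specname='toy')
    xbins, ybins, spec_bins = voxedges  # histogram bin edges for the voxels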
@@ -1067,6 +860,120 @@ def generate_WCS(crval, cdelt, equinox=2000.0, name="PYP_SPEC"):
return w
+def compute_weights_frompix(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, dspat, dwv, mnmx_wv, all_wghts,
+ all_spatpos, all_specpos, all_spatid, all_tilts, all_slits, all_align, all_dar,
+ ra_min=None, ra_max=None, dec_min=None, dec_max=None, wave_min=None, wave_max=None,
+ sn_smooth_npix=None, relative_weights=False, reference_image=None, whitelight_range=None,
+ specname="PYPSPEC"):
+ r"""
+    Calculate wavelength-dependent optimal weights. The weighting is currently
+    based on a relative :math:`(S/N)^2` at each wavelength. Note that this function
+ first prepares a whitelight image, and then calls compute_weights() to
+ determine the appropriate weights of each pixel.
+
+ Args:
+ all_ra (`numpy.ndarray`_):
+ 1D flattened array containing the RA values of each pixel from all
+ spec2d files
+ all_dec (`numpy.ndarray`_):
+ 1D flattened array containing the DEC values of each pixel from all
+ spec2d files
+ all_wave (`numpy.ndarray`_):
+ 1D flattened array containing the wavelength values of each pixel
+ from all spec2d files
+ all_sci (`numpy.ndarray`_):
+ 1D flattened array containing the counts of each pixel from all
+ spec2d files
+ all_ivar (`numpy.ndarray`_):
+ 1D flattened array containing the inverse variance of each pixel
+ from all spec2d files
+ all_idx (`numpy.ndarray`_):
+ 1D flattened array containing an integer identifier indicating which
+ spec2d file each pixel originates from. For example, a 0 would
+ indicate that a pixel originates from the first spec2d frame listed
+            in the input file. A value of 1 would indicate that this pixel originates
+ from the second spec2d file, and so forth.
+ dspat (float):
+ The size of each spaxel on the sky (in degrees)
+ dwv (float):
+ The size of each wavelength pixel (in Angstroms)
+ mnmx_wv (`numpy.ndarray`_):
+            The minimum and maximum wavelengths of every slit and frame. The shape is (Nframes, Nslits, 2).
+ The minimum and maximum wavelengths are stored in the [:,:,0] and [:,:,1] indices, respectively.
+ all_wghts (`numpy.ndarray`_):
+ 1D flattened array containing the weights of each pixel to be used
+ in the combination
+ all_spatpos (`numpy.ndarray`_):
+ 1D flattened array containing the detector pixel location in the
+ spatial direction
+ all_specpos (`numpy.ndarray`_):
+ 1D flattened array containing the detector pixel location in the
+ spectral direction
+ all_spatid (`numpy.ndarray`_):
+ 1D flattened array containing the spatid of each pixel
+        all_tilts (`numpy.ndarray`_, list):
+            2D wavelength tilts frame, or a list of tilt frames (see all_idx)
+        all_slits (:class:`~pypeit.slittrace.SlitTraceSet`, list):
+            Information stored about the slits, or a list of SlitTraceSet (see
+            all_idx)
+        all_align (:class:`~pypeit.alignframe.AlignmentSplines`, list):
+            A Class containing the transformation between detector pixel
+            coordinates and WCS pixel coordinates, or a list of Alignment
+            Splines (see all_idx)
+ all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list):
+ A Class containing the DAR correction information, or a list of DARcorrection
+ classes. If a list, it must be the same length as astrom_trans.
+ ra_min (float, optional):
+ Minimum RA of the WCS (degrees)
+ ra_max (float, optional):
+ Maximum RA of the WCS (degrees)
+ dec_min (float, optional):
+ Minimum Dec of the WCS (degrees)
+ dec_max (float, optional):
+ Maximum Dec of the WCS (degrees)
+        wave_min (float, optional):
+            Minimum wavelength of the WCS (Angstroms)
+        wave_max (float, optional):
+            Maximum wavelength of the WCS (Angstroms)
+ sn_smooth_npix (float, optional):
+ Number of pixels used for determining smoothly varying S/N ratio
+ weights. This is currently not required, since a relative weighting
+ scheme with a polynomial fit is used to calculate the S/N weights.
+ relative_weights (bool, optional):
+ Calculate weights by fitting to the ratio of spectra?
+        reference_image (str, optional):
+            Filename of a fits file that contains a WCS, used to register the
+            white light image (passed to create_wcs as the reference).
+        whitelight_range (list, optional):
+            A two element list specifying the minimum and maximum wavelengths
+            (in Angstroms) to use when constructing the white light image.
+        specname (str):
+            Name of the spectrograph
+
+ Returns:
+ `numpy.ndarray`_ : a 1D array the same size as all_sci, containing
+ relative wavelength dependent weights of each input pixel.
+ """
+ # Find the wavelength range where all frames overlap
+ min_wl, max_wl = get_whitelight_range(np.max(mnmx_wv[:, :, 0]), # The max blue wavelength
+ np.min(mnmx_wv[:, :, 1]), # The min red wavelength
+ whitelight_range) # The user-specified values (if any)
+ # Get the good white light pixels
+ ww, wavediff = get_whitelight_pixels(all_wave, min_wl, max_wl)
+
+ # Generate the WCS
+ image_wcs, voxedge, reference_image = \
+ create_wcs(all_ra, all_dec, all_wave, dspat, wavediff,
+ ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max, wave_min=wave_min, wave_max=wave_max,
+ reference=reference_image, collapse=True, equinox=2000.0,
+ specname=specname)
+
+ # Generate the white light image
+ # NOTE: hard-coding subpixel=1 in both directions for speed, and combining into a single image
+ wl_full = generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts,
+ all_spatpos, all_specpos, all_spatid, all_tilts, all_slits, all_align, all_dar,
+ voxedge, all_idx=all_idx, spec_subpixel=1, spat_subpixel=1, combine=True)
+ # Compute the weights
+ return compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, wl_full[:, :, 0], dspat, dwv,
+ sn_smooth_npix=sn_smooth_npix, relative_weights=relative_weights)
+
+
def compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, whitelight_img, dspat, dwv,
sn_smooth_npix=None, relative_weights=False):
r"""
@@ -1153,7 +1060,8 @@ def compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, white
mask_stack = (flux_stack != 0.0) & (ivar_stack != 0.0)
# Obtain a wavelength of each pixel
wcs_res = whitelightWCS.wcs_pix2world(np.vstack((np.zeros(numwav), np.zeros(numwav), np.arange(numwav))).T, 0)
- wave_spec = wcs_res[:, 2] * 1.0E10
+ wcs_scale = (1.0 * whitelightWCS.wcs.cunit[2]).to_value(units.Angstrom) # Ensures the WCS is in Angstroms
+ wave_spec = wcs_scale * wcs_res[:, 2]
# Compute the smoothing scale to use
if sn_smooth_npix is None:
sn_smooth_npix = int(np.round(0.1 * wave_spec.size))
@@ -1170,9 +1078,9 @@ def compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, white
return all_wghts
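A conceptual sketch of relative :math:`(S/N)^2` weighting (not PypeIt's implementation, which fits smoothly varying weights via the routines above): each frame contributes a weight proportional to its smoothed squared S/N at every wavelength.

.. code-block:: python

    import numpy as np

    def snr_squared_weights(flux_stack, ivar_stack, smooth_npix=50):
        """flux_stack, ivar_stack : (nwave, nframes) arrays of extracted spectra."""
        snr2 = flux_stack**2 * ivar_stack
        # Boxcar-smooth along wavelength so the weights vary slowly
        kern = np.ones(smooth_npix) / smooth_npix
        smooth = np.apply_along_axis(
            lambda s: np.convolve(s, kern, mode='same'), 0, snr2)
        # Normalise across frames at each wavelength
        return smooth / np.sum(smooth, axis=1, keepdims=True)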
-def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos,
- all_spatid, tilts, slits, astrom_trans, bins, all_idx=None,
- spec_subpixel=10, spat_subpixel=10, combine=False):
+def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts,
+ all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins,
+ all_idx=None, spec_subpixel=10, spat_subpixel=10, combine=False):
"""
Generate a white light image from the input pixels
@@ -1214,6 +1122,9 @@ def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_i
A Class containing the transformation between detector pixel
coordinates and WCS pixel coordinates, or a list of Alignment
Splines (see all_idx)
+ all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list):
+ A Class containing the DAR correction information, or a list of DARcorrection
+ classes. If a list, it must be the same length as astrom_trans.
bins (tuple):
A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial
and z wavelength coordinates
@@ -1248,9 +1159,9 @@ def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_i
numfr = 1
else:
numfr = np.unique(_all_idx).size
- if len(tilts) != numfr or len(slits) != numfr or len(astrom_trans) != numfr:
+ if len(tilts) != numfr or len(slits) != numfr or len(astrom_trans) != numfr or len(all_dar) != numfr:
msgs.error("The following arguments must be the same length as the expected number of frames to be combined:"
- + msgs.newline() + "tilts, slits, astrom_trans")
+ + msgs.newline() + "tilts, slits, astrom_trans, all_dar")
# Prepare the array of white light images to be stored
numra = bins[0].size-1
numdec = bins[1].size-1
@@ -1263,26 +1174,25 @@ def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_i
# Subpixellate
img, _, _ = subpixellate(image_wcs, all_ra, all_dec, all_wave,
all_sci, all_ivar, all_wghts, all_spatpos,
- all_specpos, all_spatid, tilts, slits, astrom_trans, bins,
+ all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins,
spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, all_idx=_all_idx)
else:
ww = np.where(_all_idx == fr)
# Subpixellate
img, _, _ = subpixellate(image_wcs, all_ra[ww], all_dec[ww], all_wave[ww],
all_sci[ww], all_ivar[ww], all_wghts[ww], all_spatpos[ww],
- all_specpos[ww], all_spatid[ww], tilts[fr], slits[fr], astrom_trans[fr], bins,
- spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel)
+ all_specpos[ww], all_spatid[ww], tilts[fr], slits[fr], astrom_trans[fr],
+ all_dar[fr], bins, spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel)
all_wl_imgs[:, :, fr] = img[:, :, 0]
# Return the constructed white light images
return all_wl_imgs
def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts,
- all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, bins,
- all_idx=None, spec_subpixel=10, spat_subpixel=10, overwrite=False, blaze_wave=None,
- blaze_spec=None, fluxcal=False, sensfunc=None, whitelight_range=None,
- specname="PYP_SPEC", debug=False):
- r"""
+ all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins,
+ all_idx=None, spec_subpixel=10, spat_subpixel=10, overwrite=False,
+ whitelight_range=None, debug=False):
+ """
Save a datacube using the subpixel algorithm. Refer to the subpixellate()
docstring for further details about this algorithm
@@ -1326,6 +1236,9 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s
A Class containing the transformation between detector pixel
coordinates and WCS pixel coordinates, or a list of Alignment
Splines (see all_idx)
+ all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list):
+ A Class containing the DAR correction information, or a list of DARcorrection
+ classes. If a list, it must be the same length as astrom_trans.
bins (tuple):
A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial
and z wavelength coordinates
@@ -1349,17 +1262,6 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s
direction.
overwrite (bool, optional):
If True, the output cube will be overwritten.
- blaze_wave (`numpy.ndarray`_, optional):
- Wavelength array of the spectral blaze function
- blaze_spec (`numpy.ndarray`_, optional):
- Spectral blaze function
- fluxcal (bool, optional):
- Are the data flux calibrated? If True, the units are: :math:`{\rm
- erg/s/cm}^2{\rm /Angstrom/arcsec}^2` multiplied by the
- PYPEIT_FLUX_SCALE. Otherwise, the units are: :math:`{\rm
- counts/s/Angstrom/arcsec}^2`.
- sensfunc (`numpy.ndarray`_, None, optional):
- Sensitivity function that has been applied to the datacube
whitelight_range (None, list, optional):
A two element list that specifies the minimum and maximum
wavelengths (in Angstroms) to use when constructing the white light
@@ -1368,34 +1270,40 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s
either element of the list is None, then the minimum/maximum
wavelength range of that element will be set by the minimum/maximum
wavelength of all_wave.
- specname (str, optional):
- Name of the spectrograph
debug (bool, optional):
If True, a residuals cube will be output. If the datacube generation
is correct, the distribution of pixels in the residual cube with no
flux should have mean=0 and std=1.
+
+ Returns:
+ :obj:`tuple`: Four `numpy.ndarray`_ objects containing
+ (1) the datacube generated from the subpixellated inputs,
+ (2) the corresponding error cube (standard deviation),
+ (3) the corresponding bad pixel mask cube, and
+ (4) a 1D array containing the wavelength at each spectral coordinate of the datacube.
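+
+        For example, a caller might unpack the result as (illustrative):
+        ``flxcube, sigcube, bpmcube, wave = generate_cube_subpixel(...)``,
+        where ``sigcube`` holds the standard deviation rather than the variance.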
"""
# Prepare the header, and add the unit of flux to the header
hdr = output_wcs.to_header()
- if fluxcal:
- hdr['FLUXUNIT'] = (flux_calib.PYPEIT_FLUX_SCALE, "Flux units -- erg/s/cm^2/Angstrom/arcsec^2")
- else:
- hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2")
# Subpixellate
subpix = subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos,
- all_spatid, tilts, slits, astrom_trans, bins, all_idx=all_idx,
+ all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=all_idx,
spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=debug)
# Extract the variables that we need
if debug:
- datacube, varcube, bpmcube, residcube = subpix
+ flxcube, varcube, bpmcube, residcube = subpix
# Save a residuals cube
outfile_resid = outfile.replace(".fits", "_resid.fits")
msgs.info("Saving residuals datacube as: {0:s}".format(outfile_resid))
hdu = fits.PrimaryHDU(residcube.T, header=hdr)
hdu.writeto(outfile_resid, overwrite=overwrite)
else:
- datacube, varcube, bpmcube = subpix
+ flxcube, varcube, bpmcube = subpix
+
+ # Get wavelength of each pixel
+ nspec = flxcube.shape[2]
+ wcs_scale = (1.0*output_wcs.spectral.wcs.cunit[0]).to(units.Angstrom).value # Conversion factor from the WCS spectral unit to Angstroms
+ wave = wcs_scale * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0]
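+ # For example, if the spectral axis of the WCS is stored in metres (previously
+ # assumed via a hard-coded factor), wcs_scale evaluates to 1.0E10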
# Check if the user requested a white light image
if whitelight_range is not None:
@@ -1410,23 +1318,16 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s
whitelight_range[0], whitelight_range[1]))
# Get the output filename for the white light image
out_whitelight = get_output_whitelight_filename(outfile)
- nspec = datacube.shape[2]
- # Get wavelength of each pixel, and note that the WCS gives this in m, so convert to Angstroms (x 1E10)
- wave = 1.0E10 * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0]
- whitelight_img = make_whitelight_fromcube(datacube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1])
+ whitelight_img = make_whitelight_fromcube(flxcube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1])
msgs.info("Saving white light image as: {0:s}".format(out_whitelight))
img_hdu = fits.PrimaryHDU(whitelight_img.T, header=whitelight_wcs.to_header())
img_hdu.writeto(out_whitelight, overwrite=overwrite)
-
- # Write out the datacube
- msgs.info("Saving datacube as: {0:s}".format(outfile))
- final_cube = DataCube(datacube.T, np.sqrt(varcube.T), bpmcube.T, specname, blaze_wave, blaze_spec,
- sensfunc=sensfunc, fluxed=fluxcal)
- final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite)
+ # TODO :: Avoid transposing these large cubes
+ return flxcube.T, np.sqrt(varcube.T), bpmcube.T, wave
def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos,
- all_spatid, tilts, slits, astrom_trans, bins, all_idx=None,
+ all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=None,
spec_subpixel=10, spat_subpixel=10, debug=False):
r"""
Subpixellate the input data into a datacube. This algorithm splits each
@@ -1479,6 +1380,9 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w
A Class containing the transformation between detector pixel
coordinates and WCS pixel coordinates, or a list of Alignment
Splines (see all_idx)
+ all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list):
+ A Class containing the DAR correction information, or a list of DARcorrection
+ classes. If a list, it must be the same length as astrom_trans.
bins (tuple):
A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial
and z wavelength coordinates
@@ -1509,15 +1413,15 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w
:obj:`tuple`: Three or four `numpy.ndarray`_ objects containing (1) the
datacube generated from the subpixellated inputs, (2) the corresponding
variance cube, (3) the corresponding bad pixel mask cube, and (4) the
- residual cube. The latter is only returned if debug is True.
+ residual cube. The latter is only returned if debug is True.
"""
# Check for combinations of lists or not
- if type(tilts) is list and type(slits) is list and type(astrom_trans) is list:
+ if all(isinstance(obj, list) for obj in [tilts, slits, astrom_trans, all_dar]):
# Several frames are being combined. Check the lists have the same length
numframes = len(tilts)
- if len(slits) != numframes or len(astrom_trans) != numframes:
+ if len(slits) != numframes or len(astrom_trans) != numframes or len(all_dar) != numframes:
msgs.error("The following lists must have the same length:" + msgs.newline() +
- "tilts, slits, astrom_trans")
+ "tilts, slits, astrom_trans, all_dar")
# Check all_idx has been set
if all_idx is None:
if numframes != 1:
@@ -1529,20 +1433,19 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w
if tmp != numframes:
msgs.warn("Indices in argument 'all_idx' does not match the number of frames expected.")
# Store in the following variables
- _tilts, _slits, _astrom_trans = tilts, slits, astrom_trans
- elif type(tilts) is not list and type(slits) is not list and \
- type(astrom_trans) is not list:
+ _tilts, _slits, _astrom_trans, _all_dar = tilts, slits, astrom_trans, all_dar
+ elif all(not isinstance(obj, list) for obj in [tilts, slits, astrom_trans, all_dar]):
# Just a single frame - store as lists for this code
- _tilts, _slits, _astrom_trans = [tilts], [slits], [astrom_trans],
+ _tilts, _slits, _astrom_trans, _all_dar = [tilts], [slits], [astrom_trans], [all_dar]
all_idx = np.zeros(all_sci.size)
numframes = 1
else:
msgs.error("The following input arguments should all be of type 'list', or all not be type 'list':" +
- msgs.newline() + "tilts, slits, astrom_trans")
+ msgs.newline() + "tilts, slits, astrom_trans, all_dar")
# Prepare the output arrays
outshape = (bins[0].size-1, bins[1].size-1, bins[2].size-1)
binrng = [[bins[0][0], bins[0][-1]], [bins[1][0], bins[1][-1]], [bins[2][0], bins[2][-1]]]
- datacube, varcube, normcube = np.zeros(outshape), np.zeros(outshape), np.zeros(outshape)
+ flxcube, varcube, normcube = np.zeros(outshape), np.zeros(outshape), np.zeros(outshape)
if debug:
residcube = np.zeros(outshape)
# Divide each pixel into subpixels
@@ -1572,809 +1475,41 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w
wspl = all_wave[this_sl]
asrt = np.argsort(yspl)
wave_spl = interp1d(yspl[asrt], wspl[asrt], kind='linear', bounds_error=False, fill_value='extrapolate')
+ # Calculate the wavelength at each subpixel
+ this_wave = wave_spl(tiltpos)
+ # Calculate the DAR correction at each subpixel
+ ra_corr, dec_corr = _all_dar[fr].correction(this_wave) # This routine needs the wavelengths to be expressed in Angstroms
# Calculate spatial and spectral positions of the subpixels
spat_xx = np.add.outer(wpix[1], spat_x.flatten()).flatten()
spec_yy = np.add.outer(wpix[0], spec_y.flatten()).flatten()
# Transform this to spatial location
spatpos_subpix = _astrom_trans[fr].transform(sl, spat_xx, spec_yy)
spatpos = _astrom_trans[fr].transform(sl, all_spatpos[this_sl], all_specpos[this_sl])
- ra_coeff = np.polyfit(spatpos, all_ra[this_sl], 1)
- dec_coeff = np.polyfit(spatpos, all_dec[this_sl], 1)
- this_ra = np.polyval(ra_coeff, spatpos_subpix)#ra_spl(spatpos_subpix)
- this_dec = np.polyval(dec_coeff, spatpos_subpix)#dec_spl(spatpos_subpix)
- # ssrt = np.argsort(spatpos)
- # ra_spl = interp1d(spatpos[ssrt], all_ra[this_sl][ssrt], kind='linear', bounds_error=False, fill_value='extrapolate')
- # dec_spl = interp1d(spatpos[ssrt], all_dec[this_sl][ssrt], kind='linear', bounds_error=False, fill_value='extrapolate')
- # this_ra = ra_spl(spatpos_subpix)
- # this_dec = dec_spl(spatpos_subpix)
- this_wave = wave_spl(tiltpos)
+ # Interpolate the RA/Dec over the subpixel spatial positions
+ ssrt = np.argsort(spatpos)
+ tmp_ra = all_ra[this_sl]
+ tmp_dec = all_dec[this_sl]
+ ra_spl = interp1d(spatpos[ssrt], tmp_ra[ssrt], kind='linear', bounds_error=False, fill_value='extrapolate')
+ dec_spl = interp1d(spatpos[ssrt], tmp_dec[ssrt], kind='linear', bounds_error=False, fill_value='extrapolate')
+ this_ra = ra_spl(spatpos_subpix)
+ this_dec = dec_spl(spatpos_subpix)
+ # Now apply the DAR correction
+ this_ra += ra_corr
+ this_dec += dec_corr
# Convert world coordinates to voxel coordinates, then histogram
vox_coord = output_wcs.wcs_world2pix(np.vstack((this_ra, this_dec, this_wave * 1.0E-10)).T, 0)
- if histogramdd is not None:
- # use the "fast histogram" algorithm, that assumes regular bin spacing
- datacube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels))
- varcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels))
- normcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels))
- if debug:
- residcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels))
- else:
- datacube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels))[0]
- varcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels))[0]
- normcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels))[0]
- if debug:
- residcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels))[0]
+ # Use the "fast histogram" algorithm, which assumes regular bin spacing
+ flxcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels))
+ varcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels))
+ normcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels))
+ if debug:
+ residcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels))
# Normalise the datacube and variance cube
nc_inverse = utils.inverse(normcube)
- datacube *= nc_inverse
+ flxcube *= nc_inverse
varcube *= nc_inverse**2
bpmcube = (normcube == 0).astype(np.uint8)
if debug:
residcube *= nc_inverse
- return datacube, varcube, bpmcube, residcube
- return datacube, varcube, bpmcube
-
-
-def get_output_filename(fil, par_outfile, combine, idx=1):
- """
- Get the output filename of a datacube, given the input
-
- Args:
- fil (str):
- The spec2d filename.
- par_outfile (str):
- The user-specified output filename (see cubepar['output_filename'])
- combine (bool):
- Should the input frames be combined into a single datacube?
- idx (int, optional):
- Index of filename to be saved. Required if combine=False.
-
- Returns:
- str: The output filename to use.
- """
- if combine:
- if par_outfile == "":
- par_outfile = "datacube.fits"
- # Check the output files don't exist
- outfile = par_outfile if ".fits" in par_outfile else par_outfile + ".fits"
- else:
- if par_outfile == "":
- outfile = fil.replace("spec2d_", "spec3d_")
- else:
- # Use the output filename as a prefix
- outfile = os.path.splitext(par_outfile)[0] + "_{0:03d}.fits".format(idx)
- # Return the outfile
- return outfile
-
-
-def get_output_whitelight_filename(outfile):
- """
- Given the output filename of a datacube, create an appropriate whitelight
- fits file name
-
- Args:
- outfile (str):
- The output filename used for the datacube.
-
- Returns:
- str: The output filename to use for the whitelight image.
- """
- out_wl_filename = os.path.splitext(outfile)[0] + "_whitelight.fits"
- return out_wl_filename
-
-
-def coadd_cube(files, opts, spectrograph=None, parset=None, overwrite=False):
- """ Main routine to coadd spec2D files into a 3D datacube
-
- Args:
- files (:obj:`list`):
- List of all spec2D files
- opts (:obj:`dict`):
- coadd2d options associated with each spec2d file
- spectrograph (:obj:`str`, :class:`~pypeit.spectrographs.spectrograph.Spectrograph`, optional):
- The name or instance of the spectrograph used to obtain the data.
- If None, this is pulled from the file header.
- parset (:class:`~pypeit.par.pypeitpar.PypeItPar`, optional):
- An instance of the parameter set. If None, assumes that detector 1
- is the one reduced and uses the default reduction parameters for the
- spectrograph (see
- :func:`~pypeit.spectrographs.spectrograph.Spectrograph.default_pypeit_par`
- for the relevant spectrograph class).
- overwrite (:obj:`bool`, optional):
- Overwrite the output file, if it exists?
- """
- if spectrograph is None:
- with fits.open(files[0]) as hdu:
- spectrograph = hdu[0].header['PYP_SPEC']
-
- if isinstance(spectrograph, str):
- spec = load_spectrograph(spectrograph)
- specname = spectrograph
- else:
- # Assume it's a Spectrograph instance
- spec = spectrograph
- specname = spectrograph.name
-
- # Grab the parset, if not provided
- if parset is None:
- # TODO :: Use config_specific_par instead?
- parset = spec.default_pypeit_par()
- cubepar = parset['reduce']['cube']
- flatpar = parset['calibrations']['flatfield']
- senspar = parset['sensfunc']
-
- # prep
- numfiles = len(files)
- method = cubepar['method'].lower()
- combine = cubepar['combine']
- align = cubepar['align']
- # If there is only one frame being "combined" AND there's no reference image, then don't compute the translation.
- if numfiles == 1 and cubepar["reference_image"] is None:
- if not align:
- msgs.warn("Parameter 'align' should be False when there is only one frame and no reference image")
- msgs.info("Setting 'align' to False")
- align = False
- if opts['ra_offset'] is not None:
- if not align:
- msgs.warn("When 'ra_offset' and 'dec_offset' are set, 'align' must be True.")
- msgs.info("Setting 'align' to True")
- align = True
- # TODO :: The default behaviour (combine=False, align=False) produces a datacube that uses the instrument WCS
- # It should be possible (and perhaps desirable) to do a spatial alignment (i.e. align=True), apply this to the
- # RA,Dec values of each pixel, and then use the instrument WCS to save the output (or, just adjust the crval).
- # At the moment, if the user wishes to spatially align the frames, a different WCS is generated.
- if histogramdd is None:
- msgs.warn("Generating a datacube is faster if you install fast-histogram:"+msgs.newline()+
- "https://pypi.org/project/fast-histogram/")
- if method != 'ngp':
- msgs.warn("Forcing NGP algorithm, because fast-histogram is not installed")
- method = 'ngp'
-
- # Determine what method is requested
- spec_subpixel, spat_subpixel = 1, 1
- if method == "subpixel":
- msgs.info("Adopting the subpixel algorithm to generate the datacube.")
- spec_subpixel, spat_subpixel = cubepar['spec_subpixel'], cubepar['spat_subpixel']
- elif method == "ngp":
- msgs.info("Adopting the nearest grid point (NGP) algorithm to generate the datacube.")
- else:
- msgs.error(f"The following datacube method is not allowed: {method}")
-
- # Get the detector number and string representation
- det = 1 if parset['rdx']['detnum'] is None else parset['rdx']['detnum']
- detname = spec.get_det_name(det)
-
- # Check if the output file exists
- if combine:
- outfile = get_output_filename("", cubepar['output_filename'], combine)
- out_whitelight = get_output_whitelight_filename(outfile)
- if os.path.exists(outfile) and not overwrite:
- msgs.error("Output filename already exists:"+msgs.newline()+outfile)
- if os.path.exists(out_whitelight) and cubepar['save_whitelight'] and not overwrite:
- msgs.error("Output filename already exists:"+msgs.newline()+out_whitelight)
- else:
- # Finally, if there's just one file, check if the output filename is given
- if numfiles == 1 and cubepar['output_filename'] != "":
- outfile = get_output_filename("", cubepar['output_filename'], True, -1)
- out_whitelight = get_output_whitelight_filename(outfile)
- if os.path.exists(outfile) and not overwrite:
- msgs.error("Output filename already exists:" + msgs.newline() + outfile)
- if os.path.exists(out_whitelight) and cubepar['save_whitelight'] and not overwrite:
- msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight)
- else:
- for ff in range(numfiles):
- outfile = get_output_filename(files[ff], cubepar['output_filename'], combine, ff+1)
- out_whitelight = get_output_whitelight_filename(outfile)
- if os.path.exists(outfile) and not overwrite:
- msgs.error("Output filename already exists:" + msgs.newline() + outfile)
- if os.path.exists(out_whitelight) and cubepar['save_whitelight'] and not overwrite:
- msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight)
-
- # Check the reference cube and image exist, if requested
- fluxcal = False
- blaze_wave, blaze_spec = None, None
- blaze_spline, flux_spline = None, None
- if cubepar['standard_cube'] is not None:
- fluxcal = True
- ss_file = cubepar['standard_cube']
- if not os.path.exists(ss_file):
- msgs.error("Standard cube does not exist:" + msgs.newline() + ss_file)
- msgs.info(f"Loading standard star cube: {ss_file:s}")
- # Load the standard star cube and retrieve its RA + DEC
- stdcube = fits.open(ss_file)
- star_ra, star_dec = stdcube[1].header['CRVAL1'], stdcube[1].header['CRVAL2']
-
- # Extract a spectrum of the standard star
- wave, Nlam_star, Nlam_ivar_star, gpm_star = extract_standard_spec(stdcube)
-
- # Extract the information about the blaze
- if cubepar['grating_corr']:
- blaze_wave_curr, blaze_spec_curr = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data
- blaze_spline_curr = interp1d(blaze_wave_curr, blaze_spec_curr,
- kind='linear', bounds_error=False, fill_value="extrapolate")
- # The first standard star cube is used as the reference blaze spline
- if blaze_spline is None:
- blaze_wave, blaze_spec = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data
- blaze_spline = interp1d(blaze_wave, blaze_spec,
- kind='linear', bounds_error=False, fill_value="extrapolate")
- # Perform a grating correction
- grat_corr = correct_grating_shift(wave.value, blaze_wave_curr, blaze_spline_curr, blaze_wave, blaze_spline)
- # Apply the grating correction to the standard star spectrum
- Nlam_star /= grat_corr
- Nlam_ivar_star *= grat_corr**2
-
- # Read in some information above the standard star
- std_dict = flux_calib.get_standard_spectrum(star_type=senspar['star_type'],
- star_mag=senspar['star_mag'],
- ra=star_ra, dec=star_dec)
- # Calculate the sensitivity curve
- # TODO :: This needs to be addressed... unify flux calibration into the main PypeIt routines.
- msgs.warn("Datacubes are currently flux-calibrated using the UVIS algorithm... this will be deprecated soon")
- zeropoint_data, zeropoint_data_gpm, zeropoint_fit, zeropoint_fit_gpm =\
- flux_calib.fit_zeropoint(wave.value, Nlam_star, Nlam_ivar_star, gpm_star, std_dict,
- mask_hydrogen_lines=senspar['mask_hydrogen_lines'],
- mask_helium_lines=senspar['mask_helium_lines'],
- hydrogen_mask_wid=senspar['hydrogen_mask_wid'],
- nresln=senspar['UVIS']['nresln'], resolution=senspar['UVIS']['resolution'],
- trans_thresh=senspar['UVIS']['trans_thresh'], polyorder=senspar['polyorder'],
- polycorrect=senspar['UVIS']['polycorrect'], polyfunc=senspar['UVIS']['polyfunc'])
- wgd = np.where(zeropoint_fit_gpm)
- sens = np.power(10.0, -0.4 * (zeropoint_fit[wgd] - flux_calib.ZP_UNIT_CONST)) / np.square(wave[wgd])
- flux_spline = interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate")
-
- # If a reference image has been set, check that it exists
- if cubepar['reference_image'] is not None:
- if not os.path.exists(cubepar['reference_image']):
- msgs.error("Reference image does not exist:" + msgs.newline() + cubepar['reference_image'])
-
- # Initialise arrays for storage
- ifu_ra, ifu_dec = np.array([]), np.array([]) # The RA and Dec at the centre of the IFU, as stored in the header
- all_ra, all_dec, all_wave = np.array([]), np.array([]), np.array([])
- all_sci, all_ivar, all_idx, all_wghts = np.array([]), np.array([]), np.array([]), np.array([])
- all_spatpos, all_specpos, all_spatid = np.array([], dtype=int), np.array([], dtype=int), np.array([], dtype=int)
- all_tilts, all_slits, all_align = [], [], []
- all_wcs = []
- dspat = None if cubepar['spatial_delta'] is None else cubepar['spatial_delta']/3600.0 # binning size on the sky (/3600 to convert to degrees)
- dwv = cubepar['wave_delta'] # binning size in wavelength direction (in Angstroms)
- wave_ref = None
- mnmx_wv = None # Will be used to store the minimum and maximum wavelengths of every slit and frame.
- weights = np.ones(numfiles) # Weights to use when combining cubes
- flat_splines = dict() # A dictionary containing the splines of the flatfield
- # Load the default scaleimg frame for the scale correction
- scalecorr_default = "none"
- relScaleImgDef = np.array([1])
- if cubepar['scale_corr'] is not None:
- if cubepar['scale_corr'] == "image":
- msgs.info("The default relative spectral illumination correction will use the science image")
- scalecorr_default = "image"
- else:
- msgs.info("Loading default scale image for relative spectral illumination correction:" +
- msgs.newline() + cubepar['scale_corr'])
- try:
- spec2DObj = spec2dobj.Spec2DObj.from_file(cubepar['scale_corr'], detname)
- relScaleImgDef = spec2DObj.scaleimg
- scalecorr_default = cubepar['scale_corr']
- except:
- msgs.warn("Could not load scaleimg from spec2d file:" + msgs.newline() +
- cubepar['scale_corr'] + msgs.newline() +
- "scale correction will not be performed unless you have specified the correct" + msgs.newline() +
- "scale_corr file in the spec2d block")
- cubepar['scale_corr'] = None
- scalecorr_default = "none"
-
- # Load the default sky frame to be used for sky subtraction
- skysub_default = "image"
- skyImgDef, skySclDef = None, None # This is the default behaviour (i.e. to use the "image" for the sky subtraction)
- if cubepar['skysub_frame'] in [None, 'none', '', 'None']:
- skysub_default = "none"
- skyImgDef = np.array([0.0]) # Do not perform sky subtraction
- skySclDef = np.array([0.0]) # Do not perform sky subtraction
- elif cubepar['skysub_frame'].lower() == "image":
- msgs.info("The sky model in the spec2d science frames will be used for sky subtraction" +msgs.newline() +
- "(unless specific skysub frames have been specified)")
- skysub_default = "image"
- else:
- msgs.info("Loading default image for sky subtraction:" +
- msgs.newline() + cubepar['skysub_frame'])
- try:
- spec2DObj = spec2dobj.Spec2DObj.from_file(cubepar['skysub_frame'], detname)
- skysub_exptime = fits.open(cubepar['skysub_frame'])[0].header['EXPTIME']
- except:
- msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + cubepar['skysub_frame'])
- skysub_default = cubepar['skysub_frame']
- skyImgDef = spec2DObj.sciimg/skysub_exptime # Sky counts/second
- skySclDef = spec2DObj.scaleimg
-
- # Load all spec2d files and prepare the data for making a datacube
- for ff, fil in enumerate(files):
- # Load it up
- msgs.info("Loading PypeIt spec2d frame:" + msgs.newline() + fil)
- spec2DObj = spec2dobj.Spec2DObj.from_file(fil, detname)
- detector = spec2DObj.detector
- spat_flexure = None #spec2DObj.sci_spat_flexure
-
- # Load the header
- hdr = spec2DObj.head0
- ifu_ra = np.append(ifu_ra, spec.get_meta_value([hdr], 'ra'))
- ifu_dec = np.append(ifu_dec, spec.get_meta_value([hdr], 'dec'))
-
- # Get the exposure time
- exptime = hdr['EXPTIME']
-
- # Setup for PypeIt imports
- msgs.reset(verbosity=2)
-
- # TODO :: Consider loading all calibrations into a single variable.
-
- # Initialise the slit edges
- msgs.info("Constructing slit image")
- slits = spec2DObj.slits
- slitid_img_init = slits.slit_img(pad=0, initial=True, flexure=spat_flexure)
- slits_left, slits_right, _ = slits.select_edges(initial=True, flexure=spat_flexure)
-
- # The order of operations below proceeds as follows:
- # (1) Get science image
- # (2) Subtract sky (note, if a joint fit has been performed, the relative scale correction is applied in the reduction!)
- # (3) Apply relative scale correction to both science and ivar
-
- # Set the default behaviour if a global skysub frame has been specified
- this_skysub = skysub_default
- if skysub_default == "image":
- skyImg = spec2DObj.skymodel
- skyScl = spec2DObj.scaleimg
- else:
- skyImg = skyImgDef.copy() * exptime
- skyScl = skySclDef.copy()
- # See if there's any changes from the default behaviour
- if opts['skysub_frame'][ff] is not None:
- if opts['skysub_frame'][ff].lower() == 'default':
- if skysub_default == "image":
- skyImg = spec2DObj.skymodel
- skyScl = spec2DObj.scaleimg
- this_skysub = "image" # Use the current spec2d for sky subtraction
- else:
- skyImg = skyImgDef.copy() * exptime
- skyScl = skySclDef.copy()
- this_skysub = skysub_default # Use the global value for sky subtraction
- elif opts['skysub_frame'][ff].lower() == 'image':
- skyImg = spec2DObj.skymodel
- skyScl = spec2DObj.scaleimg
- this_skysub = "image" # Use the current spec2d for sky subtraction
- elif opts['skysub_frame'][ff].lower() == 'none':
- skyImg = np.array([0.0])
- skyScl = np.array([1.0])
- this_skysub = "none" # Don't do sky subtraction
- else:
- # Load a user specified frame for sky subtraction
- msgs.info("Loading skysub frame:" + msgs.newline() + opts['skysub_frame'][ff])
- try:
- spec2DObj_sky = spec2dobj.Spec2DObj.from_file(opts['skysub_frame'][ff], detname)
- skysub_exptime = fits.open(opts['skysub_frame'][ff])[0].header['EXPTIME']
- except:
- msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + opts['skysub_frame'][ff])
- # TODO :: Consider allowing the actual frame (instead of the skymodel) to be used as the skysub image - make sure the BPM is carried over.
- # :: Allow sky data fitting (i.e. scale the flux of a skysub frame to the science frame data)
- skyImg = spec2DObj_sky.sciimg * exptime / skysub_exptime # Sky counts
- # skyImg = spec2DObj_sky.skymodel * exptime / skysub_exptime # Sky counts
- skyScl = spec2DObj_sky.scaleimg
- this_skysub = opts['skysub_frame'][ff] # User specified spec2d for sky subtraction
- if this_skysub == "none":
- msgs.info("Sky subtraction will not be performed.")
- else:
- msgs.info("Using the following frame for sky subtraction:"+msgs.newline()+this_skysub)
-
- # Load the relative scale image, if something other than the default has been provided
- this_scalecorr = scalecorr_default
- relScaleImg = relScaleImgDef.copy()
- if opts['scale_corr'][ff] is not None:
- if opts['scale_corr'][ff].lower() == 'default':
- if scalecorr_default == "image":
- relScaleImg = spec2DObj.scaleimg
- this_scalecorr = "image" # Use the current spec2d for the relative spectral illumination scaling
- else:
- this_scalecorr = scalecorr_default # Use the default value for the scale correction
- elif opts['scale_corr'][ff].lower() == 'image':
- relScaleImg = spec2DObj.scaleimg
- this_scalecorr = "image" # Use the current spec2d for the relative spectral illumination scaling
- elif opts['scale_corr'][ff].lower() == 'none':
- relScaleImg = np.array([1])
- this_scalecorr = "none" # Don't do relative spectral illumination scaling
- else:
- # Load a user specified frame for sky subtraction
- msgs.info("Loading the following frame for the relative spectral illumination correction:" +
- msgs.newline() + opts['scale_corr'][ff])
- try:
- spec2DObj_scl = spec2dobj.Spec2DObj.from_file(opts['scale_corr'][ff], detname)
- except:
- msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + opts['skysub_frame'][ff])
- relScaleImg = spec2DObj_scl.scaleimg
- this_scalecorr = opts['scale_corr'][ff]
- if this_scalecorr == "none":
- msgs.info("Relative spectral illumination correction will not be performed.")
- else:
- msgs.info("Using the following frame for the relative spectral illumination correction:" +
- msgs.newline()+this_scalecorr)
-
- # Prepare the relative scaling factors
- relSclSky = skyScl/spec2DObj.scaleimg # This factor ensures the sky has the same relative scaling as the science frame
- relScale = spec2DObj.scaleimg/relScaleImg # This factor is applied to the sky subtracted science frame
-
- # Extract the relevant information from the spec2d file
- sciImg = (spec2DObj.sciimg - skyImg*relSclSky)*relScale # Subtract sky and apply relative illumination
- ivar = spec2DObj.ivarraw / relScale**2
- waveimg = spec2DObj.waveimg
- bpmmask = spec2DObj.bpmmask
-
- # TODO :: Really need to write some detailed information in the docs about all of the various corrections that can optionally be applied
-
- # TODO :: Include a flexure correction from the sky frame? Note, you cannot use the waveimg from a sky frame,
- # since the heliocentric correction may have been applied to the sky frame. Need to recalculate waveimg using
- # the slitshifts from a skyimage, and then apply the vel_corr from the science image.
-
- wnonzero = (waveimg != 0.0)
- if not np.any(wnonzero):
- msgs.error("The wavelength image contains only zeros - You need to check the data reduction.")
- wave0 = waveimg[wnonzero].min()
- # Calculate the delta wave in every pixel on the slit
- waveimp = np.roll(waveimg, 1, axis=0)
- waveimn = np.roll(waveimg, -1, axis=0)
- dwaveimg = np.zeros_like(waveimg)
- # All good pixels
- wnz = np.where((waveimg!=0) & (waveimp!=0))
- dwaveimg[wnz] = np.abs(waveimg[wnz]-waveimp[wnz])
- # All bad pixels
- wnz = np.where((waveimg!=0) & (waveimp==0))
- dwaveimg[wnz] = np.abs(waveimg[wnz]-waveimn[wnz])
- # All endpoint pixels
- dwaveimg[0, :] = np.abs(waveimg[0, :] - waveimn[0, :])
- dwaveimg[-1, :] = np.abs(waveimg[-1, :] - waveimp[-1, :])
- dwv = np.median(dwaveimg[dwaveimg != 0.0]) if cubepar['wave_delta'] is None else cubepar['wave_delta']
-
- msgs.info("Using wavelength solution: wave0={0:.3f}, dispersion={1:.3f} Angstrom/pixel".format(wave0, dwv))
-
- # Obtain the minimum and maximum wavelength of all slits
- if mnmx_wv is None:
- mnmx_wv = np.zeros((len(files), slits.nslits, 2))
- for slit_idx, slit_spat in enumerate(slits.spat_id):
- onslit_init = (slitid_img_init == slit_spat)
- mnmx_wv[ff, slit_idx, 0] = np.min(waveimg[onslit_init])
- mnmx_wv[ff, slit_idx, 1] = np.max(waveimg[onslit_init])
-
- # Remove edges of the spectrum where the sky model is bad
- sky_is_good = make_good_skymask(slitid_img_init, spec2DObj.tilts)
-
- # Construct a good pixel mask
- # TODO: This should use the mask function to figure out which elements are masked.
- onslit_gpm = (slitid_img_init > 0) & (bpmmask.mask == 0) & sky_is_good
-
- # Grab the WCS of this frame
- frame_wcs = spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0, dwv)
- all_wcs.append(copy.deepcopy(frame_wcs))
-
- # Find the largest spatial scale of all images being combined
- # TODO :: probably need to put this in the DetectorContainer
- pxscl = detector.platescale * parse.parse_binning(detector.binning)[1] / 3600.0 # This should be degrees/pixel
- slscl = spec.get_meta_value([spec2DObj.head0], 'slitwid')
- if dspat is None:
- dspat = max(pxscl, slscl)
- if pxscl > dspat:
- msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the pixel scale ({1:f} arcsec)".format(3600.0*dspat, 3600.0*pxscl))
- if slscl > dspat:
- msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format(3600.0*dspat, 3600.0*slscl))
-
- # Loading the alignments frame for these data
- alignments = None
- if cubepar['astrometric']:
- key = alignframe.Alignments.calib_type.upper()
- if key in spec2DObj.calibs:
- alignfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key])
- if os.path.exists(alignfile) and cubepar['astrometric']:
- msgs.info("Loading alignments")
- alignments = alignframe.Alignments.from_file(alignfile)
- else:
- msgs.warn(f'Processed alignment frame not recorded or not found!')
- msgs.info("Using slit edges for astrometric transform")
- else:
- msgs.info("Using slit edges for astrometric transform")
- # If nothing better was provided, use the slit edges
- if alignments is None:
- left, right, _ = slits.select_edges(initial=True, flexure=spat_flexure)
- locations = [0.0, 1.0]
- traces = np.append(left[:,None,:], right[:,None,:], axis=1)
- else:
- locations = parset['calibrations']['alignment']['locations']
- traces = alignments.traces
- # Generate an RA/DEC image
- msgs.info("Generating RA/DEC image")
- alignSplines = alignframe.AlignmentSplines(traces, locations, spec2DObj.tilts)
- raimg, decimg, minmax = slits.get_radec_image(frame_wcs, alignSplines, spec2DObj.tilts,
- initial=True, flexure=spat_flexure)
- # Perform the DAR correction
- if wave_ref is None:
- wave_ref = 0.5*(np.min(waveimg[onslit_gpm]) + np.max(waveimg[onslit_gpm]))
- # Get DAR parameters
- raval = spec.get_meta_value([spec2DObj.head0], 'ra')
- decval = spec.get_meta_value([spec2DObj.head0], 'dec')
- obstime = spec.get_meta_value([spec2DObj.head0], 'obstime')
- pressure = spec.get_meta_value([spec2DObj.head0], 'pressure')
- temperature = spec.get_meta_value([spec2DObj.head0], 'temperature')
- rel_humidity = spec.get_meta_value([spec2DObj.head0], 'humidity')
- coord = SkyCoord(raval, decval, unit=(units.deg, units.deg))
- location = spec.location # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location)
- if pressure == 0.0:
- msgs.warn("Pressure is set to zero - DAR correction will not be performed")
- else:
- msgs.info("DAR correction parameters:"+msgs.newline() +
- " Pressure = {0:f} bar".format(pressure) + msgs.newline() +
- " Temperature = {0:f} deg C".format(temperature) + msgs.newline() +
- " Humidity = {0:f}".format(rel_humidity))
- ra_corr, dec_corr = correct_dar(waveimg[onslit_gpm], coord, obstime, location,
- pressure * units.bar, temperature * units.deg_C, rel_humidity, wave_ref=wave_ref)
- raimg[onslit_gpm] += ra_corr*np.cos(np.mean(decimg[onslit_gpm]) * np.pi / 180.0)
- decimg[onslit_gpm] += dec_corr
-
- # Get copies of arrays to be saved
- wave_ext = waveimg[onslit_gpm].copy()
- flux_ext = sciImg[onslit_gpm].copy()
- ivar_ext = ivar[onslit_gpm].copy()
- dwav_ext = dwaveimg[onslit_gpm].copy()
-
- # Correct for sensitivity as a function of grating angle
- # (this assumes the spectrum of the flatfield lamp has the same shape for all setups)
- key = flatfield.FlatImages.calib_type.upper()
- if key not in spec2DObj.calibs:
- msgs.error('Processed flat calibration file not recorded by spec2d file!')
- flatfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key])
- if cubepar['grating_corr'] and flatfile not in flat_splines.keys():
- msgs.info("Calculating relative sensitivity for grating correction")
- # Check if the Flat file exists
- if not os.path.exists(flatfile):
- msgs.error("Grating correction requested, but the following file does not exist:" +
- msgs.newline() + flatfile)
- # Load the Flat file
- flatimages = flatfield.FlatImages.from_file(flatfile)
- total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', initial=True, spat_flexure=spat_flexure) * \
- flatimages.fit2illumflat(slits, finecorr=True, frametype='illum', initial=True, spat_flexure=spat_flexure)
- flatframe = flatimages.pixelflat_raw / total_illum
- if flatimages.pixelflat_spec_illum is None:
- # Calculate the relative scale
- scale_model = flatfield.illum_profile_spectral(flatframe, waveimg, slits,
- slit_illum_ref_idx=flatpar['slit_illum_ref_idx'], model=None,
- skymask=None, trim=flatpar['slit_trim'], flexure=spat_flexure,
- smooth_npix=flatpar['slit_illum_smooth_npix'])
- else:
- msgs.info("Using relative spectral illumination from FlatImages")
- scale_model = flatimages.pixelflat_spec_illum
- # Apply the relative scale and generate a 1D "spectrum"
- onslit = waveimg != 0
- wavebins = np.linspace(np.min(waveimg[onslit]), np.max(waveimg[onslit]), slits.nspec)
- hist, edge = np.histogram(waveimg[onslit], bins=wavebins, weights=flatframe[onslit]/scale_model[onslit])
- cntr, edge = np.histogram(waveimg[onslit], bins=wavebins)
- cntr = cntr.astype(float)
- norm = (cntr != 0) / (cntr + (cntr == 0))
- spec_spl = hist * norm
- wave_spl = 0.5 * (wavebins[1:] + wavebins[:-1])
- flat_splines[flatfile] = interp1d(wave_spl, spec_spl, kind='linear',
- bounds_error=False, fill_value="extrapolate")
- flat_splines[flatfile+"_wave"] = wave_spl.copy()
- # Check if a reference blaze spline exists (either from a standard star if fluxing or from a previous
- # exposure in this for loop)
- if blaze_spline is None:
- blaze_wave, blaze_spec = wave_spl, spec_spl
- blaze_spline = interp1d(wave_spl, spec_spl, kind='linear',
- bounds_error=False, fill_value="extrapolate")
-
- # Perform extinction correction
- msgs.info("Applying extinction correction")
- longitude = spec.telescope['longitude']
- latitude = spec.telescope['latitude']
- airmass = spec2DObj.head0[spec.meta['airmass']['card']]
- extinct = flux_calib.load_extinction_data(longitude, latitude, senspar['UVIS']['extinct_file'])
- # extinction_correction requires the wavelength is sorted
- wvsrt = np.argsort(wave_ext)
- ext_corr = flux_calib.extinction_correction(wave_ext[wvsrt] * units.AA, airmass, extinct)
- # Grating correction
- grat_corr = 1.0
- if cubepar['grating_corr']:
- grat_corr = correct_grating_shift(wave_ext[wvsrt], flat_splines[flatfile + "_wave"], flat_splines[flatfile],
- blaze_wave, blaze_spline)
- # Sensitivity function
- sens_func = 1.0
- if fluxcal:
- msgs.info("Calculating the sensitivity function")
- sens_func = flux_spline(wave_ext[wvsrt])
- # Convert the flux_sav to counts/s, correct for the relative sensitivity of different setups
- ext_corr *= sens_func / (exptime * grat_corr)
- # Correct for extinction
- flux_sav = flux_ext[wvsrt] * ext_corr
- ivar_sav = ivar_ext[wvsrt] / ext_corr ** 2
-
- # Convert units to Counts/s/Ang/arcsec2
- # Slicer sampling * spatial pixel sampling
- sl_deg = np.sqrt(frame_wcs.wcs.cd[0, 0] ** 2 + frame_wcs.wcs.cd[1, 0] ** 2)
- px_deg = np.sqrt(frame_wcs.wcs.cd[1, 1] ** 2 + frame_wcs.wcs.cd[0, 1] ** 2)
- scl_units = dwav_ext[wvsrt] * (3600.0 * sl_deg) * (3600.0 * px_deg)
- flux_sav /= scl_units
- ivar_sav *= scl_units ** 2
-
- # sort back to the original ordering
- resrt = np.argsort(wvsrt)
- numpix = raimg[onslit_gpm].size
-
- # Calculate the weights relative to the zeroth cube
- weights[ff] = 1.0#exptime #np.median(flux_sav[resrt]*np.sqrt(ivar_sav[resrt]))**2
-
- # Get the slit image and then unset pixels in the slit image that are bad
- this_specpos, this_spatpos = np.where(onslit_gpm)
- this_spatid = slitid_img_init[onslit_gpm]
-
- # If individual frames are to be output without aligning them,
- # there's no need to store information, just make the cubes now
- if not combine and not align:
- # Get the output filename
- if numfiles == 1 and cubepar['output_filename'] != "":
- outfile = get_output_filename("", cubepar['output_filename'], True, -1)
- else:
- outfile = get_output_filename(fil, cubepar['output_filename'], combine, ff+1)
- # Get the coordinate bounds
- slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True))))
- numwav = int((np.max(waveimg) - wave0) / dwv)
- bins = spec.get_datacube_bins(slitlength, minmax, numwav)
- # Generate the output WCS for the datacube
- crval_wv = cubepar['wave_min'] if cubepar['wave_min'] is not None else 1.0E10 * frame_wcs.wcs.crval[2]
- cd_wv = cubepar['wave_delta'] if cubepar['wave_delta'] is not None else 1.0E10 * frame_wcs.wcs.cd[2, 2]
- output_wcs = spec.get_wcs(spec2DObj.head0, slits, detector.platescale, crval_wv, cd_wv)
- # Set the wavelength range of the white light image.
- wl_wvrng = None
- if cubepar['save_whitelight']:
- wl_wvrng = get_whitelight_range(np.max(mnmx_wv[ff, :, 0]),
- np.min(mnmx_wv[ff, :, 1]),
- cubepar['whitelight_range'])
- # Make the datacube
- if method in ['subpixel', 'ngp']:
- # Generate the datacube
- generate_cube_subpixel(outfile, output_wcs, raimg[onslit_gpm], decimg[onslit_gpm], wave_ext,
- flux_sav[resrt], ivar_sav[resrt], np.ones(numpix),
- this_spatpos, this_specpos, this_spatid,
- spec2DObj.tilts, slits, alignSplines, bins,
- all_idx=None, overwrite=overwrite, blaze_wave=blaze_wave, blaze_spec=blaze_spec,
- fluxcal=fluxcal, specname=specname, whitelight_range=wl_wvrng,
- spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel)
- continue
-
- # Store the information if we are combining multiple frames
- all_ra = np.append(all_ra, raimg[onslit_gpm].copy())
- all_dec = np.append(all_dec, decimg[onslit_gpm].copy())
- all_wave = np.append(all_wave, wave_ext.copy())
- all_sci = np.append(all_sci, flux_sav[resrt].copy())
- all_ivar = np.append(all_ivar, ivar_sav[resrt].copy())
- all_idx = np.append(all_idx, ff*np.ones(numpix))
- all_wghts = np.append(all_wghts, weights[ff]*np.ones(numpix)/weights[0])
- all_spatpos = np.append(all_spatpos, this_spatpos)
- all_specpos = np.append(all_specpos, this_specpos)
- all_spatid = np.append(all_spatid, this_spatid)
- all_tilts.append(spec2DObj.tilts)
- all_slits.append(slits)
- all_align.append(alignSplines)
-
- # No need to continue if we are not combining nor aligning frames
- if not combine and not align:
- return
-
- # Grab cos(dec) for convenience
- cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0)
-
- # Register spatial offsets between all frames
- if align:
- if opts['ra_offset'] is not None:
- # First, translate all coordinates to the coordinates of the first frame
- # Note :: Don't need cosdec here, this just overrides the IFU coordinate centre of each frame
- ref_shift_ra = ifu_ra[0] - ifu_ra
- ref_shift_dec = ifu_dec[0] - ifu_dec
- for ff in range(numfiles):
- # Apply the shift
- all_ra[all_idx == ff] += ref_shift_ra[ff] + opts['ra_offset'][ff]/3600.0
- all_dec[all_idx == ff] += ref_shift_dec[ff] + opts['dec_offset'][ff]/3600.0
- msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff + 1, opts['ra_offset'][ff], opts['dec_offset'][ff]))
- else:
- # Find the wavelength range where all frames overlap
- min_wl, max_wl = get_whitelight_range(np.max(mnmx_wv[:, :, 0]), # The max blue wavelength
- np.min(mnmx_wv[:, :, 1]), # The min red wavelength
- cubepar['whitelight_range']) # The user-specified values (if any)
- # Get the good whitelight pixels
- ww, wavediff = get_whitelight_pixels(all_wave, min_wl, max_wl)
- # Iterate over white light image generation and spatial shifting
- numiter = 2
- for dd in range(numiter):
- msgs.info(f"Iterating on spatial translation - ITERATION #{dd+1}/{numiter}")
- # Setup the WCS to use for all white light images
- ref_idx = None # Don't use an index - This is the default behaviour when a reference image is supplied
- image_wcs, voxedge, reference_image = create_wcs(cubepar, all_ra[ww], all_dec[ww], all_wave[ww],
- dspat, wavediff, collapse=True)
- if voxedge[2].size != 2:
- msgs.error("Spectral range for WCS is incorrect for white light image")
-
- wl_imgs = generate_image_subpixel(image_wcs, all_ra[ww], all_dec[ww], all_wave[ww],
- all_sci[ww], all_ivar[ww], all_wghts[ww],
- all_spatpos[ww], all_specpos[ww], all_spatid[ww],
- all_tilts, all_slits, all_align, voxedge, all_idx=all_idx[ww],
- spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel)
- if reference_image is None:
- # ref_idx will be the index of the cube with the highest S/N
- ref_idx = np.argmax(weights)
- reference_image = wl_imgs[:, :, ref_idx].copy()
- msgs.info("Calculating spatial translation of each cube relative to cube #{0:d})".format(ref_idx+1))
- else:
- msgs.info("Calculating the spatial translation of each cube relative to user-defined 'reference_image'")
-
- # Calculate the image offsets relative to the reference image
- for ff in range(numfiles):
- # Calculate the shift
- ra_shift, dec_shift = calculate_image_phase(reference_image.copy(), wl_imgs[:, :, ff], maskval=0.0)
- # Convert pixel shift to degrees shift
- ra_shift *= dspat/cosdec
- dec_shift *= dspat
- msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff+1, ra_shift*3600.0, dec_shift*3600.0))
- # Apply the shift
- all_ra[all_idx == ff] += ra_shift
- all_dec[all_idx == ff] += dec_shift
-
- # Calculate the relative spectral weights of all pixels
- if numfiles == 1:
- # No need to calculate weights if there's just one frame
- all_wghts = np.ones_like(all_sci)
- else:
- # Find the wavelength range where all frames overlap
- min_wl, max_wl = get_whitelight_range(np.max(mnmx_wv[:, :, 0]), # The max blue wavelength
- np.min(mnmx_wv[:, :, 1]), # The min red wavelength
- cubepar['whitelight_range']) # The user-specified values (if any)
- # Get the good white light pixels
- ww, wavediff = get_whitelight_pixels(all_wave, min_wl, max_wl)
- # Get a suitable WCS
- image_wcs, voxedge, reference_image = create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, wavediff, collapse=True)
- # Generate the white light image (note: hard-coding subpixel=1 in both directions, and combining into a single image)
- wl_full = generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave,
- all_sci, all_ivar, all_wghts,
- all_spatpos, all_specpos, all_spatid,
- all_tilts, all_slits, all_align, voxedge, all_idx=all_idx,
- spec_subpixel=1, spat_subpixel=1, combine=True)
- # Compute the weights
- all_wghts = compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, wl_full[:, :, 0],
- dspat, dwv, relative_weights=cubepar['relative_weights'])
-
- # Generate the WCS, and the voxel edges
- cube_wcs, vox_edges, _ = create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, dwv)
-
- sensfunc = None
- if flux_spline is not None:
- # Get wavelength of each pixel, and note that the WCS gives this in m, so convert to Angstroms (x 1E10)
- numwav = vox_edges[2].size-1
- senswave = cube_wcs.spectral.wcs_pix2world(np.arange(numwav), 0)[0] * 1.0E10
- sensfunc = flux_spline(senswave)
-
- # Generate a datacube
- outfile = get_output_filename("", cubepar['output_filename'], True, -1)
- if method in ['subpixel', 'ngp']:
- # Generate the datacube
- wl_wvrng = None
- if cubepar['save_whitelight']:
- wl_wvrng = get_whitelight_range(np.max(mnmx_wv[:, :, 0]),
- np.min(mnmx_wv[:, :, 1]),
- cubepar['whitelight_range'])
- if combine:
- generate_cube_subpixel(outfile, cube_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar,
- np.ones(all_wghts.size), # all_wghts,
- all_spatpos, all_specpos, all_spatid, all_tilts, all_slits, all_align, vox_edges,
- all_idx=all_idx, overwrite=overwrite, blaze_wave=blaze_wave, blaze_spec=blaze_spec,
- fluxcal=fluxcal, sensfunc=sensfunc, specname=specname, whitelight_range=wl_wvrng,
- spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel)
- else:
- for ff in range(numfiles):
- outfile = get_output_filename("", cubepar['output_filename'], False, ff)
- ww = np.where(all_idx == ff)
- generate_cube_subpixel(outfile, cube_wcs, all_ra[ww], all_dec[ww], all_wave[ww], all_sci[ww], all_ivar[ww], np.ones(all_wghts[ww].size),
- all_spatpos[ww], all_specpos[ww], all_spatid[ww], all_tilts[ff], all_slits[ff], all_align[ff], vox_edges,
- all_idx=all_idx[ww], overwrite=overwrite, blaze_wave=blaze_wave, blaze_spec=blaze_spec,
- fluxcal=fluxcal, sensfunc=sensfunc, specname=specname, whitelight_range=wl_wvrng,
- spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel)
-
-
+ return flxcube, varcube, bpmcube, residcube
+ return flxcube, varcube, bpmcube
diff --git a/pypeit/core/extract.py b/pypeit/core/extract.py
index 0e26639340..a618028af5 100644
--- a/pypeit/core/extract.py
+++ b/pypeit/core/extract.py
@@ -451,6 +451,51 @@ def extract_boxcar(sciimg, ivar, mask, waveimg, skyimg, spec, fwhmimg=None, base
spec.BOX_NPIX = pixtot-pixmsk
+def extract_hist_spectrum(waveimg, frame, gpm=None, bins=1000):
+ """
+ Generate a quick spectrum using the nearest grid point (histogram) algorithm.
+
+ Args:
+ waveimg (`numpy.ndarray`_):
+ A 2D image of the wavelength at each pixel.
+ frame (`numpy.ndarray`_):
+ The frame to use to extract a spectrum. Its shape must match that of waveimg.
+ gpm (`numpy.ndarray`_, optional):
+ A boolean array indicating the pixels to include in the histogram (True = include)
+ bins (`numpy.ndarray`_, int, optional):
+ Either a 1D array indicating the bin edges to be used for the histogram,
+ or an integer that specifies the number of bin edges to generate
+
+ Returns:
+ A tuple containing the wavelength and spectrum at the centre of each histogram bin. Both
+ arrays returned in the tuple are `numpy.ndarray`_.
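+
+ Examples:
+ A minimal, illustrative sketch with a synthetic wavelength image and a
+ constant frame (toy inputs, only meant to show the calling pattern):
+
+ >>> import numpy as np
+ >>> waveimg = np.linspace(4000., 5000., 100 * 50).reshape(100, 50)  # toy wavelength image
+ >>> frame = np.ones_like(waveimg)  # toy frame with constant counts
+ >>> wave, spec = extract_hist_spectrum(waveimg, frame, bins=20)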
+ """
+ # Check the inputs
+ if waveimg.shape != frame.shape:
+ msgs.error("Wavelength image is not the same shape as the input frame")
+ # Check the GPM
+ _gpm = gpm if gpm is not None else waveimg > 0
+ if waveimg.shape != _gpm.shape:
+ msgs.error("Wavelength image is not the same shape as the GPM")
+ # Set the bins
+ if isinstance(bins, int):
+ _bins = np.linspace(np.min(waveimg[_gpm]), np.max(waveimg[_gpm]), bins)
+ elif isinstance(bins, np.ndarray):
+ _bins = bins
+ else:
+ msgs.error("Argument 'bins' should be an integer or a numpy array")
+
+ # Construct a histogram and the normalisation
+ hist, edge = np.histogram(waveimg[_gpm], bins=_bins, weights=frame[_gpm])
+ cntr, edge = np.histogram(waveimg[_gpm], bins=_bins)
+ # Normalise
+ cntr = cntr.astype(float)
+ spec = hist * utils.inverse(cntr)
+ # Generate the corresponding wavelength array - set it to be the bin centre
+ wave = 0.5 * (_bins[1:] + _bins[:-1])
+ return wave, spec
+
+
def findfwhm(model, sig_x):
r""" Calculate the spatial FWHM of an object profile.
This is utility routine is used in :func:`~pypeit.core.extract.fit_profile`.
diff --git a/pypeit/core/flat.py b/pypeit/core/flat.py
index 9f4475b4fa..3d8b640fc5 100644
--- a/pypeit/core/flat.py
+++ b/pypeit/core/flat.py
@@ -297,7 +297,7 @@ def illum_profile_spectral_poly(rawimg, waveimg, slitmask, slitmask_trim, model,
waveimg : `numpy.ndarray`_
Wavelength image
slitmask : `numpy.ndarray`_
- A 2D int mask, the same shape as rawimg, indicating which pixels are on a slit. A zero value
+ A 2D int mask, the same shape as rawimg, indicating which pixels are on a slit. A -1 value
indicates not on a slit, while any pixels on a slit should have the value of the slit spatial ID
number.
slitmask_trim :
@@ -321,7 +321,7 @@ def illum_profile_spectral_poly(rawimg, waveimg, slitmask, slitmask_trim, model,
"""
msgs.info(f"Performing relative spectral sensitivity correction (reference slit = {slit_illum_ref_idx})")
# Generate the mask
- _thismask = thismask if (thismask is not None) else slitmask
+ _thismask = thismask if (thismask is not None) else (slitmask > 0)
gpm = gpmask if (gpmask is not None) else np.ones_like(rawimg, dtype=bool)
# Extract the list of spatial IDs from the slitmask
slitmask_spatid = np.unique(slitmask)
@@ -348,7 +348,7 @@ def illum_profile_spectral_poly(rawimg, waveimg, slitmask, slitmask_trim, model,
coeff = np.polyfit(wavcen[wgd], scale_bin[wgd], w=1/scale_err[wgd], deg=2)
scaleImg[this_slit] *= np.polyval(coeff, waveimg[this_slit])
if sl == slit_illum_ref_idx:
- scaleImg[_thismask] /= np.polyval(coeff, waveimg[_thismask])
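+ # utils.inverse returns zero for a non-positive denominator, avoiding the
+ # divide-by-zero risk of the previous direct division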
+ scaleImg[_thismask] *= utils.inverse(np.polyval(coeff, waveimg[_thismask]))
minv, maxv = np.min(scaleImg[_thismask]), np.max(scaleImg[_thismask])
msgs.info("Minimum/Maximum scales = {0:.5f}, {1:.5f}".format(minv, maxv))
return scaleImg
diff --git a/pypeit/core/meta.py b/pypeit/core/meta.py
index a4eacf5f9b..3d8092e853 100644
--- a/pypeit/core/meta.py
+++ b/pypeit/core/meta.py
@@ -143,22 +143,28 @@ def define_additional_meta(nlamps=20):
'dichroic': dict(dtype=str, comment='Beam splitter'),
'dispangle': dict(dtype=float, comment='Angle of the disperser', rtol=0.),
'cenwave': dict(dtype=float, comment='Central wavelength of the disperser', rtol=0.),
+ # TODO what is the difference between dither and dithoff? Also, we should rename these to be
+ # more clearly the offset along the slit to distinguish from the IFU case of RA_off and DEC_off
'dither': dict(dtype=float, comment='Dither amount in arcsec'),
+ 'dithoff': dict(dtype=float, comment='Dither offset'),
'dithpat': dict(dtype=str, comment='Dither pattern'),
'dithpos': dict(dtype=str, comment='Dither position'),
- 'dithoff': dict(dtype=float, comment='Dither offset'),
+ 'posang': dict(dtype=float, comment='Position angle of the observation (degrees, positive is East from North)'),
+ 'ra_off': dict(dtype=float, comment='Dither offset in RA'),
+ 'dec_off': dict(dtype=float, comment='Dither offset in DEC'),
'echangle':dict(dtype=float, comment='Echelle angle'),
'filter1': dict(dtype=str, comment='First filter in optical path'),
'frameno': dict(dtype=str, comment='Frame number provided by instrument software'),
'hatch': dict(dtype=str, comment='Position of instrument hatch'),
- 'humidity': dict(dtype=float, comment='Relative humidity (0 to 1) at observation time'),
+ 'humidity': dict(dtype=float, comment='Humidity at observation time (as a percentage, not a fraction)'),
'idname': dict(dtype=str, comment='Instrument supplied frametype (e.g. bias)'),
'instrument': dict(dtype=str, comment='Header supplied instrument name'),
'mode': dict(dtype=str, comment='Observing mode'),
'object': dict(dtype=str, comment='Alternative object name (cf. target)'),
'obstime': dict(dtype=str, comment='Observation time'),
'oscansec': dict(dtype=str, comment='Overscan section (windowing)'),
- 'pressure': dict(dtype=float, comment='Pressure (units.bar) at observation time'),
+ 'parangle': dict(dtype=float, comment='Parallactic angle (units.radian)'),
+ 'pressure': dict(dtype=float, comment='Pressure (units.pascal) at observation time'),
'seq_expno': dict(dtype=int, comment='Number of exposure in observing sequence'),
'slitwid': dict(dtype=float, comment='Slit width, sometimes distinct from decker'),
'slitlength': dict(dtype=float, comment='Slit length, used only for long slits'),
diff --git a/pypeit/data/arc_lines/reid_arxiv/keck_kcrm_RL.fits b/pypeit/data/arc_lines/reid_arxiv/keck_kcrm_RL.fits
new file mode 100644
index 0000000000..081de6726a
Binary files /dev/null and b/pypeit/data/arc_lines/reid_arxiv/keck_kcrm_RL.fits differ
diff --git a/pypeit/deprecated/datacube.py b/pypeit/deprecated/datacube.py
index 56b3c1cc27..abeb9a1e4c 100644
--- a/pypeit/deprecated/datacube.py
+++ b/pypeit/deprecated/datacube.py
@@ -288,3 +288,158 @@ def generate_cube_ngp(outfile, hdr, all_sci, all_ivar, all_wghts, vox_coord, bin
final_cube = DataCube(datacube.T, np.sqrt(var_cube.T), bpmcube.T, specname, blaze_wave, blaze_spec,
sensfunc=sensfunc, fluxed=fluxcal)
final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite)
+
+
+def gaussian2D_cube(tup, intflux, xo, yo, dxdz, dydz, sigma_x, sigma_y, theta, offset):
+ """
+ Fit a 2D Gaussian function to a datacube. This function assumes that each
+ wavelength slice of the datacube is well-fit by a 2D Gaussian. The centre of
+ the Gaussian is allowed to vary linearly as a function of wavelength.
+
+ .. note::
+
+ The integrated flux does not vary with wavelength.
+
+ Args:
+ tup (:obj:`tuple`):
+ A three element tuple containing the x, y, and z locations of each
+ pixel in the cube
+ intflux (float):
+ The integrated flux of the Gaussian
+ xo (float):
+ The centre of the Gaussian along the x-coordinate when z=0
+ yo (float):
+ The centre of the Gaussian along the y-coordinate when z=0
+ dxdz (float):
+ The change of xo with increasing z
+ dydz (float):
+ The change of yo with increasing z
+ sigma_x (float):
+ The standard deviation in the x-direction
+ sigma_y (float):
+ The standard deviation in the y-direction
+ theta (float):
+ The orientation angle of the 2D Gaussian
+ offset (float):
+ Constant offset
+
+ Returns:
+ `numpy.ndarray`_: The 2D Gaussian evaluated at the coordinate (x, y, z)
+ """
+ # Extract the (x, y, z) coordinates of each pixel from the tuple
+ (x, y, z) = tup
+ # Calculate the centre of the Gaussian for each z coordinate
+ xo = float(xo) + z*dxdz
+ yo = float(yo) + z*dydz
+ # Account for a rotated 2D Gaussian
+ a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
+ b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
+ c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
+ # Normalise so that the integrated flux is a parameter, instead of the amplitude
+ norm = 1/(2*np.pi*np.sqrt(a*c-b*b))
+ gtwod = offset + norm*intflux*np.exp(-(a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
+ return gtwod.ravel()
+
+
+def make_whitelight_frompixels(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat,
+ all_ivar=None, whitelightWCS=None, numra=None, numdec=None, trim=1):
+ """
+ Generate a whitelight image using the individual pixels of every input frame
+
+ Args:
+ all_ra (`numpy.ndarray`_):
+ 1D flattened array containing the RA values of each pixel from all
+ spec2d files
+ all_dec (`numpy.ndarray`_):
+ 1D flattened array containing the DEC values of each pixel from all
+ spec2d files
+ all_wave (`numpy.ndarray`_):
+ 1D flattened array containing the wavelength values of each pixel
+ from all spec2d files
+ all_sci (`numpy.ndarray`_):
+ 1D flattened array containing the counts of each pixel from all
+ spec2d files
+ all_wghts (`numpy.ndarray`_):
+ 1D flattened array containing the weights attributed to each pixel
+ from all spec2d files
+ all_idx (`numpy.ndarray`_):
+ 1D flattened array containing an integer identifier indicating which
+ spec2d file each pixel originates from. For example, a 0 would
+ indicate that a pixel originates from the first spec2d frame listed
+ in the input file. A 1 would indicate that this pixel originates
+ from the second spec2d file, and so forth.
+ dspat (float):
+ The size of each spaxel on the sky (in degrees)
+ all_ivar (`numpy.ndarray`_, optional):
+ 1D flattened array containing the inverse variance of each pixel
+ from all spec2d files. If provided, inverse variance images will be
+ calculated and returned for each white light image.
+ whitelightWCS (`astropy.wcs.WCS`_, optional):
+ The WCS of a reference white light image. If supplied, you must also
+ supply numra and numdec.
+ numra (int, optional):
+ Number of RA spaxels in the reference white light image
+ numdec (int, optional):
+ Number of DEC spaxels in the reference white light image
+ trim (int, optional):
+ Number of pixels to grow around a masked region
+
+ Returns:
+ tuple: Two 3D arrays and a WCS are returned. The arrays each have shape
+ [N, M, numfiles], where N and M are the spatial dimensions of the combined
+ white light images. The first array is a white light image, and the second
+ array is the corresponding inverse variance image; if all_ivar is None, the
+ inverse variance image will be all zeros. The third return value is the
+ `astropy.wcs.WCS`_ of the white light images.
+ """
+ # Determine number of files
+ numfiles = np.unique(all_idx).size
+
+ if whitelightWCS is None:
+ # Generate a 2D WCS to register all frames
+ coord_min = [np.min(all_ra), np.min(all_dec), np.min(all_wave)]
+ coord_dlt = [dspat, dspat, np.max(all_wave) - np.min(all_wave)]
+ whitelightWCS = generate_WCS(coord_min, coord_dlt)
+
+ # Generate coordinates
+ cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0)
+ numra = 1+int((np.max(all_ra) - np.min(all_ra)) * cosdec / dspat)
+ numdec = 1+int((np.max(all_dec) - np.min(all_dec)) / dspat)
+ else:
+ # If a WCS is supplied, the numra and numdec must be specified
+ if (numra is None) or (numdec is None):
+ msgs.error("A WCS has been supplied to make_whitelight." + msgs.newline() +
+ "numra and numdec must also be specified")
+ xbins = np.arange(1 + numra) - 1
+ ybins = np.arange(1 + numdec) - 1
+ spec_bins = np.arange(2) - 1
+ bins = (xbins, ybins, spec_bins)
+
+ whitelight_Imgs = np.zeros((numra, numdec, numfiles))
+ whitelight_ivar = np.zeros((numra, numdec, numfiles))
+ for ff in range(numfiles):
+ msgs.info("Generating white light image of frame {0:d}/{1:d}".format(ff + 1, numfiles))
+ ww = (all_idx == ff)
+ # Make the cube
+ pix_coord = whitelightWCS.wcs_world2pix(np.vstack((all_ra[ww], all_dec[ww], all_wave[ww] * 1.0E-10)).T, 0)
+ wlcube, edges = np.histogramdd(pix_coord, bins=bins, weights=all_sci[ww] * all_wghts[ww])
+ norm, edges = np.histogramdd(pix_coord, bins=bins, weights=all_wghts[ww])
+ nrmCube = (norm > 0) / (norm + (norm == 0))
+ whtlght = (wlcube * nrmCube)[:, :, 0]
+ # Create a mask of good pixels (trim the edges)
+ gpm = grow_mask(whtlght == 0, trim) == 0 # A good pixel = 1
+ whtlght *= gpm
+ # Set the masked regions to the minimum value
+ minval = np.min(whtlght[gpm == 1])
+ whtlght[gpm == 0] = minval
+ # Store the white light image
+ whitelight_Imgs[:, :, ff] = whtlght.copy()
+ # Now operate on the inverse variance image
+ if all_ivar is not None:
+ ivar_img, _ = np.histogramdd(pix_coord, bins=bins, weights=all_ivar[ww])
+ ivar_img = ivar_img[:, :, 0]
+ ivar_img *= gpm
+ minval = np.min(ivar_img[gpm == 1])
+ ivar_img[gpm == 0] = minval
+ whitelight_ivar[:, :, ff] = ivar_img.copy()
+ return whitelight_Imgs, whitelight_ivar, whitelightWCS
+
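For reference, ``gaussian2D_cube`` is shaped for use with `scipy.optimize.curve_fit`: the independent variable is the ``(x, y, z)`` coordinate tuple and the model is returned raveled. A minimal sketch on synthetic data (all parameter values and the noise level are illustrative):

.. code-block:: python

    import numpy as np
    from scipy.optimize import curve_fit

    # (x, y, z) coordinates of every voxel in a small 25x25x40 cube
    x, y, z = np.meshgrid(np.arange(25), np.arange(25), np.arange(40), indexing='ij')
    tup = (x, y, z)

    # Synthetic data: a Gaussian whose centre drifts linearly with z, plus noise
    truth = (100.0, 12.0, 13.0, 0.02, -0.01, 1.5, 2.0, 0.3, 0.5)
    flux = gaussian2D_cube(tup, *truth) + np.random.normal(0.0, 0.05, x.size)

    # Fit; popt is ordered (intflux, xo, yo, dxdz, dydz, sigma_x, sigma_y, theta, offset)
    p0 = (90.0, 12.5, 12.5, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0)
    popt, pcov = curve_fit(gaussian2D_cube, tup, flux, p0=p0)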
diff --git a/pypeit/display/display.py b/pypeit/display/display.py
index 6d3b574199..dbd0c0cda3 100644
--- a/pypeit/display/display.py
+++ b/pypeit/display/display.py
@@ -443,7 +443,7 @@ def show_slits(viewer, ch, left, right, slit_ids=None, left_ids=None, right_ids=
def show_trace(viewer, ch, trace, trc_name=None, maskdef_extr=None, manual_extr=None, clear=False,
- rotate=False, pstep=50, yval=None, color='blue'):
+ rotate=False, pstep=3, yval=None, color='blue'):
r"""
Args:
diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py
index ec0bfce603..de2b921407 100644
--- a/pypeit/find_objects.py
+++ b/pypeit/find_objects.py
@@ -42,7 +42,7 @@ class FindObjects:
Specifies object being reduced. Should be 'science',
'standard', or 'science_coadd2d'.
wv_calib (:class:`~pypeit.wavecalib.WaveCalib`, optional):
- This is only used for the IFU child when a joint sky subtraction
+ This is only used for the :class:`SlicerIFUFindObjects` child when a joint sky subtraction
is requested.
waveTilts (:class:`~pypeit.wavetilts.WaveTilts`, optional):
Calibration frame with arc/sky line tracing of the wavelength
@@ -449,7 +449,7 @@ def find_objects(self, image, ivar, std_trace=None,
# For nobj we take only the positive objects
return sobjs_obj_single, nobj_single
- # TODO maybe we don't need parent and children for this method. But IFU has a bunch of extra methods.
+ # TODO maybe we don't need parent and children for this method. But SlicerIFU has a bunch of extra methods.
def find_objects_pypeline(self, image, ivar, std_trace=None,
show_peaks=False, show_fits=False, show_trace=False,
show=False, save_objfindQA=False, neg=False, debug=False,
@@ -471,7 +471,8 @@ def get_platescale(self, slitord_id=None):
Args:
slitord_id (:obj:`int`, optional):
- slit spat_id (MultiSlit, IFU) or ech_order (Echelle) value
+ slit spat_id (:class:`MultiSlitFindObjects`, :class:`SlicerIFUFindObjects`)
+ or ech_order (:class:`EchelleFindObjects`) value.
Returns:
:obj:`float`: plate scale in binned pixels
@@ -678,7 +679,7 @@ def get_platescale(self, slitord_id=None):
Args:
slitord_id (:obj:`int`, optional):
- slit spat_id (MultiSlit, IFU) or ech_order (Echelle) value
+ slit spat_id (MultiSlit, SlicerIFU) or ech_order (Echelle) value
Returns:
:obj:`float`: plate scale in binned pixels
@@ -828,7 +829,7 @@ def get_platescale(self, slitord_id=None):
Args:
slitord_id (:obj:`int`, optional):
- slit spat_id (MultiSlit, IFU) or ech_order (Echelle) value
+ slit spat_id (MultiSlit, SlicerIFU) or ech_order (Echelle) value
Returns:
:obj:`float`: plate scale in binned pixels
@@ -941,9 +942,9 @@ def find_objects_pypeline(self, image, ivar, std_trace=None,
return sobjs_ech, len(sobjs_ech)
-class IFUFindObjects(MultiSlitFindObjects):
+class SlicerIFUFindObjects(MultiSlitFindObjects):
"""
- Child of Reduce for IFU reductions
+ Child of Reduce for SlicerIFU reductions
See parent doc string for Args and Attributes
@@ -957,7 +958,7 @@ def find_objects_pypeline(self, image, ivar, std_trace=None,
show=False, save_objfindQA=False, neg=False, debug=False,
manual_extract_dict=None):
"""
- See MultiSlitReduce for slit-based IFU reductions
+ See MultiSlitReduce for SlicerIFU reductions
"""
if self.par['reduce']['cube']['slit_spec']:
return super().find_objects_pypeline(image, ivar, std_trace=std_trace,
@@ -1239,7 +1240,7 @@ def joint_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0),
def global_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0),
previous_sky=None, show_fit=False, show=False, show_objs=False, objs_not_masked=False):
"""
- Perform global sky subtraction. This IFU-specific routine ensures that the
+ Perform global sky subtraction. This SlicerIFU-specific routine ensures that the
edges of the slits are not trimmed, and performs a spatial and spectral
correction using the sky spectrum, if requested. See Reduce.global_skysub()
for parameter definitions.
diff --git a/pypeit/flatfield.py b/pypeit/flatfield.py
index 75096d4e72..ea13b2c3c3 100644
--- a/pypeit/flatfield.py
+++ b/pypeit/flatfield.py
@@ -1370,7 +1370,7 @@ def spatial_fit(self, norm_spec, spat_coo, median_slit_width, spat_gpm, gpm, deb
def spatial_fit_finecorr(self, spat_illum, onslit_tweak, slit_idx, slit_spat, gpm, slit_trim=3, doqa=False):
"""
- Generate a relative scaling image for a slit-based IFU. All
+ Generate a relative scaling image for a slicer IFU. All
slits are scaled relative to a reference slit, specified in
the spectrograph settings file.
@@ -1478,7 +1478,7 @@ def spatial_fit_finecorr(self, spat_illum, onslit_tweak, slit_idx, slit_spat, gp
def extract_structure(self, rawflat_orig, slit_trim=3):
"""
- Generate a relative scaling image for a slit-based IFU. All
+ Generate a relative scaling image for a slicer IFU. All
slits are scaled relative to a reference slit, specified in
the spectrograph settings file.
@@ -1550,7 +1550,7 @@ def extract_structure(self, rawflat_orig, slit_trim=3):
def spectral_illumination(self, gpm=None, debug=False):
"""
- Generate a relative scaling image for a slit-based IFU. All
+ Generate a relative scaling image for a slicer IFU. All
slits are scaled relative to a reference slit, specified in
the spectrograph settings file.
diff --git a/pypeit/inputfiles.py b/pypeit/inputfiles.py
index 126ad2f2b2..83619c6d8d 100644
--- a/pypeit/inputfiles.py
+++ b/pypeit/inputfiles.py
@@ -796,14 +796,14 @@ def options(self):
Dictionary containing cube options.
"""
# Define the list of allowed parameters
- opts = dict(scale_corr=None, skysub_frame=None)
+ opts = dict(scale_corr=None, skysub_frame=None, ra_offset=None, dec_offset=None)
# Get the scale correction files
- scale_corr = self.path_and_files('scale_corr', skip_blank=True)
+ scale_corr = self.path_and_files('scale_corr', skip_blank=False, check_exists=False)
if scale_corr is None:
opts['scale_corr'] = [None]*len(self.filenames)
elif len(scale_corr) == 1 and len(self.filenames) > 1:
opts['scale_corr'] = scale_corr*len(self.filenames)
elif len(scale_corr) != 0:
opts['scale_corr'] = scale_corr
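For context, these options are parsed from the ``spec2d`` block of a ``.coadd3d`` input file, where each optional column is applied per frame. A hypothetical example (paths and filenames are illustrative):

.. code-block:: ini

    # User-defined execution parameters
    [rdx]
        spectrograph = keck_kcwi

    # Read in the data
    spec2d read
        path /path/to/Science
        filename | scale_corr | ra_offset | dec_offset
        spec2d_frame1.fits | spec2d_scale.fits | 0.0 | 0.0
        spec2d_frame2.fits | spec2d_scale.fits | 1.5 | -1.5
    spec2d end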
diff --git a/pypeit/par/pypeitpar.py b/pypeit/par/pypeitpar.py
index a68b70c3b9..2fa8156edc 100644
--- a/pypeit/par/pypeitpar.py
+++ b/pypeit/par/pypeitpar.py
@@ -337,7 +337,7 @@ def __init__(self, trim=None, apply_gain=None, orient=None,
dtypes['use_specillum'] = bool
descr['use_specillum'] = 'Use the relative spectral illumination profiles to correct ' \
'the spectral illumination profile of each slit. This is ' \
- 'primarily used for IFUs. To use this, you must set ' \
+ 'primarily used for slicer IFUs. To use this, you must set ' \
'``slit_illum_relative=True`` in the ``flatfield`` parameter set!'
# Flexure
@@ -680,7 +680,7 @@ def __init__(self, method=None, pixelflat_file=None, spec_samp_fine=None,
'for a multi-slit setup. If you set ``use_slitillum = ' \
'True`` for any of the frames that use the flatfield ' \
'model, this *must* be set to True. Currently, this is ' \
- 'only used for IFU reductions.'
+ 'only used for SlicerIFU reductions.'
defaults['illum_iter'] = 0
dtypes['illum_iter'] = int
@@ -1456,13 +1456,14 @@ def __init__(self, slit_spec=None, relative_weights=None, align=None, combine=No
'line map to register two frames.' \
defaults['method'] = "subpixel"
+ options['method'] = ["subpixel", "ngp"]
dtypes['method'] = str
descr['method'] = 'What method should be used to generate the datacube. There are currently two options: ' \
'(1) "subpixel" (default) - this algorithm divides each pixel in the spec2d frames ' \
'into subpixels, and assigns each subpixel to a voxel of the datacube. Flux is conserved, ' \
'but voxels are correlated, and the error spectrum does not account for covariance between ' \
'adjacent voxels. See also, spec_subpixel and spat_subpixel. ' \
- '(2) "NGP" (nearest grid point) - this algorithm is effectively a 3D histogram. Flux is ' \
+ '(2) "ngp" (nearest grid point) - this algorithm is effectively a 3D histogram. Flux is ' \
'conserved, voxels are not correlated, however this option suffers the same downsides as ' \
'any histogram; the choice of bin sizes can change how the datacube appears. This algorithm ' \
'takes each pixel on the spec2d frame and puts the flux of this pixel into one voxel in the ' \
@@ -1471,9 +1472,6 @@ def __init__(self, slit_spec=None, relative_weights=None, align=None, combine=No
'pixels that contribute to the same voxel are inverse variance weighted (e.g. if two ' \
'pixels have the same variance, the voxel would be assigned the average flux of the two ' \
'pixels).'
- # '(3) "resample" - this algorithm resamples the spec2d frames into a datacube. ' \
- # 'Flux is conserved, but voxels are correlated, and the error spectrum does not account ' \
- # 'for covariance between neighbouring pixels. ' \
defaults['spec_subpixel'] = 5
dtypes['spec_subpixel'] = int
@@ -1591,9 +1589,13 @@ def from_dict(cls, cfg):
return cls(**kwargs)
def validate(self):
- allowed_methods = ["subpixel", "NGP"]#, "resample"
- if self.data['method'] not in allowed_methods:
- raise ValueError("The 'method' must be one of:\n"+", ".join(allowed_methods))
+ # Check the skysub options
+ allowed_skysub_options = ["none", "image", ""]  # Note: a "None" entry is parsed as None, which is then assigned the default value, "image".
+ if self.data['skysub_frame'] not in allowed_skysub_options:
+ # Check if the supplied name exists
+ if not os.path.exists(self.data['skysub_frame']):
+ raise ValueError("The 'skysub_frame' must be one of:\n" + ", ".join(allowed_skysub_options) +
+ "\nor, the relative path to a spec2d file.")
if len(self.data['whitelight_range']) != 2:
raise ValueError("The 'whitelight_range' must be a two element list of either NoneType or float")
diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py
index c733d0b1e1..69d9908ab1 100644
--- a/pypeit/pypeit.py
+++ b/pypeit/pypeit.py
@@ -785,7 +785,7 @@ def objfind_one(self, frames, det, bg_frames=None, std_outfile=None):
(self.objtype == 'standard' and self.par['calibrations']['standardframe']['process']['spat_flexure_correct']):
spat_flexure = sciImg.spat_flexure
# Build the initial sky mask
- initial_skymask = self.load_skyregions(initial_slits=self.spectrograph.pypeline != 'IFU',
+ initial_skymask = self.load_skyregions(initial_slits=self.spectrograph.pypeline != 'SlicerIFU',
scifile=sciImg.files[0], frame=frames[0], spat_flexure=spat_flexure)
# Deal with manual extraction
@@ -942,10 +942,11 @@ def extract_one(self, frames, det, sciImg, objFind, initial_sky, sobjs_obj):
## TODO JFH I think all of this about determining the final global sky should be moved out of this method
## and preferably into the FindObjects class. I see why we are doing it like this since for multislit we need
# to find all of the objects first using slitmask meta data, but this comes at the expense of a much more complicated
- # control sctucture.
+ # control structure.
# Update the global sky
if 'standard' in self.fitstbl['frametype'][frames[0]] or \
+ self.par['reduce']['findobj']['skip_skysub'] or \
self.par['reduce']['findobj']['skip_final_global'] or \
self.par['reduce']['skysub']['user_regions'] is not None:
final_global_sky = initial_sky
diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py
index 789c071112..26e12a6bd2 100644
--- a/pypeit/scripts/coadd_datacube.py
+++ b/pypeit/scripts/coadd_datacube.py
@@ -1,6 +1,6 @@
"""
This script enables the user to convert spec2D FITS files
-from IFU instruments into a 3D cube with a defined WCS.
+from SlicerIFU instruments into a 3D cube with a defined WCS.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
@@ -10,7 +10,7 @@
from pypeit import par
from pypeit import inputfiles
from pypeit import utils
-from pypeit.core.datacube import coadd_cube
+from pypeit.coadd3d import CoAdd3D
from pypeit.spectrographs.util import load_spectrograph
from pypeit.scripts import scriptbase
@@ -52,7 +52,18 @@ def main(args):
msgs.info("Restricting to detector={}".format(args.det))
parset['rdx']['detnum'] = int(args.det)
- # Coadd the files
+ # Extract the options
+ ra_offsets = coadd3dfile.options['ra_offset']
+ dec_offsets = coadd3dfile.options['dec_offset']
+ skysub_frame = coadd3dfile.options['skysub_frame']
+ scale_corr = coadd3dfile.options['scale_corr']
+
+ # Instantiate CoAdd3d
tstart = time.time()
- coadd_cube(coadd3dfile.filenames, coadd3dfile.options, parset=parset, overwrite=args.overwrite)
+ coadd = CoAdd3D.get_instance(coadd3dfile.filenames, parset, skysub_frame=skysub_frame, scale_corr=scale_corr,
+ ra_offsets=ra_offsets, dec_offsets=dec_offsets, spectrograph=spectrograph,
+ det=args.det, overwrite=args.overwrite)
+
+ # Coadd the files
+ coadd.run()
msgs.info(utils.get_time_string(time.time()-tstart))
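The refactor leaves the command-line interface unchanged; assuming the flag names are retained, a typical invocation is still along the lines of (the input filename is illustrative):

.. code-block:: console

    $ pypeit_coadd_datacube my_target.coadd3d --overwrite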
diff --git a/pypeit/scripts/show_2dspec.py b/pypeit/scripts/show_2dspec.py
index c2c0e67363..fdfe2f5b83 100644
--- a/pypeit/scripts/show_2dspec.py
+++ b/pypeit/scripts/show_2dspec.py
@@ -200,7 +200,7 @@ def main(args):
msgs.info(f'Offsetting slits by {sci_spat_flexure} pixels.')
pypeline = hdu[f'{detname}-SCIIMG'].header['PYPELINE'] \
if 'PYPELINE' in hdu[f'{detname}-SCIIMG'].header else None
- if pypeline in ['MultiSlit', 'IFU']:
+ if pypeline in ['MultiSlit', 'SlicerIFU']:
slit_slid_IDs = slit_spat_id
elif pypeline == 'Echelle':
slit_slid_IDs = hdu[_ext].data['ech_order'] \
diff --git a/pypeit/slittrace.py b/pypeit/slittrace.py
index 7ff941178c..5554ac753b 100644
--- a/pypeit/slittrace.py
+++ b/pypeit/slittrace.py
@@ -330,13 +330,13 @@ def slit_info(self):
@property
def slitord_id(self):
"""
- Return array of slit_spatId (MultiSlit, IFU) or ech_order (Echelle) values
+ Return array of slit_spatId (MultiSlit, SlicerIFU) or ech_order (Echelle) values
Returns:
`numpy.ndarray`_:
"""
- if self.pypeline in ['MultiSlit', 'IFU']:
+ if self.pypeline in ['MultiSlit', 'SlicerIFU']:
return self.spat_id
if self.pypeline == 'Echelle':
return self.ech_order
@@ -345,13 +345,13 @@ def slitord_id(self):
@property
def slitord_txt(self):
"""
- Return string indicating if the logs/QA should use "slit" (MultiSlit, IFU) or "order" (Echelle)
+ Return string indicating if the logs/QA should use "slit" (MultiSlit, SlicerIFU) or "order" (Echelle)
Returns:
str: Either 'slit' or 'order'
"""
- if self.pypeline in ['MultiSlit', 'IFU']:
+ if self.pypeline in ['MultiSlit', 'SlicerIFU']:
return 'slit'
if self.pypeline == 'Echelle':
return 'order'
@@ -381,7 +381,7 @@ def slitord_to_zero(self, slitord):
int: zero-based index of the input spat_id
"""
- if self.pypeline in ['MultiSlit', 'IFU']:
+ if self.pypeline in ['MultiSlit', 'SlicerIFU']:
return np.where(self.spat_id == slitord)[0][0]
elif self.pypeline in ['Echelle']:
return np.where(self.ech_order == slitord)[0][0]
@@ -418,7 +418,7 @@ def get_slitlengths(self, initial=False, median=False):
def get_radec_image(self, wcs, alignSplines, tilts, initial=True, flexure=None):
"""Generate an RA and DEC image for every pixel in the frame
- NOTE: This function is currently only used for IFU reductions.
+ NOTE: This function is currently only used for SlicerIFU reductions.
Parameters
----------
@@ -447,6 +447,7 @@ def get_radec_image(self, wcs, alignSplines, tilts, initial=True, flexure=None):
reference (usually the centre of the slit) and the edges of the
slits. Shape is (nslits, 2).
"""
+ msgs.info("Generating an RA/DEC image")
# Initialise the output
raimg = np.zeros((self.nspec, self.nspat))
decimg = np.zeros((self.nspec, self.nspat))
diff --git a/pypeit/specobj.py b/pypeit/specobj.py
index a7b428aa4c..530dfef9a1 100644
--- a/pypeit/specobj.py
+++ b/pypeit/specobj.py
@@ -40,14 +40,14 @@ class SpecObj(datamodel.DataContainer):
Args:
PYPELINE (:obj:`str`):
Name of the ``PypeIt`` pipeline method. Allowed options are
- MultiSlit, Echelle, or IFU.
+ MultiSlit, Echelle, or SlicerIFU.
DET (:obj:`str`):
The name of the detector or mosaic from which the spectrum was
extracted. For example, DET01.
OBJTYPE (:obj:`str`, optional):
Object type. For example: 'unknown', 'standard', 'science'.
SLITID (:obj:`int`, optional):
- For multislit and IFU reductions, this is an identifier for the slit
+ For multislit and SlicerIFU reductions, this is an identifier for the slit
(max=9999).
ECH_ORDER (:obj:`int`, optional):
Physical order number.
@@ -265,7 +265,7 @@ def _validate(self):
"""
Validate the object.
"""
- pypelines = ['MultiSlit', 'IFU', 'Echelle']
+ pypelines = ['MultiSlit', 'SlicerIFU', 'Echelle']
if self.PYPELINE not in pypelines:
msgs.error(f'{self.PYPELINE} is not a known pipeline procedure. Options are: '
f"{', '.join(pypelines)}")
@@ -310,7 +310,7 @@ def slit_order(self):
return self.ECH_ORDER
elif self.PYPELINE == 'MultiSlit':
return self.SLITID
- elif self.PYPELINE == 'IFU':
+ elif self.PYPELINE == 'SlicerIFU':
return self.SLITID
else:
msgs.error("Bad PYPELINE")
@@ -322,7 +322,7 @@ def slit_orderindx(self):
return self.ECH_ORDERINDX
elif self.PYPELINE == 'MultiSlit':
return self.SLITID
- elif self.PYPELINE == 'IFU':
+ elif self.PYPELINE == 'SlicerIFU':
return self.SLITID
else:
msgs.error("Bad PYPELINE")
@@ -377,7 +377,7 @@ def set_name(self):
The ``PypeIt`` name depends on the type of data being processed:
- - For multislit and IFU data, the name is
+ - For multislit and SlicerIFU data, the name is
``SPATnnnn-SLITmmmm-{DET}``, where ``nnnn`` is the nearest integer
pixel in the spatial direction (at the spectral midpoint) where
the object was extracted, ``mmmm`` is the slit identification
@@ -414,7 +414,7 @@ def set_name(self):
name += '{:04d}'.format(self.ECH_ORDER)
self.ECH_NAME = ech_name
self.NAME = name
- elif self.PYPELINE in ['MultiSlit', 'IFU']:
+ elif self.PYPELINE in ['MultiSlit', 'SlicerIFU']:
# Spat
name = naming_model['spat']
if self['SPAT_PIXPOS'] is None:
diff --git a/pypeit/specobjs.py b/pypeit/specobjs.py
index 7d72fb81d5..e45c155203 100644
--- a/pypeit/specobjs.py
+++ b/pypeit/specobjs.py
@@ -257,7 +257,7 @@ def unpack_object(self, ret_flam=False, extract_type='OPT'):
meta_spec['DET'] = np.array(detector)
meta_spec['DISPNAME'] = self.header['DISPNAME']
# Return
- if self[0].PYPELINE in ['MultiSlit', 'IFU'] and self.nobj == 1:
+ if self[0].PYPELINE in ['MultiSlit', 'SlicerIFU'] and self.nobj == 1:
meta_spec['ECH_ORDERS'] = None
return wave.reshape(nspec), flux.reshape(nspec), flux_ivar.reshape(nspec), \
flux_gpm.reshape(nspec), trace_spec.reshape(nspec), trace_spat.reshape(nspec), meta_spec, self.header
@@ -282,7 +282,7 @@ def get_std(self, multi_spec_det=None):
"""
# Is this MultiSlit, SlicerIFU, or Echelle
pypeline = (self.PYPELINE)[0]
- if 'MultiSlit' in pypeline or 'IFU' in pypeline:
+ if 'MultiSlit' in pypeline or 'SlicerIFU' in pypeline:
# Have to do a loop to extract the counts for all objects
if self.OPT_COUNTS[0] is not None:
SNR = np.median(self.OPT_COUNTS * np.sqrt(self.OPT_COUNTS_IVAR), axis=1)
@@ -364,7 +364,7 @@ def append_neg(self, sobjs_neg):
sobjs_neg.OBJID = -sobjs_neg.OBJID
elif sobjs_neg[0].PYPELINE == 'MultiSlit':
sobjs_neg.OBJID = -sobjs_neg.OBJID
- elif sobjs_neg[0].PYPELINE == 'IFU':
+ elif sobjs_neg[0].PYPELINE == 'SlicerIFU':
sobjs_neg.OBJID = -sobjs_neg.OBJID
else:
msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE))
@@ -385,7 +385,7 @@ def purge_neg(self):
index = self.ECH_OBJID < 0
elif self[0].PYPELINE == 'MultiSlit':
index = self.OBJID < 0
- elif self[0].PYPELINE == 'IFU':
+ elif self[0].PYPELINE == 'SlicerIFU':
index = self.OBJID < 0
else:
msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE))
@@ -403,7 +403,7 @@ def make_neg_pos(self):
index = self.ECH_OBJID < 0
elif self[0].PYPELINE == 'MultiSlit':
index = self.OBJID < 0
- elif self[0].PYPELINE == 'IFU':
+ elif self[0].PYPELINE == 'SlicerIFU':
index = self.OBJID < 0
else:
msgs.error("Should not get here")
@@ -429,7 +429,7 @@ def slitorder_indices(self, slitorder):
indx = self.ECH_ORDER == slitorder
elif self[0].PYPELINE == 'MultiSlit':
indx = self.SLITID == slitorder
- elif self[0].PYPELINE == 'IFU':
+ elif self[0].PYPELINE == 'SlicerIFU':
indx = self.SLITID == slitorder
else:
msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE))
@@ -451,7 +451,7 @@ def name_indices(self, name):
indx = self.ECH_NAME == name
elif self[0].PYPELINE == 'MultiSlit':
indx = self.NAME == name
- elif self[0].PYPELINE == 'IFU':
+ elif self[0].PYPELINE == 'SlicerIFU':
indx = self.NAME == name
else:
msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE))
@@ -481,7 +481,7 @@ def slitorder_objid_indices(self, slitorder, objid, toler=5):
indx = (self.ECH_ORDER == slitorder) & (self.ECH_OBJID == objid)
elif self[0].PYPELINE == 'MultiSlit':
indx = (np.abs(self.SLITID - slitorder) <= toler) & (self.OBJID == objid)
- elif self[0].PYPELINE == 'IFU':
+ elif self[0].PYPELINE == 'SlicerIFU':
indx = (self.SLITID == slitorder) & (self.OBJID == objid)
else:
msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE))
@@ -863,7 +863,7 @@ def write_info(self, outfile, pypeline):
spat_fracpos.append(specobj.SPAT_FRACPOS)
slits.append(specobj.SLITID)
names.append(specobj.NAME)
- elif pypeline == 'IFU':
+ elif pypeline == 'SlicerIFU':
spat_fracpos.append(specobj.SPAT_FRACPOS)
slits.append(specobj.SLITID)
names.append(specobj.NAME)
@@ -909,7 +909,7 @@ def write_info(self, outfile, pypeline):
if pypeline == 'MultiSlit':
obj_tbl['slit'] = slits
obj_tbl['slit'].format = 'd'
- elif pypeline == 'IFU':
+ elif pypeline == 'SlicerIFU':
obj_tbl['slit'] = slits
obj_tbl['slit'].format = 'd'
elif pypeline == 'Echelle':
diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py
index 73ad5fdea8..921ecccaea 100644
--- a/pypeit/spectrographs/gemini_gnirs.py
+++ b/pypeit/spectrographs/gemini_gnirs.py
@@ -135,21 +135,30 @@ def compound_meta(self, headarr, meta_key):
return 0.0
elif meta_key == 'pressure':
try:
- return headarr[0]['PRESSURE'] * 0.001 # Must be in astropy.units.bar
+ return headarr[0]['PRESSUR2']/100.0 # Must be in astropy.units.mbar
except KeyError:
- msgs.warn("Pressure is not in header")
- return 0.0
+ msgs.warn("Pressure is not in header - The default pressure (611 mbar) will be assumed")
+ return 611.0
elif meta_key == 'temperature':
try:
return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C
except KeyError:
- msgs.warn("Temperature is not in header")
- return 0.0
+ msgs.warn("Temperature is not in header - The default temperature (1.5 deg C) will be assumed")
+ return 1.5 # van Kooten & Izett, arXiv:2208.11794
elif meta_key == 'humidity':
try:
+ # Humidity expressed as a percentage, not a fraction
return headarr[0]['HUMIDITY']
except KeyError:
- msgs.warn("Humidity is not in header")
+ msgs.warn("Humidity is not in header - The default relative humidity (20 %) will be assumed")
+ return 20.0 # van Kooten & Izett, arXiv:2208.11794
+ elif meta_key == 'parangle':
+ try:
+ msgs.warn("Parallactic angle is not available for GNIRS - DAR correction may be incorrect")
+ return headarr[0]['PARANGLE'] # Must be expressed in radians
+ except KeyError:
+ msgs.warn("Parallactic angle is not in header - The default parallactic angle (0 degrees) will be assumed")
return 0.0
else:
msgs.error("Not ready for this compound meta")
@@ -577,7 +586,7 @@ class GNIRSIFUSpectrograph(GeminiGNIRSSpectrograph):
# * Have a high threshold for detecting slit edges (par['calibrations']['slitedges']['edge_thresh'] = 100.), and have an option when inserting new traces to be the median of all other slit lengths (or a fit to the slit lengths).
# * Need to store a wavelength solution for different grating options (Note, the Holy Grail algorithm works pretty well, most of the time)
name = 'gemini_gnirs_ifu'
- pypeline = 'IFU'
+ pypeline = 'SlicerIFU'
def init_meta(self):
super().init_meta()
@@ -585,6 +594,7 @@ def init_meta(self):
self.meta['pressure'] = dict(card=None, compound=True, required=False)
self.meta['temperature'] = dict(card=None, compound=True, required=False)
self.meta['humidity'] = dict(card=None, compound=True, required=False)
+ self.meta['parangle'] = dict(card=None, compound=True, required=False)
@classmethod
def default_pypeit_par(cls):
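These unit conventions matter downstream: ERFA's refraction-constant routine (PyERFA is added as a dependency below) takes pressure in hPa (equivalent to mbar), temperature in deg C, relative humidity as a *fraction*, and wavelength in microns, so a percentage humidity has to be divided by 100 before use. A minimal sketch using the default values adopted above:

.. code-block:: python

    import erfa

    pressure = 611.0     # hPa (== mbar), the assumed default above
    temperature = 1.5    # deg C
    humidity = 20.0      # per cent, per the convention adopted in this changeset
    wavelength = 0.55    # microns

    # erfa.refco expects the relative humidity as a fraction (0-1)
    refa, refb = erfa.refco(pressure, temperature, humidity / 100.0, wavelength)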
diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py
index 9c8777746d..2c788caab9 100644
--- a/pypeit/spectrographs/gtc_osiris.py
+++ b/pypeit/spectrographs/gtc_osiris.py
@@ -173,22 +173,31 @@ def compound_meta(self, headarr, meta_key):
return binning
elif meta_key == 'pressure':
try:
- return headarr[0]['PRESSURE'] * 0.001 # Must be in astropy.units.bar
+ return headarr[0]['PRESSURE'] # Must be in astropy.units.mbar
except KeyError:
msgs.warn("Pressure is not in header")
- return 0.0
+ msgs.info("The default pressure will be assumed: 611 mbar")
+ return 611.0
elif meta_key == 'temperature':
try:
return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C
except KeyError:
msgs.warn("Temperature is not in header")
- return 0.0
+ msgs.info("The default temperature will be assumed: 1.5 deg C")
+ return 1.5
elif meta_key == 'humidity':
try:
return headarr[0]['HUMIDITY']
except KeyError:
msgs.warn("Humidity is not in header")
- return 0.0
+ msgs.info("The default relative humidity will be assumed: 20 %")
+ return 20.0
+ elif meta_key == 'parangle':
+ try:
+ msgs.work("Parallactic angle is not available for MAAT - DAR correction may be incorrect")
+ return headarr[0]['PARANG'] # Must be expressed in radians
+ except KeyError:
+ msgs.error("Parallactic angle is not in header")
elif meta_key == 'obstime':
return Time(headarr[0]['DATE-END'])
elif meta_key == 'gain':
@@ -430,7 +439,7 @@ def bpm(self, filename, det, shape=None, msbias=None):
class GTCMAATSpectrograph(GTCOSIRISPlusSpectrograph):
- pypeline = 'IFU'
+ pypeline = 'SlicerIFU'
name = 'gtc_maat'
def init_meta(self):
@@ -439,6 +448,7 @@ def init_meta(self):
self.meta['pressure'] = dict(card=None, compound=True, required=False)
self.meta['temperature'] = dict(card=None, compound=True, required=False)
self.meta['humidity'] = dict(card=None, compound=True, required=False)
+ self.meta['parangle'] = dict(card=None, compound=True, required=False)
@classmethod
def default_pypeit_par(cls):
diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py
index ecbe246898..f7163cae78 100644
--- a/pypeit/spectrographs/keck_kcwi.py
+++ b/pypeit/spectrographs/keck_kcwi.py
@@ -40,7 +40,7 @@ class KeckKCWIKCRMSpectrograph(spectrograph.Spectrograph):
"""
ndet = 1
telescope = telescopes.KeckTelescopePar()
- pypeline = 'IFU'
+ pypeline = 'SlicerIFU'
supported = True
def __init__(self):
@@ -73,6 +73,10 @@ def init_meta(self):
self.meta['mjd'] = dict(ext=0, card='MJD')
self.meta['exptime'] = dict(card=None, compound=True)
self.meta['airmass'] = dict(ext=0, card='AIRMASS')
+ self.meta['posang'] = dict(card=None, compound=True)
+ self.meta['ra_off'] = dict(ext=0, card='RAOFF')
+ self.meta['dec_off'] = dict(ext=0, card='DECOFF')
+
# Extras for config and frametyping
self.meta['hatch'] = dict(ext=0, card='HATPOS')
@@ -86,6 +90,7 @@ def init_meta(self):
self.meta['pressure'] = dict(card=None, compound=True, required=False)
self.meta['temperature'] = dict(card=None, compound=True, required=False)
self.meta['humidity'] = dict(card=None, compound=True, required=False)
+ self.meta['parangle'] = dict(card=None, compound=True, required=False)
self.meta['instrument'] = dict(ext=0, card='INSTRUME')
# Lamps
@@ -102,6 +107,7 @@ def init_meta(self):
self.meta['lampstat{:02d}'.format(len(lamp_names) + 1)] = dict(ext=0, card='FLSPECTR')
self.meta['lampshst{:02d}'.format(len(lamp_names) + 1)] = dict(ext=0, card=None, default=1)
+
def config_specific_par(self, scifile, inp_par=None):
"""
Modify the PypeIt parameters to hard-wired values used for
@@ -132,6 +138,8 @@ def config_specific_par(self, scifile, inp_par=None):
par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_kcwi_BM.fits'
elif self.get_meta_value(headarr, 'dispname') == 'BL':
par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_kcwi_BL.fits'
+ elif self.get_meta_value(headarr, 'dispname') == 'RL':
+ par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_kcrm_RL.fits'
elif self.get_meta_value(headarr, 'dispname') == 'RM1':
par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_kcrm_RM1.fits'
elif self.get_meta_value(headarr, 'dispname') == 'RM2':
@@ -163,7 +171,7 @@ def configuration_keys(self):
and used to construct the :class:`~pypeit.metadata.PypeItMetaData`
object.
"""
- return ['dispname', 'decker', 'binning', 'dispangle']
+ return ['dispname', 'decker', 'binning', 'cenwave']
def compound_meta(self, headarr, meta_key):
"""
@@ -211,11 +219,11 @@ def compound_meta(self, headarr, meta_key):
return headarr[0][hdrstr]
elif meta_key == 'pressure':
try:
- return headarr[0]['WXPRESS'] * 0.001 # Must be in astropy.units.bar
+ return headarr[0]['WXPRESS'] # Must be in astropy.units.mbar
except KeyError:
msgs.warn("Pressure is not in header")
- msgs.info("The default pressure will be assumed: 0.611 bar")
- return 0.611
+ msgs.info("The default pressure will be assumed: 611 mbar")
+ return 611.0
elif meta_key == 'temperature':
try:
return headarr[0]['WXOUTTMP'] # Must be in astropy.units.deg_C
@@ -225,13 +233,34 @@ def compound_meta(self, headarr, meta_key):
return 1.5 # van Kooten & Izett, arXiv:2208.11794
elif meta_key == 'humidity':
try:
- return headarr[0]['WXOUTHUM'] / 100.0
+ # Humidity expressed as a percentage, not a fraction
+ return headarr[0]['WXOUTHUM']
except KeyError:
msgs.warn("Humidity is not in header")
msgs.info("The default relative humidity will be assumed: 20 %")
- return 0.2 # van Kooten & Izett, arXiv:2208.11794
+ return 20.0 # van Kooten & Izett, arXiv:2208.11794
+ elif meta_key == 'parangle':
+ try:
+ # Parallactic angle expressed in radians
+ return headarr[0]['PARANG'] * np.pi / 180.0
+ except KeyError:
+ msgs.error("Parallactic angle is not in header")
elif meta_key == 'obstime':
return Time(headarr[0]['DATE-END'])
+ elif meta_key == 'posang':
+ hdr = headarr[0]
+ # Get rotator position
+ if 'ROTPOSN' in hdr:
+ rpos = hdr['ROTPOSN']
+ else:
+ rpos = 0.
+ if 'ROTREFAN' in hdr:
+ rref = hdr['ROTREFAN']
+ else:
+ rref = 0.
+ # Get the offset and PA
+ skypa = rpos + rref # IFU position angle (degrees)
+ return skypa
else:
msgs.error("Not ready for this compound meta")
@@ -271,7 +300,7 @@ def default_pypeit_par(cls):
par['reduce']['cube']['combine'] = False # Make separate spec3d files from the input spec2d files
# Sky subtraction parameters
- par['reduce']['skysub']['no_poly'] = True
+ par['reduce']['skysub']['no_poly'] = False
par['reduce']['skysub']['bspline_spacing'] = 0.6
par['reduce']['skysub']['joint_fit'] = False
@@ -294,7 +323,7 @@ def pypeit_file_keys(self):
:class:`~pypeit.metadata.PypeItMetaData` instance to print to the
:ref:`pypeit_file`.
"""
- return super().pypeit_file_keys() + ['idname', 'calpos']
+ return super().pypeit_file_keys() + ['posang', 'ra_off', 'dec_off', 'idname', 'calpos']
def check_frame_type(self, ftype, fitstbl, exprng=None):
"""
@@ -563,36 +592,17 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):
slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True))))
# Get RA/DEC
- raval = self.compound_meta([hdr], 'ra')
- decval = self.compound_meta([hdr], 'dec')
+ ra = self.compound_meta([hdr], 'ra')
+ dec = self.compound_meta([hdr], 'dec')
- # Create a coordinate
- coord = SkyCoord(raval, decval, unit=(units.deg, units.deg))
-
- # Get rotator position
- if 'ROTPOSN' in hdr:
- rpos = hdr['ROTPOSN']
- else:
- rpos = 0.
- if 'ROTREFAN' in hdr:
- rref = hdr['ROTREFAN']
- else:
- rref = 0.
- # Get the offset and PA
+ skypa = self.compound_meta([hdr], 'posang')
rotoff = 0.0 # IFU-SKYPA offset (degrees)
- skypa = rpos + rref # IFU position angle (degrees)
crota = np.radians(-(skypa + rotoff))
# Calculate the fits coordinates
cdelt1 = -slscl
cdelt2 = pxscl
- if coord is None:
- ra = 0.
- dec = 0.
- crota = 1
- else:
- ra = coord.ra.degree
- dec = coord.dec.degree
+
# Calculate the CD Matrix
cd11 = cdelt1 * np.cos(crota) # RA degrees per column
cd12 = abs(cdelt2) * np.sign(cdelt1) * np.sin(crota) # RA degrees per row
@@ -738,9 +748,9 @@ def bpm(self, filename, det, shape=None, msbias=None):
[1838, 1838, 933, 2055]]
elif ampmode == 'L2U2':
if binning == '1,1':
- bc = [[3458, 3462, 0, 613]]
+ bc = [[649, 651, 0, 613]] # This accounts for the spatflip - not sure if the 649-651 is too broad though...
elif binning == '2,2':
- bc = [[1730, 1730, 0, 307]]
+ bc = [[325, 325, 0, 307]] # This accounts for the spatflip
elif ampmode == "L2U2L1U1":
pass
# Currently unchecked...
@@ -844,6 +854,8 @@ def init_meta(self):
super().init_meta()
self.meta['dispname'] = dict(ext=0, card='BGRATNAM')
self.meta['dispangle'] = dict(ext=0, card='BGRANGLE', rtol=0.01)
+ self.meta['cenwave'] = dict(ext=0, card='BCWAVE', rtol=0.01)
+
def raw_header_cards(self):
"""
@@ -1112,7 +1124,7 @@ class KeckKCRMSpectrograph(KeckKCWIKCRMSpectrograph):
camera = 'KCRM'
url = 'https://www2.keck.hawaii.edu/inst/kcwi/' # TODO :: Need to update this website
header_name = 'KCRM'
- comment = 'Supported setups: RM1, RM2, RH3; see :doc:`keck_kcwi`'
+ comment = 'Supported setups: RL, RM1, RM2, RH3; see :doc:`keck_kcwi`'
def get_detector_par(self, det, hdu=None):
"""
@@ -1162,7 +1174,7 @@ def get_detector_par(self, det, hdu=None):
dataext = 0,
specaxis = 0,
specflip = specflip,
- spatflip = False,
+ spatflip = True, # Due to the extra mirror, the slices are flipped relative to KCWI
platescale = 0.145728, # arcsec/pixel TODO :: Need to double check this
darkcurr = None, # e-/pixel/hour TODO :: Need to check this.
mincounts = -1e10,
@@ -1207,6 +1219,7 @@ def init_meta(self):
super().init_meta()
self.meta['dispname'] = dict(ext=0, card='RGRATNAM')
self.meta['dispangle'] = dict(ext=0, card='RGRANGLE', rtol=0.01)
+ self.meta['cenwave'] = dict(ext=0, card='RCWAVE', rtol=0.01)
def raw_header_cards(self):
"""
@@ -1278,6 +1291,10 @@ def default_pypeit_par(cls):
par['calibrations']['flatfield']['slit_illum_smooth_npix'] = 5 # Sufficiently small value so less structure in relative weights
par['calibrations']['flatfield']['fit_2d_det_response'] = True # Include the 2D detector response in the pixelflat.
+ # Sky subtraction parameters
+ par['reduce']['skysub']['bspline_spacing'] = 0.4
+ par['reduce']['skysub']['joint_fit'] = False
+
return par
@staticmethod
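The CD-matrix arithmetic in ``get_wcs`` is a plain rotation of the pixel basis by the sky position angle. A standalone sketch of that logic follows; the two matrix rows not visible in the hunk above are reconstructed by symmetry, so treat them as an assumption, and the pixel scales are illustrative:

.. code-block:: python

    import numpy as np

    def cd_matrix(skypa, cdelt1, cdelt2, rotoff=0.0):
        """Build a 2x2 CD matrix (degrees per pixel) from a sky position angle."""
        crota = np.radians(-(skypa + rotoff))
        cd11 = cdelt1 * np.cos(crota)                          # RA degrees per column
        cd12 = abs(cdelt2) * np.sign(cdelt1) * np.sin(crota)   # RA degrees per row
        cd21 = -abs(cdelt1) * np.sign(cdelt2) * np.sin(crota)  # DEC degrees per column (assumed)
        cd22 = cdelt2 * np.cos(crota)                          # DEC degrees per row (assumed)
        return np.array([[cd11, cd12], [cd21, cd22]])

    # e.g. a 0.35 arcsec slice width (cdelt1 < 0) and 0.145728 arcsec pixels at PA = 30 deg
    cd = cd_matrix(skypa=30.0, cdelt1=-0.35/3600., cdelt2=0.145728/3600.)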
diff --git a/setup.cfg b/setup.cfg
index 7b7b5c476c..5f0e25ee7e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -39,6 +39,8 @@ install_requires =
scipy>=1.7
matplotlib>=3.7
PyYAML>=5.1
+ PyERFA>=2.0.0
+ fast-histogram>=0.11
configobj>=5.0.6
scikit-learn>=1.0
IPython>=7.10.0
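Of the two new dependencies, ``PyERFA`` supports the atmospheric-refraction changes above, while ``fast-histogram`` presumably speeds up the fixed-bin gridding used when building white-light images and datacubes. A minimal sketch of that gridding pattern with ``fast_histogram.histogram2d`` (all arrays are synthetic):

.. code-block:: python

    import numpy as np
    from fast_histogram import histogram2d

    # Synthetic pixel coordinates, counts, and weights
    rng = np.random.default_rng(0)
    xpix = rng.uniform(0, 24, 10000)
    ypix = rng.uniform(0, 24, 10000)
    counts = rng.normal(100.0, 1.0, 10000)
    wghts = np.ones(10000)

    bounds = [[0, 24], [0, 24]]
    # Weighted flux image and its normalisation, as in make_whitelight_frompixels
    wl = histogram2d(xpix, ypix, bins=24, range=bounds, weights=counts * wghts)
    norm = histogram2d(xpix, ypix, bins=24, range=bounds, weights=wghts)
    whitelight = wl * ((norm > 0) / (norm + (norm == 0)))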