From 2f3a1860c0f3ef12c77e4686b8122e6cb436b5ea Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Fri, 14 Jul 2023 11:54:52 -0700 Subject: [PATCH 01/12] start work on params_by_name --- pyuvdata/parameter.py | 20 ++-- pyuvdata/tests/test_utils.py | 47 +++++--- pyuvdata/utils.py | 124 +++++++++++-------- pyuvdata/uvbase.py | 10 +- pyuvdata/uvbeam/uvbeam.py | 23 +++- pyuvdata/uvcal/uvcal.py | 77 +++++++----- pyuvdata/uvdata/uvdata.py | 223 ++++++++++++++++++++--------------- pyuvdata/uvflag/uvflag.py | 45 +++++-- 8 files changed, 348 insertions(+), 221 deletions(-) diff --git a/pyuvdata/parameter.py b/pyuvdata/parameter.py index 3c09521dd3..174f8993d4 100644 --- a/pyuvdata/parameter.py +++ b/pyuvdata/parameter.py @@ -253,6 +253,7 @@ class UVParameter(object): def __init__( self, name, + *, required=True, value=None, spoof_val=None, @@ -293,7 +294,7 @@ def __init__( self.ignore_eq_none = ignore_eq_none and not required - def __eq__(self, other, silent=False): + def __eq__(self, other, *, silent=False): """ Test if classes match and values are within tolerances. @@ -562,7 +563,7 @@ def __eq__(self, other, silent=False): return True - def __ne__(self, other, silent=True): + def __ne__(self, other, *, silent=True): """ Test if classes do not match or values are not within tolerances. @@ -900,6 +901,7 @@ class LocationParameter(UVParameter): def __init__( self, name, + *, required=True, value=None, spoof_val=None, @@ -960,7 +962,10 @@ def set_lat_lon_alt(self, lat_lon_alt): self.value = None else: self.value = utils.XYZ_from_LatLonAlt( - lat_lon_alt[0], lat_lon_alt[1], lat_lon_alt[2], frame=self.frame + latitude=lat_lon_alt[0], + longitude=lat_lon_alt[1], + altitude=lat_lon_alt[2], + frame=self.frame, ) def lat_lon_alt_degrees(self): @@ -987,9 +992,9 @@ def set_lat_lon_alt_degrees(self, lat_lon_alt_degree): else: latitude, longitude, altitude = lat_lon_alt_degree self.value = utils.XYZ_from_LatLonAlt( - latitude * np.pi / 180.0, - longitude * np.pi / 180.0, - altitude, + latitude=latitude * np.pi / 180.0, + longitude=longitude * np.pi / 180.0, + altitude=altitude, frame=self.frame, ) @@ -1095,6 +1100,7 @@ class SkyCoordParameter(UVParameter): def __init__( self, name, + *, required=True, value=None, spoof_val=None, @@ -1116,7 +1122,7 @@ def __init__( tols=(0, radian_tol), ) - def __eq__(self, other, silent=False): + def __eq__(self, other, *, silent=False): if not issubclass(self.value.__class__, SkyCoord) or not issubclass( other.value.__class__, SkyCoord ): diff --git a/pyuvdata/tests/test_utils.py b/pyuvdata/tests/test_utils.py index bbcb64c25d..a7d162e6d5 100644 --- a/pyuvdata/tests/test_utils.py +++ b/pyuvdata/tests/test_utils.py @@ -1791,6 +1791,7 @@ def test_calc_app_unprojected(astrometry_args, telescope_frame): time_array=astrometry_args["time_array"], lst_array=lst_array, ) + check_coord = SkyCoord(check_ra, check_dec, unit="rad") assert np.all(astrometry_args[coord_name].separation(check_coord).uarcsec < 1.0) @@ -2270,7 +2271,7 @@ def test_lst_for_time_moon(astrometry_args): assert np.isclose(lst_array[ii], src.transform_to("icrs").ra.rad, atol=1e-4) -def test_old_phasing_funcs(): +def test_phasing_funcs(): # these tests are based on a notebook where I tested against the mwa_tools # phasing code ra_hrs = 12.1 @@ -2317,9 +2318,14 @@ def test_old_phasing_funcs(): (gcrs_from_itrs_coord.cartesian - gcrs_array_center.cartesian).get_xyz().T ) - gcrs_uvw = uvutils.old_uvw_calc( - gcrs_coord.ra.rad, gcrs_coord.dec.rad, gcrs_rel.value - ) + with uvtest.check_warnings( + DeprecationWarning, + 
match="This function supports the old phasing method and will be removed along " + "with the old phasing code in version 2.4", + ): + gcrs_uvw = uvutils.phase_uvw( + gcrs_coord.ra.rad, gcrs_coord.dec.rad, gcrs_rel.value + ) mwa_tools_calcuvw_u = -97.122828 mwa_tools_calcuvw_v = 50.388281 @@ -2329,11 +2335,15 @@ def test_old_phasing_funcs(): assert np.allclose(gcrs_uvw[0, 1], mwa_tools_calcuvw_v, atol=1e-3) assert np.allclose(gcrs_uvw[0, 2], mwa_tools_calcuvw_w, atol=1e-3) - # also test old unphasing - # this is all that is actually used in our code, it's used in the fix_phase method - temp2 = uvutils.undo_old_uvw_calc( - gcrs_coord.ra.rad, gcrs_coord.dec.rad, np.squeeze(gcrs_uvw) - ) + # also test unphasing + with uvtest.check_warnings( + DeprecationWarning, + match="This function supports the old phasing method and will be removed along " + "with the old phasing code in version 2.4", + ): + temp2 = uvutils.unphase_uvw( + gcrs_coord.ra.rad, gcrs_coord.dec.rad, np.squeeze(gcrs_uvw) + ) assert np.allclose(gcrs_rel.value, temp2) @@ -2617,14 +2627,17 @@ def test_redundancy_finder(): # Check with conjugated baseline redundancies returned # Ensure at least one baseline has u==0 and v!=0 (for coverage of this case) bl_positions[16, 0] = 0 - ( - baseline_groups, - vec_bin_centers, - lens, - conjugates, - ) = uvutils.get_baseline_redundancies( - uvd.baseline_array, bl_positions, tol=tol, include_conjugates=True - ) + with uvtest.check_warnings( + DeprecationWarning, "The with_conjugates keyword is deprecated" + ): + ( + baseline_groups, + vec_bin_centers, + lens, + conjugates, + ) = uvutils.get_baseline_redundancies( + uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True + ) # restore baseline (16,0) and repeat to get correct groups bl_positions = bl_pos_backup diff --git a/pyuvdata/utils.py b/pyuvdata/utils.py index 63ad22c0cb..204362eddd 100644 --- a/pyuvdata/utils.py +++ b/pyuvdata/utils.py @@ -176,7 +176,7 @@ def _fits_indexhdus(hdulist): return tablenames -def _get_fits_extra_keywords(header, keywords_to_skip=None): +def _get_fits_extra_keywords(header, *, keywords_to_skip=None): """ Get any extra keywords and return as dict. @@ -298,7 +298,7 @@ def _combine_history_addition(history1, history2): return add_hist -def _test_array_constant(array, tols=None): +def _test_array_constant(array, *, tols=None): """ Check if an array contains constant values to some tolerance. @@ -344,7 +344,7 @@ def _test_array_constant(array, tols=None): ) -def _test_array_constant_spacing(array, tols=None): +def _test_array_constant_spacing(array, *, tols=None): """ Check if an array is constantly spaced to some tolerance. @@ -423,6 +423,7 @@ def _check_flex_spw_contiguous(spw_array, flex_spw_id_array): def _check_freq_spacing( + *, freq_array, freq_tols, channel_width, @@ -563,6 +564,7 @@ def _check_freq_spacing( def _sort_freq_helper( + *, Nfreqs, freq_array, Nspws, @@ -789,7 +791,7 @@ def baseline_to_antnums(baseline, Nants_telescope): return ant1.item(0), ant2.item(0) -def antnums_to_baseline(ant1, ant2, Nants_telescope, attempt256=False): +def antnums_to_baseline(ant1, ant2, Nants_telescope, *, attempt256=False): """ Get the baseline number corresponding to two given antenna numbers. @@ -884,7 +886,7 @@ def wrapper(pol, x_orientation=None): @np_cache -def polstr2num(pol: str | IterableType[str], x_orientation: str | None = None): +def polstr2num(pol: str | IterableType[str], *, x_orientation: str | None = None): """ Convert polarization str to number according to AIPS Memo 117. 
@@ -942,7 +944,7 @@ def polstr2num(pol: str | IterableType[str], x_orientation: str | None = None): @np_cache -def polnum2str(num, x_orientation=None): +def polnum2str(num, *, x_orientation=None): """ Convert polarization number to str according to AIPS Memo 117. @@ -996,7 +998,7 @@ def polnum2str(num, x_orientation=None): @np_cache -def jstr2num(jstr, x_orientation=None): +def jstr2num(jstr, *, x_orientation=None): """ Convert jones polarization str to number according to calfits memo. @@ -1049,7 +1051,7 @@ def jstr2num(jstr, x_orientation=None): @np_cache -def jnum2str(jnum, x_orientation=None): +def jnum2str(jnum, *, x_orientation=None): """ Convert jones polarization number to str according to calfits memo. @@ -1101,7 +1103,7 @@ def jnum2str(jnum, x_orientation=None): @np_cache -def parse_polstr(polstr, x_orientation=None): +def parse_polstr(polstr, *, x_orientation=None): """ Parse a polarization string and return pyuvdata standard polarization string. @@ -1139,7 +1141,7 @@ def parse_polstr(polstr, x_orientation=None): @np_cache -def parse_jpolstr(jpolstr, x_orientation=None): +def parse_jpolstr(jpolstr, *, x_orientation=None): """ Parse a Jones polarization string and return pyuvdata standard jones string. @@ -1240,7 +1242,7 @@ def reorder_conj_pols(pols): return conj_order -def LatLonAlt_from_XYZ(xyz, frame="ITRS", check_acceptability=True): +def LatLonAlt_from_XYZ(xyz, *, frame="ITRS", check_acceptability=True): """ Calculate lat/lon/alt from ECEF x,y,z. @@ -1429,7 +1431,7 @@ def ECEF_from_rotECEF(xyz, longitude): return rot_matrix.dot(xyz.T).T -def ENU_from_ECEF(xyz, latitude, longitude, altitude, frame="ITRS"): +def ENU_from_ECEF(xyz, *, latitude, longitude, altitude, frame="ITRS"): """ Calculate local ENU (east, north, up) coordinates from ECEF coordinates. @@ -1514,7 +1516,7 @@ def ENU_from_ECEF(xyz, latitude, longitude, altitude, frame="ITRS"): return enu -def ECEF_from_ENU(enu, latitude, longitude, altitude, frame="ITRS"): +def ECEF_from_ENU(enu, *, latitude, longitude, altitude, frame="ITRS"): """ Calculate ECEF coordinates from local ENU (east, north, up) coordinates. @@ -1652,7 +1654,7 @@ def undo_old_uvw_calc(ra, dec, uvw): ).T -def polar2_to_cart3(lon_array, lat_array): +def polar2_to_cart3(*, lon_array, lat_array): """ Convert 2D polar coordinates into 3D cartesian coordinates. @@ -2010,6 +2012,7 @@ def _rotate_two_axis(xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2): def calc_uvw( + *, app_ra=None, app_dec=None, frame_pa=None, @@ -2297,6 +2300,7 @@ def calc_uvw( def transform_sidereal_coords( + *, lon, lat, in_coord_frame, @@ -2436,6 +2440,7 @@ def transform_sidereal_coords( def transform_icrs_to_app( + *, time_array, ra, dec, @@ -2817,7 +2822,7 @@ def transform_icrs_to_app( tt_time, (tt_time - ut1_time) * 86400.0, cat_entry, site_loc, accuracy=0 ) xyz_array = polar2_to_cart3( - temp_ra * (np.pi / 12.0), temp_dec * (np.pi / 180.0) + lon_array=temp_ra * (np.pi / 12.0), lat_array=temp_dec * (np.pi / 180.0) ) xyz_array = novas.wobble(tt_time, pm_x, pm_y, xyz_array, 1) @@ -3191,11 +3196,11 @@ def calc_frame_pos_angle( # Run the set of offset coordinates through the "reverse" transform. 
The two offset # positions are concat'd together to help reduce overheads ref_ra, ref_dec = calc_sidereal_coords( - np.tile(unique_time, 2), - np.concatenate((dn_ra, up_ra)), - np.concatenate((dn_dec, up_dec)), - telescope_loc, - ref_frame, + time_array=np.tile(unique_time, 2), + app_ra=np.concatenate((dn_ra, up_ra)), + app_dec=np.concatenate((dn_dec, up_dec)), + telescope_loc=telescope_loc, + coord_frame=ref_frame, telescope_frame=telescope_frame, coord_epoch=ref_epoch, ) @@ -3677,9 +3682,9 @@ def calc_app_coords( if lst_array is None: unique_lst = get_lst_for_time( unique_time_array, - site_loc.lat.deg, - site_loc.lon.deg, - site_loc.height.to_value("m"), + latitude=site_loc.lat.deg, + longitude=site_loc.lon.deg, + altitude=site_loc.height.to_value("m"), frame=frame, ) else: @@ -3689,10 +3694,10 @@ def calc_app_coords( # If the coordinates are not in the ICRS frame, go ahead and transform them now if coord_frame != "icrs": icrs_ra, icrs_dec = transform_sidereal_coords( - lon_coord, - lat_coord, - coord_frame, - "icrs", + lon=lon_coord, + lat=lat_coord, + in_coord_frame=coord_frame, + out_coord_frame="icrs", in_coord_epoch=coord_epoch, time_array=unique_time_array, ) @@ -3700,10 +3705,10 @@ def calc_app_coords( icrs_ra = lon_coord icrs_dec = lat_coord unique_app_ra, unique_app_dec = transform_icrs_to_app( - unique_time_array, - icrs_ra, - icrs_dec, - site_loc, + time_array=unique_time_array, + ra=icrs_ra, + dec=icrs_dec, + telescope_loc=site_loc, pm_ra=pm_ra, pm_dec=pm_dec, vrad=vrad, @@ -3726,10 +3731,10 @@ def calc_app_coords( ) if coord_frame != "icrs": icrs_ra, icrs_dec = transform_sidereal_coords( - interp_ra, - interp_dec, - coord_frame, - "icrs", + lon=interp_ra, + lat=interp_dec, + in_coord_frame=coord_frame, + out_coord_frame="icrs", in_coord_epoch=coord_epoch, time_array=unique_time_array, ) @@ -3739,7 +3744,12 @@ def calc_app_coords( # TODO: Vel and distance handling to be integrated here, once they are are # needed for velocity frame tracking unique_app_ra, unique_app_dec = transform_icrs_to_app( - unique_time_array, icrs_ra, icrs_dec, site_loc, pm_ra=pm_ra, pm_dec=pm_dec + time_array=unique_time_array, + ra=icrs_ra, + dec=icrs_dec, + telescope_loc=site_loc, + pm_ra=pm_ra, + pm_dec=pm_dec, ) elif coord_type == "unprojected": # This is the easiest one - this is just supposed to be ENU, so set the @@ -3763,6 +3773,7 @@ def calc_app_coords( def calc_sidereal_coords( + *, time_array, app_ra, app_dec, @@ -3835,10 +3846,10 @@ def calc_sidereal_coords( ref_ra, ref_dec = (icrs_ra, icrs_dec) else: ref_ra, ref_dec = transform_sidereal_coords( - icrs_ra, - icrs_dec, - "icrs", - coord_frame, + lon=icrs_ra, + lat=icrs_dec, + in_coord_frame="icrs", + out_coord_frame=coord_frame, out_coord_epoch=epoch, time_array=time_array, ) @@ -3847,6 +3858,7 @@ def calc_sidereal_coords( def get_lst_for_time( + *, jd_array=None, latitude=None, longitude=None, @@ -4369,7 +4381,7 @@ def find_clusters(location_ids, location_vectors, tol, strict=False): def get_baseline_redundancies( - baselines, baseline_vecs, tol=1.0, include_conjugates=False + baselines, baseline_vecs, *, tol=1.0, include_conjugates=False ): """ Find redundant baseline groups. @@ -4442,7 +4454,7 @@ def get_baseline_redundancies( def get_antenna_redundancies( - antenna_numbers, antenna_positions, tol=1.0, include_autos=False + antenna_numbers, antenna_positions, *, tol=1.0, include_autos=False ): """ Find redundant baseline groups based on antenna positions. 
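With the keyword-only signatures introduced above, the geodesy helpers now take their location arguments by name, as in the converted tests. A hedged usage sketch, assuming pyuvdata.utils is importable as uvutils and using placeholder site coordinates:

    import numpy as np

    from pyuvdata import utils as uvutils

    # placeholder site (radians, radians, meters), not a real telescope location
    lat, lon, alt = np.deg2rad(-30.7), np.deg2rad(21.4), 1073.0

    xyz = uvutils.XYZ_from_LatLonAlt(latitude=lat, longitude=lon, altitude=alt)
    enu = uvutils.ENU_from_ECEF(xyz, latitude=lat, longitude=lon, altitude=alt)
    xyz_back = uvutils.ECEF_from_ENU(enu, latitude=lat, longitude=lon, altitude=alt)

    # the round trip should recover the original ECEF coordinates
    assert np.allclose(xyz, xyz_back, atol=1e-3)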
@@ -4514,7 +4526,7 @@ def get_antenna_redundancies( def mean_collapse( - arr, weights=None, axis=None, return_weights=False, return_weights_square=False + arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Collapse by averaging data. @@ -4564,7 +4576,7 @@ def mean_collapse( def absmean_collapse( - arr, weights=None, axis=None, return_weights=False, return_weights_square=False + arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Collapse by averaging absolute value of data. @@ -4594,7 +4606,7 @@ def absmean_collapse( def quadmean_collapse( - arr, weights=None, axis=None, return_weights=False, return_weights_square=False + arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Collapse by averaging in quadrature. @@ -4630,7 +4642,7 @@ def quadmean_collapse( def or_collapse( - arr, weights=None, axis=None, return_weights=False, return_weights_square=False + arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Collapse using OR operation. @@ -4662,7 +4674,7 @@ def or_collapse( def and_collapse( - arr, weights=None, axis=None, return_weights=False, return_weights_square=False + arr, *, weights=None, axis=None, return_weights=False, return_weights_square=False ): """ Collapse using AND operation. @@ -4694,7 +4706,13 @@ def and_collapse( def collapse( - arr, alg, weights=None, axis=None, return_weights=False, return_weights_square=False + arr, + alg, + *, + weights=None, + axis=None, + return_weights=False, + return_weights_square=False, ): """ Parent function to collapse an array with a given algorithm. @@ -4745,6 +4763,7 @@ def collapse( def uvcalibrate( uvdata, uvcal, + *, inplace=True, prop_flags=True, Dterm_cal=False, @@ -5168,7 +5187,7 @@ def uvcalibrate( def apply_uvflag( - uvd, uvf, inplace=True, unflag_first=False, flag_missing=True, force_pol=True + uvd, uvf, *, inplace=True, unflag_first=False, flag_missing=True, force_pol=True ): """ Apply flags from a UVFlag to a UVData instantiation. @@ -5286,7 +5305,7 @@ def apply_uvflag( return uvd -def parse_ants(uv, ant_str, print_toggle=False, x_orientation=None): +def parse_ants(uv, ant_str, *, print_toggle=False, x_orientation=None): """ Get antpair and polarization from parsing an aipy-style ant string. @@ -5743,7 +5762,7 @@ def _get_slice_len(s, axlen): return ((stop - 1 - start) // step) + 1 -def _index_dset(dset, indices, input_array=None): +def _index_dset(dset, indices, *, input_array=None): """ Index a UVH5 data, flags or nsamples h5py dataset. @@ -5849,7 +5868,7 @@ def _index_dset(dset, indices, input_array=None): def determine_blt_order( - time_array, ant_1_array, ant_2_array, baseline_array, Nbls, Ntimes + *, time_array, ant_1_array, ant_2_array, baseline_array, Nbls, Ntimes ) -> tuple[str] | None: """Get the blt order from analysing metadata.""" times = time_array @@ -5990,6 +6009,7 @@ def determine_blt_order( def determine_rectangularity( + *, time_array: np.ndarray, baseline_array: np.ndarray, nbls: int, diff --git a/pyuvdata/uvbase.py b/pyuvdata/uvbase.py index 82da4768aa..8df3a4599a 100644 --- a/pyuvdata/uvbase.py +++ b/pyuvdata/uvbase.py @@ -392,7 +392,7 @@ def extra(self): yield a def __eq__( - self, other, check_extra=True, allowed_failures=("filename",), silent=False + self, other, *, check_extra=True, allowed_failures=("filename",), silent=False ): """ Test if classes match and parameters are equal. 
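Keeping `other` positional while forcing `silent` (and, on UVBase, `check_extra` and `allowed_failures`) to be keyword-only is safe because the `==` and `!=` operators only ever pass the two operands; the extra flags are opt-in for explicit calls. A small sketch of the same pattern on a toy class, not the UVBase implementation:

    class Comparable:
        def __init__(self, value):
            self.value = value

        def __eq__(self, other, *, silent=False):
            match = isinstance(other, Comparable) and self.value == other.value
            if not match and not silent:
                print("values differ")
            return match

        def __ne__(self, other, *, silent=True):
            return not self.__eq__(other, silent=silent)

    a, b = Comparable(1), Comparable(2)
    a == b                    # operator syntax passes only `other`
    a.__eq__(b, silent=True)  # explicit call opts into the keyword flag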
@@ -503,7 +503,7 @@ def __eq__( return False def __ne__( - self, other, check_extra=True, allowed_failures=("filename",), silent=True + self, other, *, check_extra=True, allowed_failures=("filename",), silent=True ): """ Test if classes match and parameters are not equal. @@ -538,7 +538,11 @@ def __ne__( ) def check( - self, check_extra=True, run_check_acceptability=True, ignore_requirements=False + self, + *, + check_extra=True, + run_check_acceptability=True, + ignore_requirements=False, ): """ Check that required parameters exist and have the correct shapes. diff --git a/pyuvdata/uvbeam/uvbeam.py b/pyuvdata/uvbeam/uvbeam.py index f7ba98835d..aea8caad34 100644 --- a/pyuvdata/uvbeam/uvbeam.py +++ b/pyuvdata/uvbeam/uvbeam.py @@ -656,7 +656,7 @@ def _set_future_array_shapes(self): ) self.future_array_shapes = True - def use_future_array_shapes(self, unset_spw_params=True): + def use_future_array_shapes(self, *, unset_spw_params=True): """ Change the array shapes of this object to match the planned future shapes. @@ -690,7 +690,7 @@ def use_future_array_shapes(self, unset_spw_params=True): self.spw_array = None self.Nspws = None - def use_current_array_shapes(self, set_spw_params=True): + def use_current_array_shapes(self, *, set_spw_params=True): """ Change the array shapes of this object to match the current future shapes. @@ -917,7 +917,7 @@ def _fix_auto_power(self): self.data_array[:, :, pol_screen] ) - def _check_auto_power(self, fix_auto_power=False, warn_tols=(0, 0)): + def _check_auto_power(self, *, fix_auto_power=False, warn_tols=(0, 0)): """ Check for complex auto polarization power beams. @@ -976,6 +976,7 @@ def _check_auto_power(self, fix_auto_power=False, warn_tols=(0, 0)): def check( self, + *, check_extra=True, run_check_acceptability=True, check_auto_power=False, @@ -1072,6 +1073,7 @@ def peak_normalize(self): def efield_to_power( self, + *, calc_cross_pols=True, keep_basis_vector=False, run_check=True, @@ -1341,6 +1343,7 @@ def _construct_mueller(self, jones, pol_index1, pol_index2): def efield_to_pstokes( self, + *, inplace=True, run_check=True, check_extra=True, @@ -1464,7 +1467,7 @@ def efield_to_pstokes( if not inplace: return beam_object - def _interp_freq(self, freq_array, kind="linear", tol=1.0): + def _interp_freq(self, freq_array, *, kind="linear", tol=1.0): """ Interpolate function along frequency axis. 
@@ -1589,6 +1592,7 @@ def get_lambda(real_lut, imag_lut=None): def _interp_az_za_rect_spline( self, + *, az_array, za_array, freq_array, @@ -1881,6 +1885,7 @@ def get_lambda(real_lut, imag_lut=None): def _interp_healpix_bilinear( self, + *, az_array, za_array, freq_array, @@ -2090,6 +2095,7 @@ def _interp_healpix_bilinear( def interp( self, + *, az_array=None, za_array=None, interpolation_function=None, @@ -2444,6 +2450,7 @@ def interp( def to_healpix( self, + *, nside=None, interpolation_function=None, run_check=True, @@ -2677,6 +2684,7 @@ def get_beam_sq_area(self, pol="pI"): def __add__( self, other, + *, verbose_history=False, inplace=False, run_check=True, @@ -3343,6 +3351,7 @@ def __iadd__(self, other): def select( self, + *, axis1_inds=None, axis2_inds=None, pixels=None, @@ -3775,6 +3784,7 @@ def _convert_to_filetype(self, filetype): def read_beamfits( self, filename, + *, use_future_array_shapes=False, run_check=True, check_extra=True, @@ -3915,6 +3925,7 @@ def _read_cst_beam_yaml(self, filename): def read_cst_beam( self, filename, + *, beam_type="power", use_future_array_shapes=False, feed_pol=None, @@ -4317,6 +4328,7 @@ def read_cst_beam( def read_mwa_beam( self, h5filepath, + *, use_future_array_shapes=False, delays=None, amplitudes=None, @@ -4395,6 +4407,7 @@ def read_mwa_beam( def read( self, filename, + *, file_type=None, skip_bad_files=False, use_future_array_shapes=False, @@ -4809,6 +4822,7 @@ def read( def from_file( cls, filename, + *, file_type=None, skip_bad_files=False, use_future_array_shapes=False, @@ -5053,6 +5067,7 @@ def from_file( def write_beamfits( self, filename, + *, run_check=True, check_extra=True, run_check_acceptability=True, diff --git a/pyuvdata/uvcal/uvcal.py b/pyuvdata/uvcal/uvcal.py index 9c9c054158..80457590cd 100644 --- a/pyuvdata/uvcal/uvcal.py +++ b/pyuvdata/uvcal/uvcal.py @@ -162,6 +162,7 @@ def __init__(self): description=desc, acceptable_range=(6.35e6, 6.39e6), tols=1e-3, + frame="itrs", required=True, ) @@ -1105,7 +1106,7 @@ def use_current_array_shapes(self): else: self.freq_range = [np.min(self.freq_array), np.max(self.freq_array)] - def set_telescope_params(self, overwrite=False): + def set_telescope_params(self, *, overwrite=False): """ Set telescope related parameters. @@ -1218,7 +1219,7 @@ def _set_lsts_helper(self, astrometry_library=None): ) return - def set_lsts_from_time_array(self, background=False, astrometry_library=None): + def set_lsts_from_time_array(self, *, background=False, astrometry_library=None): """Set the lst_array or lst_range from the time_array or time_range. Parameters @@ -1263,7 +1264,7 @@ def _check_flex_spw_contiguous(self): if self.flex_spw: uvutils._check_flex_spw_contiguous(self.spw_array, self.flex_spw_id_array) - def _check_freq_spacing(self, raise_errors=True): + def _check_freq_spacing(self, *, raise_errors=True): """ Check if frequencies are evenly spaced and separated by their channel width. 
@@ -1285,19 +1286,23 @@ def _check_freq_spacing(self, raise_errors=True): if self.freq_array is None and self.Nfreqs == 1: return False, False return uvutils._check_freq_spacing( - self.freq_array, - self._freq_array.tols, - self.channel_width, - self._channel_width.tols, - self.flex_spw, - self.future_array_shapes, - self.spw_array, - self.flex_spw_id_array, + freq_array=self.freq_array, + freq_tols=self._freq_array.tols, + channel_width=self.channel_width, + channel_width_tols=self._channel_width.tols, + flex_spw=self.flex_spw, + future_array_shapes=self.future_array_shapes, + spw_array=self.spw_array, + flex_spw_id_array=self.flex_spw_id_array, raise_errors=raise_errors, ) def check( - self, check_extra=True, run_check_acceptability=True, check_freq_spacing=False + self, + *, + check_extra=True, + run_check_acceptability=True, + check_freq_spacing=False, ): """ Add some extra checks on top of checks on UVBase class. @@ -1464,7 +1469,7 @@ def check( ) return True - def copy(self, metadata_only=False): + def copy(self, *, metadata_only=False): """ Make and return a copy of the UVCal object. @@ -1496,7 +1501,7 @@ def copy(self, metadata_only=False): return uv - def _has_key(self, antnum=None, jpol=None): + def _has_key(self, *, antnum=None, jpol=None): """ Check if this UVCal has the requested antenna or polarization. @@ -1565,7 +1570,7 @@ def jpol2ind(self, jpol): return np.argmin(np.abs(self.jones_array - jpol)) - def _slice_array(self, key, data_array, squeeze_pol=True): + def _slice_array(self, key, data_array, *, squeeze_pol=True): """ Slice a data array given a data key. @@ -1605,7 +1610,7 @@ def _slice_array(self, key, data_array, squeeze_pol=True): ] return output - def _parse_key(self, ant, jpol=None): + def _parse_key(self, ant, *, jpol=None): """ Parse key inputs and return a standard antenna-polarization key. @@ -1637,7 +1642,7 @@ def _parse_key(self, ant, jpol=None): return key - def get_gains(self, ant, jpol=None, squeeze_pol=True): + def get_gains(self, ant, *, jpol=None, squeeze_pol=True): """ Get the gain associated with an antenna and/or polarization. @@ -1665,7 +1670,7 @@ def get_gains(self, ant, jpol=None, squeeze_pol=True): self._parse_key(ant, jpol=jpol), self.gain_array, squeeze_pol=squeeze_pol ) - def get_flags(self, ant, jpol=None, squeeze_pol=True): + def get_flags(self, ant, *, jpol=None, squeeze_pol=True): """ Get the flags associated with an antenna and/or polarization. @@ -1690,7 +1695,7 @@ def get_flags(self, ant, jpol=None, squeeze_pol=True): self._parse_key(ant, jpol=jpol), self.flag_array, squeeze_pol=squeeze_pol ) - def get_quality(self, ant, jpol=None, squeeze_pol=True): + def get_quality(self, ant, *, jpol=None, squeeze_pol=True): """ Get the qualities associated with an antenna and/or polarization. 
@@ -1731,6 +1736,7 @@ def get_time_array(self): def reorder_antennas( self, + *, order="number", run_check=True, check_extra=True, @@ -1813,6 +1819,7 @@ def reorder_antennas( def reorder_freqs( self, + *, spw_order=None, channel_order=None, select_spw=None, @@ -1912,16 +1919,16 @@ def reorder_freqs( else: index_array = uvutils._sort_freq_helper( - self.Nfreqs, - self.freq_array, - self.Nspws, - self.spw_array, - self.flex_spw, - self.flex_spw_id_array, - self.future_array_shapes, - spw_order, - channel_order, - select_spw, + Nfreqs=self.Nfreqs, + freq_array=self.freq_array, + Nspws=self.Nspws, + spw_array=self.spw_array, + flex_spw=self.flex_spw, + flex_spw_id_array=self.flex_spw_id_array, + future_array_shapes=self.future_array_shapes, + spw_order=spw_order, + channel_order=channel_order, + select_spw=select_spw, ) if index_array is None: @@ -1974,6 +1981,7 @@ def reorder_freqs( def reorder_times( self, + *, order="time", run_check=True, check_extra=True, @@ -2076,6 +2084,7 @@ def reorder_times( def reorder_jones( self, + *, order="name", run_check=True, check_extra=True, @@ -2166,6 +2175,7 @@ def reorder_jones( def convert_to_gain( self, + *, freq_array=None, channel_width=None, delay_convention="minus", @@ -2391,6 +2401,7 @@ def convert_to_gain( def __add__( self, other, + *, verbose_history=False, run_check=True, check_extra=True, @@ -3587,7 +3598,7 @@ def __add__( return this def __iadd__( - self, other, run_check=True, check_extra=True, run_check_acceptability=True + self, other, *, run_check=True, check_extra=True, run_check_acceptability=True ): """ Combine two UVCal objects in place. @@ -3620,6 +3631,7 @@ def fast_concat( self, other, axis, + *, inplace=False, verbose_history=False, run_check=True, @@ -4123,6 +4135,7 @@ def fast_concat( def select( self, + *, antenna_nums=None, antenna_names=None, frequencies=None, @@ -4731,6 +4744,7 @@ def _convert_to_filetype(self, filetype): def initialize_from_uvdata( cls, uvdata, + *, gain_convention, cal_style, future_array_shapes=True, @@ -4860,7 +4874,7 @@ def read_calfits(self, filename, **kwargs): del calfits_obj def read_fhd_cal( - self, cal_file, obs_file, layout_file=None, settings_file=None, **kwargs + self, *, cal_file, obs_file, layout_file=None, settings_file=None, **kwargs ): """ Read data from an FHD cal.sav file. @@ -5371,6 +5385,7 @@ def from_file( def write_calfits( self, filename, + *, run_check=True, check_extra=True, run_check_acceptability=True, diff --git a/pyuvdata/uvdata/uvdata.py b/pyuvdata/uvdata/uvdata.py index f4b1eb5514..9da33afac1 100644 --- a/pyuvdata/uvdata/uvdata.py +++ b/pyuvdata/uvdata/uvdata.py @@ -857,6 +857,7 @@ def _look_for_name(self, cat_name): def _look_in_catalog( self, cat_name=None, + *, cat_type=None, cat_lon=None, cat_lat=None, @@ -1058,6 +1059,7 @@ def _look_in_catalog( def _add_phase_center( self, cat_name, + *, cat_type=None, cat_lon=None, cat_lat=None, @@ -1484,6 +1486,7 @@ def rename_phase_center(self, catalog_identifier, new_name): def split_phase_center( self, catalog_identifier, + *, new_name=None, select_mask=None, new_id=None, @@ -1632,7 +1635,7 @@ def split_phase_center( self.phase_center_id_array[select_mask] = cat_id def merge_phase_centers( - self, catalog_identifier, force_merge=False, ignore_name=False + self, catalog_identifier, *, force_merge=False, ignore_name=False ): """ Merge two differently named objects into one within a multi-phase-ctr data set. 
@@ -1659,10 +1662,14 @@ def merge_phase_centers( Raises ------ ValueError - If catname1 or catname2 are not found in the UVData object, of if their + If catalog_identifiers are not found in the UVData object, of if their properties differ (and `force_merge` is not set to True). + + Warns + ----- UserWarning If forcing the merge of two objects with different properties. + """ if isinstance(catalog_identifier, (str, int)): pass @@ -1736,6 +1743,7 @@ def merge_phase_centers( def print_phase_center_info( self, catalog_identifier=None, + *, hms_format=None, return_str=False, print_table=True, @@ -2010,7 +2018,7 @@ def print_phase_center_info( if return_str: return info_str - def _update_phase_center_id(self, cat_id, new_id=None, reserved_ids=None): + def _update_phase_center_id(self, cat_id, *, new_id=None, reserved_ids=None): """ Update a phase center with a new catalog ID number. @@ -2058,7 +2066,7 @@ def _update_phase_center_id(self, cat_id, new_id=None, reserved_ids=None): self.phase_center_catalog[new_id] = self.phase_center_catalog.pop(cat_id) def _consolidate_phase_center_catalogs( - self, reference_catalog=None, other=None, ignore_name=False + self, *, reference_catalog=None, other=None, ignore_name=False ): """ Consolidate phase center catalogs with a reference or another object. @@ -2353,7 +2361,7 @@ def known_telescopes(self): """ return uvtel.known_telescopes() - def set_telescope_params(self, overwrite=False, warn=True): + def set_telescope_params(self, *, overwrite=False, warn=True): """ Set telescope related parameters. @@ -2451,16 +2459,17 @@ def _set_lsts_helper(self, astrometry_library=None): latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees # the utility function is efficient -- it only calculates unique times self.lst_array = uvutils.get_lst_for_time( - self.time_array, - latitude, - longitude, - altitude, + jd_array=self.time_array, + latitude=latitude, + longitude=longitude, + altitude=altitude, + astrometry_lib=astrometry_library, frame=self._telescope_location.frame, astrometry_library=astrometry_library, ) return - def _set_app_coords_helper(self, pa_only=False): + def _set_app_coords_helper(self, *, pa_only=False): """ Set values for the apparent coordinate arrays. @@ -2534,7 +2543,7 @@ def _set_app_coords_helper(self, pa_only=False): self.phase_center_app_dec = app_dec self.phase_center_frame_pa = frame_pa - def set_lsts_from_time_array(self, background=False, astrometry_library=None): + def set_lsts_from_time_array(self, *, background=False, astrometry_library=None): """Set the lst_array based from the time_array. Parameters @@ -2580,7 +2589,7 @@ def _check_flex_spw_contiguous(self): if self.flex_spw: uvutils._check_flex_spw_contiguous(self.spw_array, self.flex_spw_id_array) - def _check_freq_spacing(self, raise_errors=True): + def _check_freq_spacing(self, *, raise_errors=True): """ Check if frequencies are evenly spaced and separated by their channel width. 
@@ -2600,18 +2609,18 @@ def _check_freq_spacing(self, raise_errors=True): """ return uvutils._check_freq_spacing( - self.freq_array, - self._freq_array.tols, - self.channel_width, - self._channel_width.tols, - self.flex_spw, - self.future_array_shapes, - self.spw_array, - self.flex_spw_id_array, + freq_array=self.freq_array, + freq_tols=self._freq_array.tols, + channel_width=self.channel_width, + channel_width_tols=self._channel_width.tols, + flex_spw=self.flex_spw, + future_array_shapes=self.future_array_shapes, + spw_array=self.spw_array, + flex_spw_id_array=self.flex_spw_id_array, raise_errors=raise_errors, ) - def remove_flex_pol(self, combine_spws=True): + def remove_flex_pol(self, *, combine_spws=True): """ Convert a flex-pol UVData object into one with a standard polarization axis. @@ -2871,7 +2880,7 @@ def remove_flex_pol(self, combine_spws=True): # Finally, remove the flex-pol attribute self.flex_spw_polarization_array = None - def _make_flex_pol(self, raise_error=False, raise_warning=True): + def _make_flex_pol(self, *, raise_error=False, raise_warning=True): """ Convert a regular UVData object into one with flex-polarization enabled. @@ -3201,6 +3210,7 @@ def _convert_old_phase_attributes(self): def check( self, + *, check_extra=True, run_check_acceptability=True, check_freq_spacing=False, @@ -3537,7 +3547,7 @@ def check( return True - def copy(self, metadata_only=False): + def copy(self, *, metadata_only=False): """ Make and return a copy of the UVData object. @@ -3591,7 +3601,7 @@ def baseline_to_antnums(self, baseline): """ return uvutils.baseline_to_antnums(baseline, self.Nants_telescope) - def antnums_to_baseline(self, ant1, ant2, attempt256=False): + def antnums_to_baseline(self, ant1, ant2, *, attempt256=False): """ Get the baseline number corresponding to two given antenna numbers. @@ -3614,7 +3624,7 @@ def antnums_to_baseline(self, ant1, ant2, attempt256=False): ant1, ant2, self.Nants_telescope, attempt256=attempt256 ) - def antpair2ind(self, ant1, ant2=None, ordered=True): + def antpair2ind(self, ant1, ant2=None, *, ordered=True): """ Get indices along the baseline-time axis for a given antenna pair. @@ -3835,7 +3845,7 @@ def _key2inds(self, key): return (blt_ind1, blt_ind2, pol_ind) def _smart_slicing( - self, data, ind1, ind2, indp, squeeze="default", force_copy=False + self, data, ind1, ind2, indp, *, squeeze="default", force_copy=False ): """ Quickly get the relevant section of a data-like array. @@ -4096,7 +4106,9 @@ def get_feedpols(self): else: return list(set("".join(self.get_pols()))) - def get_data(self, key1, key2=None, key3=None, squeeze="default", force_copy=False): + def get_data( + self, key1, key2=None, key3=None, *, squeeze="default", force_copy=False + ): """ Get the data corresonding to a baseline and/or polarization. @@ -4149,7 +4161,7 @@ def get_data(self, key1, key2=None, key3=None, squeeze="default", force_copy=Fal return out def get_flags( - self, key1, key2=None, key3=None, squeeze="default", force_copy=False + self, key1, key2=None, key3=None, *, squeeze="default", force_copy=False ): """ Get the flags corresonding to a baseline and/or polarization. @@ -4207,7 +4219,7 @@ def get_flags( return out def get_nsamples( - self, key1, key2=None, key3=None, squeeze="default", force_copy=False + self, key1, key2=None, key3=None, *, squeeze="default", force_copy=False ): """ Get the nsamples corresonding to a baseline and/or polarization. 
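The getter methods keep their key arguments positional but now take `squeeze` and `force_copy` by keyword only, matching the signatures above. A hedged usage sketch; the file name, antenna numbers, and polarization are placeholders:

    from pyuvdata import UVData

    uvd = UVData.from_file("example.uvh5")  # placeholder file

    # baseline/polarization keys stay positional, options must be named
    data = uvd.get_data(0, 1, "xx", force_copy=True)
    flags = uvd.get_flags((0, 1, "xx"), squeeze="default")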
@@ -4342,7 +4354,7 @@ def get_lsts(self, key1, key2=None, key3=None): inds1, inds2, indp = self._key2inds(key) return self.lst_array[np.append(inds1, inds2)] - def get_ENU_antpos(self, center=False, pick_data_ants=False): + def get_ENU_antpos(self, *, center=False, pick_data_ants=False): """ Get antenna positions in ENU (topocentric) coordinates in units of meters. @@ -4606,7 +4618,7 @@ def set_nsamples(self, nsamples, key1, key2=None, key3=None): return - def antpairpol_iter(self, squeeze="default"): + def antpairpol_iter(self, *, squeeze="default"): """ Iterate the data for each antpair, polarization combination. @@ -4629,7 +4641,7 @@ def antpairpol_iter(self, squeeze="default"): for key in antpairpols: yield (key, self.get_data(key, squeeze=squeeze)) - def conjugate_bls(self, convention="ant1 None: + def set_rectangularity(self, *, force: bool = False) -> None: """ Set the rectangularity attributes of the object. @@ -4878,6 +4891,7 @@ def set_rectangularity(self, force: bool = False) -> None: def reorder_blts( self, + *, order="time", minor_order=None, autos_first=False, @@ -5113,6 +5127,7 @@ def reorder_blts( def reorder_freqs( self, + *, spw_order=None, channel_order=None, select_spw=None, @@ -5177,16 +5192,16 @@ def reorder_freqs( """ index_array = uvutils._sort_freq_helper( - self.Nfreqs, - self.freq_array, - self.Nspws, - self.spw_array, - self.flex_spw, - self.flex_spw_id_array, - self.future_array_shapes, - spw_order, - channel_order, - select_spw, + Nfreqs=self.Nfreqs, + freq_array=self.freq_array, + Nspws=self.Nspws, + spw_array=self.spw_array, + flex_spw=self.flex_spw, + flex_spw_id_array=self.flex_spw_id_array, + future_array_shapes=self.future_array_shapes, + spw_order=spw_order, + channel_order=channel_order, + select_spw=select_spw, ) if index_array is None: @@ -5296,7 +5311,7 @@ def remove_eq_coeffs(self): return - def _apply_w_proj(self, new_w_vals, old_w_vals, select_mask=None): + def _apply_w_proj(self, new_w_vals, old_w_vals, *, select_mask=None): """ Apply corrections based on changes to w-coord. @@ -5394,7 +5409,7 @@ def _apply_w_proj(self, new_w_vals, old_w_vals, select_mask=None): ) def unproject_phase( - self, use_ant_pos=True, select_mask=None, cat_name="unprojected" + self, *, use_ant_pos=True, select_mask=None, cat_name="unprojected" ): """ Undo phasing to get back to an `unprojected` state. @@ -5473,6 +5488,7 @@ def unproject_phase( def _phase_dict_helper( self, + *, lon, lat, epoch, @@ -5797,19 +5813,19 @@ def phase( # basic housekeeping to make sure that we've got the coordinate data that # we need in order to proceed. phase_dict = self._phase_dict_helper( - lon, - lat, - epoch, - phase_frame, - ephem_times, - cat_type, - pm_ra, - pm_dec, - dist, - vrad, - cat_name, - lookup_name, - self.time_array, + lon=lon, + lat=lat, + epoch=epoch, + phase_frame=phase_frame, + ephem_times=ephem_times, + cat_type=cat_type, + pm_ra=pm_ra, + pm_dec=pm_dec, + dist=dist, + vrad=vrad, + cat_name=cat_name, + lookup_name=lookup_name, + time_array=self.time_array, ) if phase_dict["cat_type"] not in ["ephem", "unprojected"]: @@ -5951,7 +5967,7 @@ def phase( return def phase_to_time( - self, time, phase_frame="icrs", use_ant_pos=True, select_mask=None + self, time, *, phase_frame="icrs", use_ant_pos=True, select_mask=None ): """ Phase to the ra/dec of zenith at a particular time. 
@@ -6015,7 +6031,7 @@ def phase_to_time( cat_name=("zenith_at_jd%f" % time.jd), ) - def set_uvws_from_antenna_positions(self, update_vis=True): + def set_uvws_from_antenna_positions(self, *, update_vis=True): """ Calculate UVWs based on antenna_positions. @@ -6062,7 +6078,7 @@ def set_uvws_from_antenna_positions(self, update_vis=True): self.uvw_array = new_uvw return - def fix_phase(self, use_ant_pos=True): + def fix_phase(self, *, use_ant_pos=True): """ Fix the data to be consistent with the new phasing method. @@ -6229,6 +6245,7 @@ def fix_phase(self, use_ant_pos=True): def __add__( self, other, + *, inplace=False, verbose_history=False, run_check=True, @@ -7010,6 +7027,7 @@ def __add__( def __iadd__( self, other, + *, run_check=True, check_extra=True, run_check_acceptability=True, @@ -7073,6 +7091,7 @@ def fast_concat( self, other, axis, + *, inplace=False, verbose_history=False, run_check=True, @@ -7449,6 +7468,7 @@ def fast_concat( def sum_vis( self, other, + *, inplace=False, difference=False, verbose_history=False, @@ -7630,6 +7650,7 @@ def sum_vis( def diff_vis( self, other, + *, inplace=False, run_check=True, check_extra=True, @@ -7703,7 +7724,7 @@ def diff_vis( override_params=override_params, ) - def parse_ants(self, ant_str, print_toggle=False): + def parse_ants(self, ant_str, *, print_toggle=False): """ Get antpair and polarization from parsing an aipy-style ant string. @@ -7745,6 +7766,7 @@ def parse_ants(self, ant_str, print_toggle=False): def _select_preprocess( self, + *, antenna_nums, antenna_names, ant_str, @@ -8623,20 +8645,20 @@ def select( pol_inds, history_update_string, ) = uv_obj._select_preprocess( - antenna_nums, - antenna_names, - ant_str, - bls, - frequencies, - freq_chans, - times, - time_range, - lsts, - lst_range, - polarizations, - blt_inds, - phase_center_ids, - catalog_names, + antenna_nums=antenna_nums, + antenna_names=antenna_names, + ant_str=ant_str, + bls=bls, + frequencies=frequencies, + freq_chans=freq_chans, + times=times, + time_range=time_range, + lsts=lsts, + lst_range=lst_range, + polarizations=polarizations, + blt_inds=blt_inds, + phase_center_ids=phase_center_ids, + catalog_names=catalog_names, ) # Call the low-level selection method. 
@@ -8667,6 +8689,7 @@ def select( def _harmonize_resample_arrays( self, + *, inds_to_keep, temp_baseline, temp_id_array, @@ -8933,14 +8956,14 @@ def upsample_in_time( # harmonize temporary arrays with existing ones inds_to_keep = np.nonzero(self.integration_time <= max_int_time) self._harmonize_resample_arrays( - inds_to_keep, - temp_baseline, - temp_id_array, - temp_time, - temp_int_time, - temp_data, - temp_flag, - temp_nsample, + inds_to_keep=inds_to_keep, + temp_baseline=temp_baseline, + temp_id_array=temp_id_array, + temp_time=temp_time, + temp_int_time=temp_int_time, + temp_data=temp_data, + temp_flag=temp_flag, + temp_nsample=temp_nsample, astrometry_library=astrometry_library, ) @@ -9431,14 +9454,14 @@ def downsample_in_time( else: inds_to_keep = np.array([], dtype=bool) self._harmonize_resample_arrays( - inds_to_keep, - temp_baseline, - temp_id_array, - temp_time, - temp_int_time, - temp_data, - temp_flag, - temp_nsample, + inds_to_keep=inds_to_keep, + temp_baseline=temp_baseline, + temp_id_array=temp_id_array, + temp_time=temp_time, + temp_int_time=temp_int_time, + temp_data=temp_data, + temp_flag=temp_flag, + temp_nsample=temp_nsample, astrometry_library=astrometry_library, ) @@ -9498,6 +9521,7 @@ def downsample_in_time( def resample_in_time( self, target_time, + *, only_downsample=False, only_upsample=False, blt_order="time", @@ -9597,6 +9621,7 @@ def resample_in_time( def frequency_average( self, + *, n_chan_to_avg, summing_correlator_mode=False, propagate_flags=False, @@ -9952,6 +9977,7 @@ def frequency_average( def get_redundancies( self, + *, tol=1.0, use_antpos=False, include_conjugates=False, @@ -10026,7 +10052,7 @@ def get_redundancies( ) def compress_by_redundancy( - self, method="select", tol=1.0, inplace=True, keep_all_metadata=True + self, *, method="select", tol=1.0, inplace=True, keep_all_metadata=True ): """ Downselect or average to only have one baseline per redundant group. @@ -10266,7 +10292,7 @@ def compress_by_redundancy( bls=bl_ants, inplace=inplace, keep_all_metadata=keep_all_metadata ) - def inflate_by_redundancy(self, tol=1.0, blt_order="time", blt_minor_order=None): + def inflate_by_redundancy(self, *, tol=1.0, blt_order="time", blt_minor_order=None): """ Expand data to full size, copying data among redundant baselines. @@ -11321,6 +11347,7 @@ def read_uvh5(self, filename, **kwargs): def read( self, filename, + *, axis=None, file_type=None, read_data=True, @@ -12544,6 +12571,7 @@ def from_file(cls, filename, **kwargs): def write_miriad( self, filepath, + *, clobber=False, run_check=True, check_extra=True, @@ -12659,6 +12687,7 @@ def write_mir(self, filepath): def write_ms( self, filename, + *, force_phase=False, clobber=False, run_check=True, @@ -12727,6 +12756,7 @@ def write_ms( def write_uvfits( self, filename, + *, write_lst=True, force_phase=False, run_check=True, @@ -12819,6 +12849,7 @@ def write_uvfits( def write_uvh5( self, filename, + *, clobber=False, chunks=True, data_compression=None, @@ -12918,6 +12949,7 @@ def write_uvh5( def initialize_uvh5_file( self, filename, + *, clobber=False, chunks=True, data_compression=None, @@ -12980,6 +13012,7 @@ def initialize_uvh5_file( def write_uvh5_part( self, filename, + *, data_array, flags_array, nsample_array, @@ -13133,7 +13166,7 @@ def write_uvh5_part( ) del uvh5_obj - def normalize_by_autos(self, skip_autos=True, invert=False): + def normalize_by_autos(self, *, skip_autos=True, invert=False): """ Normalize cross-correlations by auto-correlation data. 
diff --git a/pyuvdata/uvflag/uvflag.py b/pyuvdata/uvflag/uvflag.py index 1675637754..d82f56621f 100644 --- a/pyuvdata/uvflag/uvflag.py +++ b/pyuvdata/uvflag/uvflag.py @@ -55,7 +55,7 @@ def and_rows_cols(waterfall): return wf -def flags2waterfall(uv, flag_array=None, keep_pol=False): +def flags2waterfall(uv, *, flag_array=None, keep_pol=False): """Convert a flag array to a 2D waterfall of dimensions (Ntimes, Nfreqs). Averages over baselines and polarizations (in the case of visibility data), @@ -196,6 +196,7 @@ class UVFlag(UVBase): def __init__( self, + *, indata=None, mode="metric", copy_flags=False, @@ -484,6 +485,7 @@ def __init__( "telescope_location", description=desc, acceptable_range=(6.35e6, 6.39e6), + frame="itrs", tols=1e-3, ) @@ -929,7 +931,7 @@ def _set_type_waterfall(self): if not self.future_array_shapes: self._freq_array.form = ("Nfreqs",) - def check(self, check_extra=True, run_check_acceptability=True): + def check(self, *, check_extra=True, run_check_acceptability=True): """ Add some extra checks on top of checks on UVBase class. @@ -1069,7 +1071,7 @@ def clear_unused_attributes(self): attr.value = None setattr(self, p, attr) - def __eq__(self, other, check_history=True, check_extra=True): + def __eq__(self, other, *, check_history=True, check_extra=True): """Check Equality of two UVFlag objects. Parameters @@ -1105,7 +1107,7 @@ def __eq__(self, other, check_history=True, check_extra=True): print("Classes do not match") return False - def __ne__(self, other, check_history=True, check_extra=True): + def __ne__(self, other, *, check_history=True, check_extra=True): """Not Equal.""" return not self.__eq__( other, check_history=check_history, check_extra=check_extra @@ -1123,7 +1125,7 @@ def _set_lsts_helper(self, astrometry_library=None): ) return - def set_lsts_from_time_array(self, background=False, astrometry_library=None): + def set_lsts_from_time_array(self, *, background=False, astrometry_library=None): """Set the lst_array based from the time_array. Parameters @@ -1150,7 +1152,7 @@ def set_lsts_from_time_array(self, background=False, astrometry_library=None): proc.start() return proc - def set_telescope_params(self, overwrite=False, warn=True): + def set_telescope_params(self, *, overwrite=False, warn=True): """ Set telescope related parameters. @@ -1277,7 +1279,7 @@ def baseline_to_antnums(self, baseline): assert self.type == "baseline", "Must be 'baseline' type UVFlag object." return uvutils.baseline_to_antnums(baseline, self.Nants_telescope) - def antnums_to_baseline(self, ant1, ant2, attempt256=False): + def antnums_to_baseline(self, ant1, ant2, *, attempt256=False): """ Get the baseline number corresponding to two given antenna numbers. @@ -1340,7 +1342,7 @@ def get_pols(self): self.polarization_array, x_orientation=self.x_orientation ) - def parse_ants(self, ant_str, print_toggle=False): + def parse_ants(self, ant_str, *, print_toggle=False): """ Get antpair and polarization from parsing an aipy-style ant string. 
@@ -1388,6 +1390,7 @@ def parse_ants(self, ant_str, print_toggle=False): def collapse_pol( self, + *, method="quadmean", run_check=True, check_extra=True, @@ -1461,6 +1464,7 @@ def collapse_pol( def to_waterfall( self, + *, method="quadmean", keep_pol=True, run_check=True, @@ -1587,6 +1591,7 @@ def to_waterfall( def to_baseline( self, uv, + *, force_pol=False, run_check=True, check_extra=True, @@ -1874,6 +1879,7 @@ def to_baseline( def to_antenna( self, uv, + *, force_pol=False, run_check=True, check_extra=True, @@ -2072,6 +2078,7 @@ def to_antenna( def to_flag( self, + *, threshold=np.inf, run_check=True, check_extra=True, @@ -2118,6 +2125,7 @@ def to_flag( def to_metric( self, + *, convert_wgts=False, run_check=True, check_extra=True, @@ -2201,6 +2209,7 @@ def to_metric( def __add__( self, other, + *, inplace=False, axis="time", run_check=True, @@ -2483,6 +2492,7 @@ def __add__( def __iadd__( self, other, + *, axis="time", run_check=True, check_extra=True, @@ -2519,6 +2529,7 @@ def __iadd__( def __or__( self, other, + *, inplace=False, run_check=True, check_extra=True, @@ -2573,7 +2584,7 @@ def __or__( return this def __ior__( - self, other, run_check=True, check_extra=True, run_check_acceptability=True + self, other, *, run_check=True, check_extra=True, run_check_acceptability=True ): """Perform an inplace logical or. @@ -2603,6 +2614,7 @@ def __ior__( def combine_metrics( self, others, + *, method="quadmean", inplace=True, run_check=True, @@ -2671,6 +2683,7 @@ def combine_metrics( def _select_preprocess( self, + *, antenna_nums, ant_str, bls, @@ -3044,7 +3057,7 @@ def _select_preprocess( return blt_inds, ant_inds, freq_inds, pol_inds, history_update_string def _select_metadata( - self, blt_inds, ant_inds, freq_inds, pol_inds, history_update_string + self, *, blt_inds, ant_inds, freq_inds, pol_inds, history_update_string ): """Perform select on everything except the data-sized arrays. @@ -3107,6 +3120,7 @@ def _select_metadata( def select( self, + *, antenna_nums=None, ant_inds=None, bls=None, @@ -3231,7 +3245,11 @@ def select( # do select operations on everything except data_array, flag_array # and nsample_array uv_object._select_metadata( - blt_inds, ant_inds, freq_inds, pol_inds, history_update_string + blt_inds=blt_inds, + ant_inds=ant_inds, + freq_inds=freq_inds, + pol_inds=pol_inds, + history_update_string=history_update_string, ) if blt_inds is not None: @@ -3333,6 +3351,7 @@ def select( def read( self, filename, + *, history="", mwa_metafits_file=None, telescope_name=None, @@ -3774,7 +3793,7 @@ def read( run_check_acceptability=run_check_acceptability, ) - def write(self, filename, clobber=False, data_compression="lzf"): + def write(self, filename, *, clobber=False, data_compression="lzf"): """Write a UVFlag object to a hdf5 file. 
Parameters @@ -3908,6 +3927,7 @@ def write(self, filename, clobber=False, data_compression="lzf"): def from_uvdata( self, indata, + *, mode="metric", copy_flags=False, waterfall=False, @@ -4100,6 +4120,7 @@ def from_uvdata( def from_uvcal( self, indata, + *, mode="metric", copy_flags=False, waterfall=False, From 0c89f3f4f68c84866958a3c7f73635d333e696d8 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Sun, 15 Oct 2023 15:48:55 -0700 Subject: [PATCH 02/12] more work on requiring params by name --- pyuvdata/tests/test_utils.py | 639 ++++++++++++++++++----------- pyuvdata/utils.py | 164 +++++--- pyuvdata/uvcal/tests/test_uvcal.py | 10 +- pyuvdata/uvcal/uvcal.py | 10 +- pyuvdata/uvdata/uvdata.py | 58 +-- pyuvdata/uvdata/uvfits.py | 28 +- pyuvdata/uvdata/uvh5.py | 28 +- pyuvdata/uvflag/uvflag.py | 6 +- 8 files changed, 572 insertions(+), 371 deletions(-) diff --git a/pyuvdata/tests/test_utils.py b/pyuvdata/tests/test_utils.py index a7d162e6d5..ea78cdb788 100644 --- a/pyuvdata/tests/test_utils.py +++ b/pyuvdata/tests/test_utils.py @@ -53,10 +53,10 @@ def astrometry_args(): "library": "erfa", } default_args["lst_array"] = uvutils.get_lst_for_time( - default_args["time_array"], - default_args["telescope_loc"][0] * (180.0 / np.pi), - default_args["telescope_loc"][1] * (180.0 / np.pi), - default_args["telescope_loc"][2], + jd_array=default_args["time_array"], + latitude=default_args["telescope_loc"][0] * (180.0 / np.pi), + longitude=default_args["telescope_loc"][1] * (180.0 / np.pi), + altitude=default_args["telescope_loc"][2], frame="itrs", ) @@ -71,10 +71,10 @@ def astrometry_args(): np.deg2rad(x) for x in (0.6875, 24.433, 0) ) default_args["moon_lst_array"] = uvutils.get_lst_for_time( - default_args["time_array"], - default_args["moon_telescope_loc"][0] * (180.0 / np.pi), - default_args["moon_telescope_loc"][1] * (180.0 / np.pi), - default_args["moon_telescope_loc"][2], + jd_array=default_args["time_array"], + latitude=default_args["moon_telescope_loc"][0] * (180.0 / np.pi), + longitude=default_args["moon_telescope_loc"][1] * (180.0 / np.pi), + altitude=default_args["moon_telescope_loc"][2], frame="mcmf", ) default_args["moon_drift_coord"] = SkyCoord( @@ -89,20 +89,20 @@ def astrometry_args(): ) default_args["fk5_ra"], default_args["fk5_dec"] = uvutils.transform_sidereal_coords( - default_args["icrs_ra"], - default_args["icrs_dec"], - "icrs", - "fk5", + longitude=default_args["icrs_ra"], + latitude=default_args["icrs_dec"], + in_coord_frame="icrs", + out_coord_frame="fk5", in_coord_epoch="J2000.0", out_coord_epoch="J2000.0", ) # These are values calculated w/o the optional arguments, e.g. 
pm, vrad, dist default_args["app_ra"], default_args["app_dec"] = uvutils.transform_icrs_to_app( - default_args["time_array"], - default_args["icrs_ra"], - default_args["icrs_dec"], - default_args["telescope_loc"], + time_array=default_args["time_array"], + ra=default_args["icrs_ra"], + dec=default_args["icrs_dec"], + telescope_loc=default_args["telescope_loc"], ) default_args["app_coord"] = SkyCoord( @@ -114,10 +114,10 @@ def astrometry_args(): default_args["moon_app_ra"], default_args["moon_app_dec"], ) = uvutils.transform_icrs_to_app( - default_args["time_array"], - default_args["icrs_ra"], - default_args["icrs_dec"], - default_args["moon_telescope_loc"], + time_array=default_args["time_array"], + ra=default_args["icrs_ra"], + dec=default_args["icrs_dec"], + telescope_loc=default_args["moon_telescope_loc"], telescope_frame="mcmf", ) @@ -439,7 +439,9 @@ def test_enu_from_ecef(enu_ecef_info): ) = enu_ecef_info xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) - enu = uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt) + enu = uvutils.ENU_from_ECEF( + xyz, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) assert np.allclose(np.stack((east, north, up), axis=1), enu, atol=1e-3) @@ -460,7 +462,13 @@ def test_enu_from_mcmf(enu_mcmf_info): up, ) = enu_mcmf_info xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts, frame="mcmf") - enu = uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt, frame="mcmf") + enu = uvutils.ENU_from_ECEF( + xyz, + latitude=center_lat, + longitude=center_lon, + altitude=center_alt, + frame="mcmf", + ) assert np.allclose(np.stack((east, north, up), axis=1), enu, atol=1e-3) @@ -469,11 +477,15 @@ def test_invalid_frame(): with pytest.raises( ValueError, match='No ENU_from_ECEF transform defined for frame "UNDEF".' ): - uvutils.ENU_from_ECEF(np.zeros((2, 3)), 0.0, 0.0, 0.0, frame="undef") + uvutils.ENU_from_ECEF( + np.zeros((2, 3)), latitude=0.0, longitude=0.0, altitude=0.0, frame="undef" + ) with pytest.raises( ValueError, match='No ECEF_from_ENU transform defined for frame "UNDEF".' ): - uvutils.ECEF_from_ENU(np.zeros((2, 3)), 0.0, 0.0, 0.0, frame="undef") + uvutils.ECEF_from_ENU( + np.zeros((2, 3)), latitude=0.0, longitude=0.0, altitude=0.0, frame="undef" + ) @pytest.mark.parametrize("shape_type", ["transpose", "Nblts,2", "Nblts,1"]) @@ -503,7 +515,9 @@ def test_enu_from_ecef_shape_errors(enu_ecef_info, shape_type): # check error if array transposed with pytest.raises(ValueError) as cm: - uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt) + uvutils.ENU_from_ECEF( + xyz, longitude=center_lat, latitude=center_lon, altitude=center_alt + ) assert str(cm.value).startswith( "The expected shape of ECEF xyz array is (Npts, 3)." 
) @@ -528,7 +542,9 @@ def test_enu_from_ecef_magnitude_error(enu_ecef_info): xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) # error checking with pytest.raises(ValueError) as cm: - uvutils.ENU_from_ECEF(xyz / 2.0, center_lat, center_lon, center_alt) + uvutils.ENU_from_ECEF( + xyz / 2.0, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) assert str(cm.value).startswith( "ITRS vector magnitudes must be on the order of the radius of the earth" ) @@ -544,10 +560,12 @@ def test_ecef_from_enu_roundtrip(enu_ecef_info, enu_mcmf_info, frame): enu_ecef_info if frame == "itrs" else enu_mcmf_info ) xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts, frame=frame) - enu = uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt, frame=frame) + enu = uvutils.ENU_from_ECEF( + xyz, latitude=center_lat, longitude=center_lon, altitude=center_alt, frame=frame + ) # check that a round trip gives the original value. xyz_from_enu = uvutils.ECEF_from_ENU( - enu, center_lat, center_lon, center_alt, frame=frame + enu, latitude=center_lat, longitude=center_lon, altitude=center_alt, frame=frame ) assert np.allclose(xyz, xyz_from_enu, atol=1e-3) @@ -569,7 +587,9 @@ def test_ecef_from_enu_shape_errors(enu_ecef_info, shape_type): up, ) = enu_ecef_info xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) - enu = uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt) + enu = uvutils.ENU_from_ECEF( + xyz, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) if shape_type == "transpose": enu = enu.copy().T elif shape_type == "Nblts,2": @@ -579,7 +599,9 @@ def test_ecef_from_enu_shape_errors(enu_ecef_info, shape_type): # check error if array transposed with pytest.raises(ValueError) as cm: - uvutils.ECEF_from_ENU(enu, center_lat, center_lon, center_alt) + uvutils.ECEF_from_ENU( + enu, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) assert str(cm.value).startswith("The expected shape of the ENU array is (Npts, 3).") @@ -601,7 +623,9 @@ def test_ecef_from_enu_single(enu_ecef_info): ) = enu_ecef_info xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) # check passing a single value - enu_single = uvutils.ENU_from_ECEF(xyz[0, :], center_lat, center_lon, center_alt) + enu_single = uvutils.ENU_from_ECEF( + xyz[0, :], latitude=center_lat, longitude=center_lon, altitude=center_alt + ) assert np.allclose(np.array((east[0], north[0], up[0])), enu_single, atol=1e-3) @@ -624,12 +648,18 @@ def test_ecef_from_enu_single_roundtrip(enu_ecef_info): ) = enu_ecef_info xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts) # check passing a single value - enu = uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt) + enu = uvutils.ENU_from_ECEF( + xyz, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) - enu_single = uvutils.ENU_from_ECEF(xyz[0, :], center_lat, center_lon, center_alt) + enu_single = uvutils.ENU_from_ECEF( + xyz[0, :], latitude=center_lat, longitude=center_lon, altitude=center_alt + ) assert np.allclose(np.array((east[0], north[0], up[0])), enu[0, :], atol=1e-3) - xyz_from_enu = uvutils.ECEF_from_ENU(enu_single, center_lat, center_lon, center_alt) + xyz_from_enu = uvutils.ECEF_from_ENU( + enu_single, latitude=center_lat, longitude=center_lon, altitude=center_alt + ) assert np.allclose(xyz[0, :], xyz_from_enu, atol=1e-3) @@ -673,7 +703,7 @@ def test_mwa_ecef_conversion(): # add in array center to get real ECEF ecef_xyz = new_xyz + arrcent - enu = uvutils.ENU_from_ECEF(ecef_xyz, lat, lon, alt) + enu = uvutils.ENU_from_ECEF(ecef_xyz, latitude=lat, 
longitude=lon, altitude=alt) assert np.allclose(enu, enh) @@ -683,18 +713,18 @@ def test_mwa_ecef_conversion(): @pytest.mark.parametrize( - "input1,input2,msg", + "lon_array,lat_array,msg", ( [0.0, np.array([0.0]), "lon_array and lat_array must either both be floats or"], [np.array([0.0, 1.0]), np.array([0.0]), "lon_array and lat_array must have "], ), ) -def test_polar2_to_cart3_arg_errs(input1, input2, msg): +def test_polar2_to_cart3_arg_errs(lon_array, lat_array, msg): """ Test that bad arguments to polar2_to_cart3 throw appropriate errors. """ with pytest.raises(ValueError) as cm: - uvutils.polar2_to_cart3(input1, input2) + uvutils.polar2_to_cart3(lon_array=lon_array, lat_array=lat_array) assert str(cm.value).startswith(msg) @@ -729,7 +759,9 @@ def test_rotate_matmul_wrapper_arg_errs(input1, input2, input3, msg): Test that bad arguments to _rotate_matmul_wrapper throw appropriate errors. """ with pytest.raises(ValueError) as cm: - uvutils._rotate_matmul_wrapper(input1, input2, input3) + uvutils._rotate_matmul_wrapper( + xyz_array=input1, rot_matrix=input2, n_rot=input3 + ) assert str(cm.value).startswith(msg) @@ -738,7 +770,9 @@ def test_cart_to_polar_roundtrip(): Test that polar->cart coord transformation is the inverse of cart->polar. """ # Basic round trip with vectors - assert uvutils.cart3_to_polar2(uvutils.polar2_to_cart3(0.0, 0.0)) == (0.0, 0.0) + assert uvutils.cart3_to_polar2( + uvutils.polar2_to_cart3(lon_array=0.0, lat_array=0.0) + ) == (0.0, 0.0) def test_rotate_one_axis(vector_list): @@ -750,50 +784,88 @@ def test_rotate_one_axis(vector_list): x_vecs, y_vecs, z_vecs, test_vecs = vector_list # Test no-ops w/ 0 deg rotations - assert np.all(uvutils._rotate_one_axis(x_vecs, 0.0, 0) == x_vecs) assert np.all( - uvutils._rotate_one_axis(x_vecs[:, 0], 0.0, 1) + uvutils._rotate_one_axis(xyz_array=x_vecs, rot_amount=0.0, rot_axis=0) == x_vecs + ) + assert np.all( + uvutils._rotate_one_axis(xyz_array=x_vecs[:, 0], rot_amount=0.0, rot_axis=1) == x_vecs[np.newaxis, :, 0, np.newaxis] ) assert np.all( - uvutils._rotate_one_axis(x_vecs[:, :, np.newaxis], 0.0, 2) + uvutils._rotate_one_axis( + xyz_array=x_vecs[:, :, np.newaxis], rot_amount=0.0, rot_axis=2 + ) == x_vecs[:, :, np.newaxis] ) # Test no-ops w/ None - assert np.all(uvutils._rotate_one_axis(test_vecs, None, 1) == test_vecs) assert np.all( - uvutils._rotate_one_axis(test_vecs[:, 0], None, 2) + uvutils._rotate_one_axis(xyz_array=test_vecs, rot_amount=None, rot_axis=1) + == test_vecs + ) + assert np.all( + uvutils._rotate_one_axis(xyz_array=test_vecs[:, 0], rot_amount=None, rot_axis=2) == test_vecs[np.newaxis, :, 0, np.newaxis] ) assert np.all( - uvutils._rotate_one_axis(test_vecs[:, :, np.newaxis], None, 0) + uvutils._rotate_one_axis( + xyz_array=test_vecs[:, :, np.newaxis], rot_amount=None, rot_axis=0 + ) == test_vecs[:, :, np.newaxis] ) # Test some basic equivalencies to make sure rotations are working correctly - assert np.allclose(x_vecs, uvutils._rotate_one_axis(x_vecs, 1.0, 0)) - assert np.allclose(y_vecs, uvutils._rotate_one_axis(y_vecs, 2.0, 1)) - assert np.allclose(z_vecs, uvutils._rotate_one_axis(z_vecs, 3.0, 2)) + assert np.allclose( + x_vecs, uvutils._rotate_one_axis(xyz_array=x_vecs, rot_amount=1.0, rot_axis=0) + ) + assert np.allclose( + y_vecs, uvutils._rotate_one_axis(xyz_array=y_vecs, rot_amount=2.0, rot_axis=1) + ) + assert np.allclose( + z_vecs, uvutils._rotate_one_axis(xyz_array=z_vecs, rot_amount=3.0, rot_axis=2) + ) - assert np.allclose(x_vecs, uvutils._rotate_one_axis(y_vecs, -np.pi / 2.0, 2)) - assert 
np.allclose(y_vecs, uvutils._rotate_one_axis(x_vecs, np.pi / 2.0, 2)) - assert np.allclose(x_vecs, uvutils._rotate_one_axis(z_vecs, np.pi / 2.0, 1)) - assert np.allclose(z_vecs, uvutils._rotate_one_axis(x_vecs, -np.pi / 2.0, 1)) - assert np.allclose(y_vecs, uvutils._rotate_one_axis(z_vecs, -np.pi / 2.0, 0)) - assert np.allclose(z_vecs, uvutils._rotate_one_axis(y_vecs, np.pi / 2.0, 0)) + assert np.allclose( + x_vecs, + uvutils._rotate_one_axis(xyz_array=y_vecs, rot_amount=-np.pi / 2.0, rot_axis=2), + ) + assert np.allclose( + y_vecs, + uvutils._rotate_one_axis(xyz_array=x_vecs, rot_amount=np.pi / 2.0, rot_axis=2), + ) + assert np.allclose( + x_vecs, + uvutils._rotate_one_axis(xyz_array=z_vecs, rot_amount=np.pi / 2.0, rot_axis=1), + ) + assert np.allclose( + z_vecs, + uvutils._rotate_one_axis(xyz_array=x_vecs, rot_amount=-np.pi / 2.0, rot_axis=1), + ) + assert np.allclose( + y_vecs, + uvutils._rotate_one_axis(xyz_array=z_vecs, rot_amount=-np.pi / 2.0, rot_axis=0), + ) + assert np.allclose( + z_vecs, + uvutils._rotate_one_axis(xyz_array=y_vecs, rot_amount=np.pi / 2.0, rot_axis=0), + ) assert np.all( np.equal( - uvutils._rotate_one_axis(test_vecs, 1.0, 2), - uvutils._rotate_one_axis(test_vecs, 1.0, np.array([2])), + uvutils._rotate_one_axis(xyz_array=test_vecs, rot_amount=1.0, rot_axis=2), + uvutils._rotate_one_axis( + xyz_array=test_vecs, rot_amount=1.0, rot_axis=np.array([2]) + ), ) ) # Testing a special case, where the xyz_array vectors are reshaped if there # is only a single rotation matrix used (helps speed things up significantly) mod_vec = x_vecs.T.reshape((2, 3, 1)) - assert np.all(uvutils._rotate_one_axis(mod_vec, 1.0, 0) == mod_vec) + assert np.all( + uvutils._rotate_one_axis(xyz_array=mod_vec, rot_amount=1.0, rot_axis=0) + == mod_vec + ) def test_rotate_two_axis(vector_list): @@ -804,15 +876,49 @@ def test_rotate_two_axis(vector_list): # These tests are used to verify the basic functionality of the primary # functions used to two-axis rotations - assert np.allclose(x_vecs, uvutils._rotate_two_axis(x_vecs, 2 * np.pi, 1.0, 1, 0)) - assert np.allclose(y_vecs, uvutils._rotate_two_axis(y_vecs, 2 * np.pi, 2.0, 2, 1)) - assert np.allclose(z_vecs, uvutils._rotate_two_axis(z_vecs, 2 * np.pi, 3.0, 0, 2)) + assert np.allclose( + x_vecs, + uvutils._rotate_two_axis( + xyz_array=x_vecs, + rot_amount1=2 * np.pi, + rot_amount2=1.0, + rot_axis1=1, + rot_axis2=0, + ), + ) + assert np.allclose( + y_vecs, + uvutils._rotate_two_axis( + xyz_array=y_vecs, + rot_amount1=2 * np.pi, + rot_amount2=2.0, + rot_axis1=2, + rot_axis2=1, + ), + ) + assert np.allclose( + z_vecs, + uvutils._rotate_two_axis( + xyz_array=z_vecs, + rot_amount1=2 * np.pi, + rot_amount2=3.0, + rot_axis1=0, + rot_axis2=2, + ), + ) # Do one more test, which verifies that we can filp our (1,1,1) test vector to # the postiion at (-1, -1 , -1) mod_vec = test_vecs.T.reshape((2, 3, 1)) assert np.allclose( - uvutils._rotate_two_axis(mod_vec, np.pi, np.pi / 2.0, 0, 1), -mod_vec + uvutils._rotate_two_axis( + xyz_array=mod_vec, + rot_amount1=np.pi, + rot_amount2=np.pi / 2.0, + rot_axis1=0, + rot_axis2=1, + ), + -mod_vec, ) @@ -835,8 +941,16 @@ def test_compare_one_to_two_axis(vector_list, rot1, axis1, rot2, rot3, axis2, ax # a single rot (with the rot angle equal to the sum of the two rot angles) assert np.all( np.equal( - uvutils._rotate_one_axis(test_vecs, rot1, axis1), - uvutils._rotate_two_axis(test_vecs, rot2, rot3, axis2, axis3), + uvutils._rotate_one_axis( + xyz_array=test_vecs, rot_amount=rot1, rot_axis=axis1 + ), + 
uvutils._rotate_two_axis( + xyz_array=test_vecs, + rot_amount1=rot2, + rot_amount2=rot3, + rot_axis1=axis2, + rot_axis2=axis3, + ), ) ) @@ -1156,10 +1270,10 @@ def test_transform_icrs_to_app_arg_errs(astrometry_args, arg_dict, msg): # Start w/ the transform_icrs_to_app block with pytest.raises(ValueError) as cm: uvutils.transform_icrs_to_app( - default_args["time_array"], - default_args["icrs_ra"], - default_args["icrs_dec"], - default_args["telescope_loc"], + time_array=default_args["time_array"], + ra=default_args["icrs_ra"], + dec=default_args["icrs_dec"], + telescope_loc=default_args["telescope_loc"], telescope_frame=default_args["telescope_frame"], pm_ra=default_args["pm_ra"], pm_dec=default_args["pm_dec"], @@ -1190,10 +1304,10 @@ def test_transform_app_to_icrs_arg_errs(astrometry_args, arg_dict, msg): with pytest.raises(ValueError) as cm: uvutils.transform_app_to_icrs( - default_args["time_array"], - default_args["app_ra"], - default_args["app_dec"], - default_args["telescope_loc"], + time_array=default_args["time_array"], + app_ra=default_args["app_ra"], + app_dec=default_args["app_dec"], + telescope_loc=default_args["telescope_loc"], telescope_frame=default_args["telescope_frame"], astrometry_library=default_args["library"], ) @@ -1207,10 +1321,10 @@ def test_transform_sidereal_coords_arg_errs(): # Next on to sidereal to sidereal with pytest.raises(ValueError) as cm: uvutils.transform_sidereal_coords( - [0.0], - [0.0, 1.0], - "fk5", - "icrs", + longitude=[0.0], + latitude=[0.0, 1.0], + in_coord_frame="fk5", + out_coord_frame="icrs", in_coord_epoch="J2000.0", time_array=[0.0, 1.0, 2.0], ) @@ -1218,10 +1332,10 @@ def test_transform_sidereal_coords_arg_errs(): with pytest.raises(ValueError) as cm: uvutils.transform_sidereal_coords( - [0.0, 1.0], - [0.0, 1.0], - "fk4", - "fk4", + longitude=[0.0, 1.0], + latitude=[0.0, 1.0], + in_coord_frame="fk4", + out_coord_frame="fk4", in_coord_epoch=1950.0, out_coord_epoch=1984.0, time_array=[0.0, 1.0, 2.0], @@ -1339,10 +1453,10 @@ def test_interpolate_ephem_arg_errs(bad_arg, msg): # Now moving on to the interpolation scheme with pytest.raises(ValueError) as cm: uvutils.interpolate_ephem( - 0.0, - 0.0 if ("etimes" == bad_arg) else [0.0, 1.0], - 0.0 if ("ra" == bad_arg) else [0.0, 1.0], - 0.0 if ("dec" == bad_arg) else [0.0, 1.0], + time_array=0.0, + ephem_times=0.0 if ("etimes" == bad_arg) else [0.0, 1.0], + ephem_ra=0.0 if ("ra" == bad_arg) else [0.0, 1.0], + ephem_dec=0.0 if ("dec" == bad_arg) else [0.0, 1.0], ephem_dist=0.0 if ("dist" == bad_arg) else [0.0, 1.0], ephem_vel=0.0 if ("vel" == bad_arg) else [0.0, 1.0], ) @@ -1356,7 +1470,7 @@ def test_calc_app_coords_arg_errs(): # Now on to app_coords with pytest.raises(ValueError) as cm: uvutils.calc_app_coords( - 0.0, 0.0, telescope_loc=(0, 1, 2), coord_type="whoknows" + lon_coord=0.0, lat_coord=0.0, telescope_loc=(0, 1, 2), coord_type="whoknows" ) assert str(cm.value).startswith("Object type whoknows is not recognized.") @@ -1369,10 +1483,10 @@ def test_transform_multi_sidereal_coords(astrometry_args): # Check and make sure that we can deal with non-singleton times or coords with # singleton coords and times, respectively. 
check_ra, check_dec = uvutils.transform_sidereal_coords( - astrometry_args["icrs_ra"] * np.ones(2), - astrometry_args["icrs_dec"] * np.ones(2), - "icrs", - "fk5", + longitude=astrometry_args["icrs_ra"] * np.ones(2), + latitude=astrometry_args["icrs_dec"] * np.ones(2), + in_coord_frame="icrs", + out_coord_frame="fk5", in_coord_epoch=2000.0, out_coord_epoch=2000.0, time_array=astrometry_args["time_array"][0] * np.ones(2), @@ -1389,29 +1503,29 @@ def test_transform_fk5_fk4_icrs_loop(astrometry_args): # Now do a triangle between ICRS -> FK5 -> FK4 -> ICRS. If all is working well, # then we should recover the same position we started with. fk5_ra, fk5_dec = uvutils.transform_sidereal_coords( - astrometry_args["icrs_ra"], - astrometry_args["icrs_dec"], - "icrs", - "fk5", + longitude=astrometry_args["icrs_ra"], + latitude=astrometry_args["icrs_dec"], + in_coord_frame="icrs", + out_coord_frame="fk5", in_coord_epoch=2000.0, out_coord_epoch=2000.0, time_array=astrometry_args["time_array"][0], ) fk4_ra, fk4_dec = uvutils.transform_sidereal_coords( - fk5_ra, - fk5_dec, - "fk5", - "fk4", + longitude=fk5_ra, + latitude=fk5_dec, + in_coord_frame="fk5", + out_coord_frame="fk4", in_coord_epoch="J2000.0", out_coord_epoch="B1950.0", ) check_ra, check_dec = uvutils.transform_sidereal_coords( - fk4_ra, - fk4_dec, - "fk4", - "icrs", + longitude=fk4_ra, + latitude=fk4_dec, + in_coord_frame="fk4", + out_coord_frame="icrs", in_coord_epoch="B1950.0", out_coord_epoch="J2000.0", ) @@ -1441,10 +1555,10 @@ def test_roundtrip_icrs(astrometry_args, telescope_frame, in_lib, out_lib): "astrometry library", ): app_ra, app_dec = uvutils.transform_icrs_to_app( - astrometry_args["time_array"], - astrometry_args["icrs_ra"], - astrometry_args["icrs_dec"], - telescope_loc, + time_array=astrometry_args["time_array"], + ra=astrometry_args["icrs_ra"], + dec=astrometry_args["icrs_dec"], + telescope_loc=telescope_loc, telescope_frame=telescope_frame, epoch=astrometry_args["epoch"], astrometry_library=in_lib, @@ -1452,10 +1566,10 @@ def test_roundtrip_icrs(astrometry_args, telescope_frame, in_lib, out_lib): return app_ra, app_dec = uvutils.transform_icrs_to_app( - astrometry_args["time_array"], - astrometry_args["icrs_ra"], - astrometry_args["icrs_dec"], - telescope_loc, + time_array=astrometry_args["time_array"], + ra=astrometry_args["icrs_ra"], + dec=astrometry_args["icrs_dec"], + telescope_loc=telescope_loc, epoch=astrometry_args["epoch"], astrometry_library=in_lib, ) @@ -1467,19 +1581,19 @@ def test_roundtrip_icrs(astrometry_args, telescope_frame, in_lib, out_lib): "astrometry library", ): check_ra, check_dec = uvutils.transform_app_to_icrs( - astrometry_args["time_array"], - app_ra, - app_dec, - telescope_loc, + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, telescope_frame=telescope_frame, astrometry_library=out_lib, ) check_ra, check_dec = uvutils.transform_app_to_icrs( - astrometry_args["time_array"], - app_ra, - app_dec, - telescope_loc, + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, astrometry_library=out_lib, ) @@ -1503,7 +1617,10 @@ def test_calc_parallactic_angle(): """ expected_vals = np.array([1.0754290375762232, 0.0, -0.6518070715011698]) meas_vals = uvutils.calc_parallactic_angle( - [0.0, 1.0, 2.0], [-1.0, 0.0, 1.0], [2.0, 1.0, 0], 1.0 + app_ra=[0.0, 1.0, 2.0], + app_dec=[-1.0, 0.0, 1.0], + lst_array=[2.0, 1.0, 0], + telescope_lat=1.0, ) # Make sure things agree to better than ~0.1 uas (as it 
definitely should) assert np.allclose(expected_vals, meas_vals, 0.0, 1e-12) @@ -1516,11 +1633,11 @@ def test_calc_frame_pos_angle(): # First test -- plug in "topo" for the frame, which should always produce an # array of all zeros (the topo frame is what the apparent coords are in) frame_pa = uvutils.calc_frame_pos_angle( - np.array([2456789.0] * 100), - np.arange(100) * (np.pi / 50), - np.zeros(100), - (0, 0, 0), - "topo", + time_array=np.array([2456789.0] * 100), + app_ra=np.arange(100) * (np.pi / 50), + app_dec=np.zeros(100), + telescope_loc=(0, 0, 0), + ref_frame="topo", ) assert len(frame_pa) == 100 assert np.all(frame_pa == 0.0) @@ -1529,11 +1646,11 @@ def test_calc_frame_pos_angle(): # of basically 0 degrees. j2000_jd = Time(2000.0, format="jyear").utc.jd frame_pa = uvutils.calc_frame_pos_angle( - np.array([j2000_jd] * 100), - np.arange(100) * (np.pi / 50), - np.zeros(100), - (0, 0, 0), - "fk5", + time_array=np.array([j2000_jd] * 100), + app_ra=np.arange(100) * (np.pi / 50), + app_dec=np.zeros(100), + telescope_loc=(0, 0, 0), + ref_frame="fk5", ref_epoch=2000.0, ) # At J2000, the only frame PA terms come from aberation, which basically max out @@ -1543,11 +1660,11 @@ def test_calc_frame_pos_angle(): # JD 2458849.5 is Jan-01-2020, so 20 years of parallax ought to have accumulated # (with about 1 arcmin/yr of precession). Make sure these values are sensible frame_pa = uvutils.calc_frame_pos_angle( - np.array([2458849.5] * 100), - np.arange(100) * (np.pi / 50), - np.zeros(100), - (0, 0, 0), - "fk5", + time_array=np.array([2458849.5] * 100), + app_ra=np.arange(100) * (np.pi / 50), + app_dec=np.zeros(100), + telescope_loc=(0, 0, 0), + ref_frame="fk5", ref_epoch=2000.0, ) assert np.all(np.abs(frame_pa) < 20 * (50.3 / 3600) * (np.pi / 180.0)) @@ -1603,10 +1720,10 @@ def test_ephem_interp_one_point(): ephem_vel = np.array([4.0]) ra_vals0, dec_vals0, dist_vals0, vel_vals0 = uvutils.interpolate_ephem( - time_array, - ephem_times, - ephem_ra, - ephem_dec, + time_array=time_array, + ephem_times=ephem_times, + ephem_ra=ephem_ra, + ephem_dec=ephem_dec, ephem_dist=ephem_dist, ephem_vel=ephem_vel, ) @@ -1632,10 +1749,10 @@ def test_ephem_interp_multi_point(): ephem_vel = np.array([0, 1]) + 4.0 ra_vals1, dec_vals1, dist_vals1, vel_vals1 = uvutils.interpolate_ephem( - time_array, - ephem_times, - ephem_ra, - ephem_dec, + time_array=time_array, + ephem_times=ephem_times, + ephem_ra=ephem_ra, + ephem_dec=ephem_dec, ephem_dist=ephem_dist, ephem_vel=ephem_vel, ) @@ -1650,10 +1767,10 @@ def test_ephem_interp_multi_point(): ephem_vel = (np.arange(11) * 0.1) + 4.0 ra_vals2, dec_vals2, dist_vals2, vel_vals2 = uvutils.interpolate_ephem( - time_array, - ephem_times, - ephem_ra, - ephem_dec, + time_array=time_array, + ephem_times=ephem_times, + ephem_ra=ephem_ra, + ephem_dec=ephem_dec, ephem_dist=ephem_dist, ephem_vel=ephem_vel, ) @@ -1685,8 +1802,12 @@ def test_calc_app_sidereal(astrometry_args, frame, telescope_frame): telescope_loc = astrometry_args["moon_telescope_loc"] check_ra, check_dec = uvutils.calc_app_coords( - astrometry_args["fk5_ra"] if (frame == "fk5") else astrometry_args["icrs_ra"], - astrometry_args["fk5_dec"] if (frame == "fk5") else astrometry_args["icrs_dec"], + lon_coord=astrometry_args["fk5_ra"] + if (frame == "fk5") + else astrometry_args["icrs_ra"], + lat_coord=astrometry_args["fk5_dec"] + if (frame == "fk5") + else astrometry_args["icrs_dec"], coord_type="sidereal", telescope_loc=telescope_loc, telescope_frame=telescope_frame, @@ -1725,8 +1846,8 @@ def 
test_calc_app_ephem(astrometry_args, frame, telescope_frame): ephem_times = np.array([astrometry_args["time_array"][0]]) check_ra, check_dec = uvutils.calc_app_coords( - ephem_ra, - ephem_dec, + lon_coord=ephem_ra, + lat_coord=ephem_dec, coord_times=ephem_times, coord_type="ephem", telescope_loc=telescope_loc, @@ -1754,8 +1875,8 @@ def test_calc_app_driftscan(astrometry_args, telescope_frame): telescope_loc = astrometry_args["moon_telescope_loc"] check_ra, check_dec = uvutils.calc_app_coords( - 0.0, - np.pi / 2.0, + lon_coord=0.0, + lat_coord=np.pi / 2.0, coord_type="driftscan", telescope_loc=telescope_loc, telescope_frame=telescope_frame, @@ -1783,8 +1904,8 @@ def test_calc_app_unprojected(astrometry_args, telescope_frame): lst_array = astrometry_args["moon_lst_array"] check_ra, check_dec = uvutils.calc_app_coords( - None, - None, + lon_coord=None, + lat_coord=None, coord_type="unprojected", telescope_loc=telescope_loc, telescope_frame=telescope_frame, @@ -1807,8 +1928,8 @@ def test_calc_app_fk5_roundtrip(astrometry_args, telescope_frame): telescope_loc = astrometry_args["moon_telescope_loc"] app_ra, app_dec = uvutils.calc_app_coords( - 0.0, - 0.0, + lon_coord=0.0, + lat_coord=0.0, coord_type="sidereal", telescope_loc=telescope_loc, telescope_frame=telescope_frame, @@ -1818,11 +1939,11 @@ def test_calc_app_fk5_roundtrip(astrometry_args, telescope_frame): ) check_ra, check_dec = uvutils.calc_sidereal_coords( - astrometry_args["time_array"], - app_ra, - app_dec, - telescope_loc, - "fk5", + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, + coord_frame="fk5", telescope_frame=telescope_frame, coord_epoch=2000.0, ) @@ -1839,8 +1960,8 @@ def test_calc_app_fk4_roundtrip(astrometry_args, telescope_frame): telescope_loc = astrometry_args["moon_telescope_loc"] app_ra, app_dec = uvutils.calc_app_coords( - 0.0, - 0.0, + lon_coord=0.0, + lat_coord=0.0, coord_type="sidereal", telescope_loc=telescope_loc, telescope_frame=telescope_frame, @@ -1850,11 +1971,11 @@ def test_calc_app_fk4_roundtrip(astrometry_args, telescope_frame): ) check_ra, check_dec = uvutils.calc_sidereal_coords( - astrometry_args["time_array"], - app_ra, - app_dec, - telescope_loc, - "fk4", + time_array=astrometry_args["time_array"], + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, + coord_frame="fk4", telescope_frame=telescope_frame, coord_epoch=1950.0, ) @@ -1908,10 +2029,10 @@ def test_astrometry_icrs_to_app(astrometry_args, use_extra): for idx, name in enumerate(astrometry_list): coord_results[idx] = uvutils.transform_icrs_to_app( - astrometry_args["time_array"], - astrometry_args["icrs_ra"], - astrometry_args["icrs_dec"], - astrometry_args["telescope_loc"], + time_array=astrometry_args["time_array"], + ra=astrometry_args["icrs_ra"], + dec=astrometry_args["icrs_dec"], + telescope_loc=astrometry_args["telescope_loc"], epoch=astrometry_args["epoch"], astrometry_library=name, **kwargs, @@ -1957,10 +2078,10 @@ def test_astrometry_app_to_icrs(astrometry_args): # because the above pre-calculated values were generated using the ICRS # coordinate values coord_results[idx] = uvutils.transform_app_to_icrs( - astrometry_args["time_array"], - astrometry_args["icrs_ra"], - astrometry_args["icrs_dec"], - astrometry_args["telescope_loc"], + time_array=astrometry_args["time_array"], + app_ra=astrometry_args["icrs_ra"], + app_dec=astrometry_args["icrs_dec"], + telescope_loc=astrometry_args["telescope_loc"], astrometry_library=name, ) @@ -1982,18 +2103,18 @@ def 
test_sidereal_reptime(astrometry_args): """ gcrs_ra, gcrs_dec = uvutils.transform_sidereal_coords( - astrometry_args["icrs_ra"] * np.ones(2), - astrometry_args["icrs_dec"] * np.ones(2), - "icrs", - "gcrs", + longitude=astrometry_args["icrs_ra"] * np.ones(2), + latitude=astrometry_args["icrs_dec"] * np.ones(2), + in_coord_frame="icrs", + out_coord_frame="gcrs", time_array=Time(astrometry_args["time_array"][0], format="jd"), ) check_ra, check_dec = uvutils.transform_sidereal_coords( - astrometry_args["icrs_ra"] * np.ones(2), - astrometry_args["icrs_dec"] * np.ones(2), - "icrs", - "gcrs", + longitude=astrometry_args["icrs_ra"] * np.ones(2), + latitude=astrometry_args["icrs_dec"] * np.ones(2), + in_coord_frame="icrs", + out_coord_frame="gcrs", time_array=Time(astrometry_args["time_array"][0] * np.ones(2), format="jd"), ) @@ -2017,10 +2138,10 @@ def test_transform_icrs_to_app_time_obj(astrometry_args, telescope_frame): telescope_loc = astrometry_args["moon_telescope_loc"] check_ra, check_dec = uvutils.transform_icrs_to_app( - Time(astrometry_args["time_array"], format="jd"), - astrometry_args["icrs_ra"], - astrometry_args["icrs_dec"], - telescope_loc, + time_array=Time(astrometry_args["time_array"], format="jd"), + ra=astrometry_args["icrs_ra"], + dec=astrometry_args["icrs_dec"], + telescope_loc=telescope_loc, telescope_frame=telescope_frame, epoch=Time(astrometry_args["epoch"], format="jyear"), ) @@ -2041,17 +2162,17 @@ def test_transform_app_to_icrs_objs(astrometry_args): ) icrs_ra, icrs_dec = uvutils.transform_app_to_icrs( - astrometry_args["time_array"][0], - astrometry_args["app_ra"][0], - astrometry_args["app_dec"][0], - astrometry_args["telescope_loc"], + time_array=astrometry_args["time_array"][0], + app_ra=astrometry_args["app_ra"][0], + app_dec=astrometry_args["app_dec"][0], + telescope_loc=astrometry_args["telescope_loc"], ) check_ra, check_dec = uvutils.transform_app_to_icrs( - Time(astrometry_args["time_array"][0], format="jd"), - astrometry_args["app_ra"][0], - astrometry_args["app_dec"][0], - telescope_loc, + time_array=Time(astrometry_args["time_array"][0], format="jd"), + app_ra=astrometry_args["app_ra"][0], + app_dec=astrometry_args["app_dec"][0], + telescope_loc=telescope_loc, ) assert np.all(check_ra == icrs_ra) @@ -2080,16 +2201,16 @@ def test_calc_app_coords_objs(astrometry_args, telescope_frame): TimeClass = LTime app_ra, app_dec = uvutils.calc_app_coords( - astrometry_args["icrs_ra"], - astrometry_args["icrs_dec"], + lon_coord=astrometry_args["icrs_ra"], + lat_coord=astrometry_args["icrs_dec"], time_array=astrometry_args["time_array"][0], telescope_loc=astrometry_args["telescope_loc"], telescope_frame=telescope_frame, ) check_ra, check_dec = uvutils.calc_app_coords( - astrometry_args["icrs_ra"], - astrometry_args["icrs_dec"], + lon_coord=astrometry_args["icrs_ra"], + lat_coord=astrometry_args["icrs_dec"], time_array=TimeClass(astrometry_args["time_array"][0], format="jd"), telescope_loc=telescope_loc, ) @@ -2125,10 +2246,10 @@ def test_astrometry_lst(astrometry_args): # Note that the units aren't right here (missing a rad-> deg conversion), but # the above values were calculated using the arguments below. 
lst_results[idx] = uvutils.get_lst_for_time( - astrometry_args["time_array"], - astrometry_args["telescope_loc"][0], - astrometry_args["telescope_loc"][1], - astrometry_args["telescope_loc"][2], + jd_array=astrometry_args["time_array"], + latitude=astrometry_args["telescope_loc"][0], + longitude=astrometry_args["telescope_loc"][1], + altitude=astrometry_args["telescope_loc"][2], astrometry_library=name, ) @@ -2218,10 +2339,10 @@ def test_get_lst_for_time_errors(astrometry_args): "select either 'erfa' or 'astropy' for astrometry_library.", ): uvutils.get_lst_for_time( - np.array(astrometry_args["time_array"][0]), - astrometry_args["telescope_loc"][0] * (180.0 / np.pi), - astrometry_args["telescope_loc"][1] * (180.0 / np.pi), - astrometry_args["telescope_loc"][2], + jd_array=np.array(astrometry_args["time_array"][0]), + latitude=astrometry_args["telescope_loc"][0] * (180.0 / np.pi), + longitude=astrometry_args["telescope_loc"][1] * (180.0 / np.pi), + altitude=astrometry_args["telescope_loc"][2], astrometry_library="foo", ) @@ -2250,16 +2371,20 @@ def test_lst_for_time_moon(astrometry_args): match="The MCMF frame is only supported with the 'astropy' astrometry library", ): lst_array = uvutils.get_lst_for_time( - astrometry_args["time_array"], - lat, - lon, - alt, + jd_array=astrometry_args["time_array"], + latitude=lat, + longitude=lon, + altitude=alt, frame="mcmf", astrometry_library="novas", ) lst_array = uvutils.get_lst_for_time( - astrometry_args["time_array"], lat, lon, alt, frame="mcmf" + jd_array=astrometry_args["time_array"], + latitude=lat, + longitude=lon, + altitude=alt, + frame="mcmf", ) # Verify that lsts are close to local zenith RA @@ -2292,7 +2417,10 @@ def test_phasing_funcs(): ants_enu = np.array([-101.94, 156.41, 1.24]) ant_xyz_abs = uvutils.ECEF_from_ENU( - ants_enu, lat_lon_alt[0], lat_lon_alt[1], lat_lon_alt[2] + ants_enu, + latitude=lat_lon_alt[0], + longitude=lat_lon_alt[1], + altitude=lat_lon_alt[2], ) array_center_coord = SkyCoord( @@ -2318,14 +2446,9 @@ def test_phasing_funcs(): (gcrs_from_itrs_coord.cartesian - gcrs_array_center.cartesian).get_xyz().T ) - with uvtest.check_warnings( - DeprecationWarning, - match="This function supports the old phasing method and will be removed along " - "with the old phasing code in version 2.4", - ): - gcrs_uvw = uvutils.phase_uvw( - gcrs_coord.ra.rad, gcrs_coord.dec.rad, gcrs_rel.value - ) + gcrs_uvw = uvutils.old_uvw_calc( + gcrs_coord.ra.rad, gcrs_coord.dec.rad, gcrs_rel.value + ) mwa_tools_calcuvw_u = -97.122828 mwa_tools_calcuvw_v = 50.388281 @@ -2336,14 +2459,9 @@ def test_phasing_funcs(): assert np.allclose(gcrs_uvw[0, 2], mwa_tools_calcuvw_w, atol=1e-3) # also test unphasing - with uvtest.check_warnings( - DeprecationWarning, - match="This function supports the old phasing method and will be removed along " - "with the old phasing code in version 2.4", - ): - temp2 = uvutils.unphase_uvw( - gcrs_coord.ra.rad, gcrs_coord.dec.rad, np.squeeze(gcrs_uvw) - ) + temp2 = uvutils.undo_old_uvw_calc( + gcrs_coord.ra.rad, gcrs_coord.dec.rad, np.squeeze(gcrs_uvw) + ) assert np.allclose(gcrs_rel.value, temp2) @@ -2627,17 +2745,14 @@ def test_redundancy_finder(): # Check with conjugated baseline redundancies returned # Ensure at least one baseline has u==0 and v!=0 (for coverage of this case) bl_positions[16, 0] = 0 - with uvtest.check_warnings( - DeprecationWarning, "The with_conjugates keyword is deprecated" - ): - ( - baseline_groups, - vec_bin_centers, - lens, - conjugates, - ) = uvutils.get_baseline_redundancies( - 
uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True - ) + ( + baseline_groups, + vec_bin_centers, + lens, + conjugates, + ) = uvutils.get_baseline_redundancies( + uvd.baseline_array, bl_positions, tol=tol, include_conjugates=True + ) # restore baseline (16,0) and repeat to get correct groups bl_positions = bl_pos_backup @@ -4167,7 +4282,12 @@ def gettimebls(blt_order): # time, bl TIME, ANT1, ANT2, BL = gettimebls(blt_order) order = uvutils.determine_blt_order( - TIME, ANT1, ANT2, BL, Nbls=nant**2, Ntimes=ntime + time_array=TIME, + ant_1_array=ANT1, + ant_2_array=ANT2, + baseline_array=BL, + Nbls=nant**2, + Ntimes=ntime, ) if isinstance(blt_order, list): assert order is None @@ -4177,7 +4297,7 @@ def gettimebls(blt_order): assert order is None is_rect, time_first = uvutils.determine_rectangularity( - TIME, BL, nbls=nant**2, ntimes=ntime + time_array=TIME, baseline_array=BL, nbls=nant**2, ntimes=ntime ) if blt_order in [("ant1", "time"), ("ant2", "time")]: # sorting by ant1/ant2 then time means we split the other ant into a @@ -4202,9 +4322,18 @@ def test_determine_blt_order_size_1(): ant2 = np.array([1]) bl = uvutils.antnums_to_baseline(ant1, ant2, 2) - order = uvutils.determine_blt_order(times, ant1, ant2, bl, Nbls=1, Ntimes=1) + order = uvutils.determine_blt_order( + time_array=times, + ant_1_array=ant1, + ant_2_array=ant2, + baseline_array=bl, + Nbls=1, + Ntimes=1, + ) assert order == ("baseline", "time") - is_rect, time_first = uvutils.determine_rectangularity(times, bl, nbls=1, ntimes=1) + is_rect, time_first = uvutils.determine_rectangularity( + time_array=times, baseline_array=bl, nbls=1, ntimes=1 + ) assert is_rect assert time_first @@ -4221,19 +4350,25 @@ def test_determine_rect_time_first(): TIME = np.tile(times, len(bls)) BL = np.concatenate([rng.permuted(bls) for i in range(len(times))]) - is_rect, time_first = uvutils.determine_rectangularity(TIME, BL, nbls=9, ntimes=10) + is_rect, time_first = uvutils.determine_rectangularity( + time_array=TIME, baseline_array=BL, nbls=9, ntimes=10 + ) assert not is_rect # now, permute time instead of bls TIME = np.concatenate([rng.permuted(times) for i in range(len(bls))]) BL = np.tile(bls, len(times)) - is_rect, time_first = uvutils.determine_rectangularity(TIME, BL, nbls=9, ntimes=10) + is_rect, time_first = uvutils.determine_rectangularity( + time_array=TIME, baseline_array=BL, nbls=9, ntimes=10 + ) assert not is_rect TIME = np.array([1000.0, 1000.0, 2000.0, 1000.0]) BLS = np.array([0, 0, 1, 0]) - is_rect, time_first = uvutils.determine_rectangularity(TIME, BLS, nbls=2, ntimes=2) + is_rect, time_first = uvutils.determine_rectangularity( + time_array=TIME, baseline_array=BLS, nbls=2, ntimes=2 + ) assert not is_rect @@ -4259,11 +4394,17 @@ def test_calc_app_coords_time_obj(): ra = zenith_coord.ra.to_value("rad") dec = zenith_coord.dec.to_value("rad") app_ra_to, app_dec_to = uvutils.calc_app_coords( - ra, dec, time_array=obstime, telescope_loc=telescope_location + lon_coord=ra, + lat_coord=dec, + time_array=obstime, + telescope_loc=telescope_location, ) app_ra_nto, app_dec_nto = uvutils.calc_app_coords( - ra, dec, time_array=obstime.utc.jd, telescope_loc=telescope_location + lon_coord=ra, + lat_coord=dec, + time_array=obstime.utc.jd, + telescope_loc=telescope_location, ) assert np.allclose(app_ra_to, app_ra_nto) diff --git a/pyuvdata/utils.py b/pyuvdata/utils.py index 204362eddd..170e04a8c5 100644 --- a/pyuvdata/utils.py +++ b/pyuvdata/utils.py @@ -385,7 +385,7 @@ def _test_array_constant_spacing(array, *, tols=None): return 
_test_array_constant(array_diff, tols=tols) -def _check_flex_spw_contiguous(spw_array, flex_spw_id_array): +def _check_flex_spw_contiguous(*, spw_array, flex_spw_id_array): """ Check if the spectral windows are contiguous for flex_spw datasets. @@ -486,7 +486,9 @@ def _check_freq_spacing( elif flex_spw: # Check to make sure that the flexible spectral window has indicies set up # correctly (grouped together) for this check - _check_flex_spw_contiguous(spw_array, flex_spw_id_array) + _check_flex_spw_contiguous( + spw_array=spw_array, flex_spw_id_array=flex_spw_id_array + ) diff_chanwidth = np.diff(channel_width) freq_dir = [] # We want to grab unique spw IDs, in the order that they appear in the data @@ -525,10 +527,10 @@ def _check_freq_spacing( chanwidth_error = True else: freq_dir = np.sign(np.mean(freq_spacing)) - if not _test_array_constant(freq_spacing, freq_tols): + if not _test_array_constant(freq_spacing, tols=freq_tols): spacing_error = True if future_array_shapes: - if not _test_array_constant(channel_width, freq_tols): + if not _test_array_constant(channel_width, tols=freq_tols): spacing_error = True else: if not np.isclose( @@ -1314,7 +1316,7 @@ def LatLonAlt_from_XYZ(xyz, *, frame="ITRS", check_acceptability=True): return lla[0], lla[1], lla[2] -def XYZ_from_LatLonAlt(latitude, longitude, altitude, frame="ITRS"): +def XYZ_from_LatLonAlt(latitude, longitude, altitude, *, frame="ITRS"): """ Calculate ECEF x,y,z from lat/lon/alt values. @@ -1592,9 +1594,8 @@ def old_uvw_calc(ra, dec, initial_uvw): This method should not be used and is only retained for testing the undo_old_uvw_calc method, which is needed for fixing phases. - This code expects input uvws or positions relative to the telescope - location in the same frame that ra/dec are in (e.g. icrs or gcrs) and - returns phased ones in the same frame. + This code expects input uvws relative to the telescope location in the same frame + that ra/dec are in (e.g. icrs or gcrs) and returns phased ones in the same frame. Parameters ---------- @@ -1746,7 +1747,7 @@ def cart3_to_polar2(xyz_array): return lon_array, lat_array -def _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot): +def _rotate_matmul_wrapper(*, xyz_array, rot_matrix, n_rot): """ Apply a rotation matrix to a series of vectors. @@ -1791,7 +1792,7 @@ def _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot): return rotated_xyz -def _rotate_one_axis(xyz_array, rot_amount, rot_axis): +def _rotate_one_axis(*, xyz_array, rot_amount, rot_axis): """ Rotate an array of 3D positions around the a single axis (x, y, or z). @@ -1864,15 +1865,19 @@ def _rotate_one_axis(xyz_array, rot_amount, rot_axis): # else is done. return np.transpose( _rotate_matmul_wrapper( - np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot + xyz_array=np.transpose(xyz_array, axes=[2, 1, 0]), + rot_matrix=rot_matrix, + n_rot=n_rot, ), axes=[2, 1, 0], ) else: - return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot) + return _rotate_matmul_wrapper( + xyz_array=xyz_array, rot_matrix=rot_matrix, n_rot=n_rot + ) -def _rotate_two_axis(xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2): +def _rotate_two_axis(*, xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2): """ Rotate an array of 3D positions sequentially around a pair of axes (x, y, or z). 
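Note: the bare `*` added to `_rotate_matmul_wrapper`, `_rotate_one_axis` and `_rotate_two_axis` above makes every argument after it keyword-only. A minimal sketch of that calling convention, using a toy stand-in rather than the real pyuvdata helpers:

    import numpy as np

    def rotate_one_axis(*, xyz_array, rot_amount, rot_axis):
        # toy stand-in: only the calling convention matters here
        return xyz_array

    vecs = np.eye(3)
    rotate_one_axis(xyz_array=vecs, rot_amount=0.0, rot_axis=0)  # keywords: fine
    try:
        rotate_one_axis(vecs, 0.0, 0)  # positional arguments now fail
    except TypeError as err:
        print(err)  # e.g. "takes 0 positional arguments but 3 were given"
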
@@ -1920,14 +1925,22 @@ def _rotate_two_axis(xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2): return deepcopy(xyz_array) elif no_rot1: # If rot_amount1 is None, then ignore it and just work w/ the 2nd rotation - return _rotate_one_axis(xyz_array, rot_amount2, rot_axis2) + return _rotate_one_axis( + xyz_array=xyz_array, rot_amount=rot_amount2, rot_axis=rot_axis2 + ) elif no_rot2: # If rot_amount2 is None, then ignore it and just work w/ the 1st rotation - return _rotate_one_axis(xyz_array, rot_amount1, rot_axis1) + return _rotate_one_axis( + xyz_array=xyz_array, rot_amount=rot_amount1, rot_axis=rot_axis1 + ) elif rot_axis1 == rot_axis2: # Capture the case where someone wants to do a sequence of rotations on the same # axis. Also known as just rotating a single axis. - return _rotate_one_axis(xyz_array, rot_amount1 + rot_amount2, rot_axis1) + return _rotate_one_axis( + xyz_array=xyz_array, + rot_amount=rot_amount1 + rot_amount2, + rot_axis=rot_axis1, + ) # Figure out how many individual rotation matricies we need, accounting for the # fact that these can either be floats or ndarrays. @@ -2002,13 +2015,17 @@ def _rotate_two_axis(xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2): # swap the n_vector and n_rot axes, and then swap them back once everything # else is done. return np.transpose( - _rotate_matmul_wrapper( - np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot + _rotate_matmul_wrapper( # xyz_array, rot_matrix, n_rot + xyz_array=np.transpose(xyz_array, axes=[2, 1, 0]), + rot_matrix=rot_matrix, + n_rot=n_rot, ), axes=[2, 1, 0], ) else: - return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot) + return _rotate_matmul_wrapper( + xyz_array=xyz_array, rot_matrix=rot_matrix, n_rot=n_rot + ) def calc_uvw( @@ -2197,9 +2214,15 @@ def calc_uvw( ant_rot_vectors = np.reshape( np.transpose( _rotate_one_axis( - _rotate_two_axis(ant_vectors, unique_gha, unique_dec, 2, 1), - unique_pa, - 0, + xyz_array=_rotate_two_axis( + xyz_array=ant_vectors, + rot_amount1=unique_gha, + rot_amount2=unique_dec, + rot_axis1=2, + rot_axis2=1, + ), + rot_amount=unique_pa, + rot_axis=0, ), axes=[0, 2, 1], ), @@ -2265,30 +2288,34 @@ def calc_uvw( # up on the map). This is a much easier transform to handle. if np.all(gha_delta_array == 0.0) and np.all(old_app_dec == app_dec): new_coords = _rotate_one_axis( - uvw_array[:, [2, 0, 1], np.newaxis], - frame_pa - (0.0 if old_frame_pa is None else old_frame_pa), - 0, + xyz_array=uvw_array[:, [2, 0, 1], np.newaxis], + rot_amount=frame_pa - (0.0 if old_frame_pa is None else old_frame_pa), + rot_axis=0, )[:, :, 0] else: new_coords = _rotate_two_axis( - _rotate_two_axis( # Yo dawg, I heard you like rotation matricies... 
- uvw_array[:, [2, 0, 1], np.newaxis], - 0.0 if (from_enu or old_frame_pa is None) else (-old_frame_pa), - (-telescope_lat) if from_enu else (-old_app_dec), - 0, - 1, + xyz_array=_rotate_two_axis( + xyz_array=uvw_array[:, [2, 0, 1], np.newaxis], + rot_amount1=0.0 + if (from_enu or old_frame_pa is None) + else (-old_frame_pa), + rot_amount2=(-telescope_lat) if from_enu else (-old_app_dec), + rot_axis1=0, + rot_axis2=1, ), - gha_delta_array, - telescope_lat if to_enu else app_dec, - 2, - 1, + rot_amount1=gha_delta_array, + rot_amount2=telescope_lat if to_enu else app_dec, + rot_axis1=2, + rot_axis2=1, ) # One final rotation applied here, to compensate for the fact that we want # the Dec-axis of our image (Fourier dual to the v-axis) to be aligned with # the chosen frame, if we not in ENU coordinates if not to_enu: - new_coords = _rotate_one_axis(new_coords, frame_pa, 0) + new_coords = _rotate_one_axis( + xyz_array=new_coords, rot_amount=frame_pa, rot_axis=0 + ) # Finally drop the now-vestigal last axis of the array new_coords = new_coords[:, :, 0] @@ -2301,8 +2328,8 @@ def calc_uvw( def transform_sidereal_coords( *, - lon, - lat, + longitude, + latitude, in_coord_frame, out_coord_frame, in_coord_epoch=None, @@ -2359,8 +2386,8 @@ def transform_sidereal_coords( Latidudinal coordinates, in units of radians. Output will be an ndarray if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs. """ - lon_coord = lon * units.rad - lat_coord = lat * units.rad + lon_coord = longitude * units.rad + lat_coord = latitude * units.rad # Check here to make sure that lat_coord and lon_coord are the same length, # either 1 or len(time_array) @@ -2550,7 +2577,6 @@ def transform_icrs_to_app( "Requested coordinate transformation library is not supported, please " "select either 'erfa', 'novas', or 'astropy' for astrometry_library." ) - ra_coord = ra * units.rad dec_coord = dec * units.rad @@ -2879,6 +2905,7 @@ def transform_icrs_to_app( def transform_app_to_icrs( + *, time_array, app_ra, app_dec, @@ -3076,7 +3103,7 @@ def transform_app_to_icrs( return icrs_ra, icrs_dec -def calc_parallactic_angle(app_ra, app_dec, lst_array, telescope_lat): +def calc_parallactic_angle(*, app_ra, app_dec, lst_array, telescope_lat): """ Calculate the parallactic angle between RA/Dec and the AltAz frame. @@ -3098,6 +3125,7 @@ def calc_parallactic_angle(app_ra, app_dec, lst_array, telescope_lat): def calc_frame_pos_angle( + *, time_array, app_ra, app_dec, @@ -3226,6 +3254,7 @@ def calc_frame_pos_angle( def lookup_jplhorizons( target_name, time_array, + *, telescope_loc=None, high_cadence=False, force_indv_lookup=None, @@ -3429,7 +3458,7 @@ def lookup_jplhorizons( def interpolate_ephem( - time_array, ephem_times, ephem_ra, ephem_dec, ephem_dist=None, ephem_vel=None + *, time_array, ephem_times, ephem_ra, ephem_dec, ephem_dist=None, ephem_vel=None ): """ Interpolates ephemerides to give positions for requested times. 
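Note: with `lon`/`lat` renamed to `longitude`/`latitude` in `transform_sidereal_coords` above, a frame conversion reads as in this sketch; the coordinate and time values are hypothetical and the keyword names assume this patch is applied:

    import numpy as np
    from pyuvdata import utils as uvutils

    # ICRS -> FK5 at J2000, mirroring the updated test calls
    fk5_ra, fk5_dec = uvutils.transform_sidereal_coords(
        longitude=np.array([0.5]),         # RA in radians (hypothetical)
        latitude=np.array([-0.3]),         # Dec in radians (hypothetical)
        in_coord_frame="icrs",
        out_coord_frame="fk5",
        in_coord_epoch=2000.0,
        out_coord_epoch=2000.0,
        time_array=np.array([2456789.0]),  # JD (hypothetical)
    )
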
@@ -3559,6 +3588,7 @@ def interpolate_ephem( def calc_app_coords( + *, lon_coord, lat_coord, coord_frame="icrs", @@ -3694,8 +3724,8 @@ def calc_app_coords( # If the coordinates are not in the ICRS frame, go ahead and transform them now if coord_frame != "icrs": icrs_ra, icrs_dec = transform_sidereal_coords( - lon=lon_coord, - lat=lat_coord, + longitude=lon_coord, + latitude=lat_coord, in_coord_frame=coord_frame, out_coord_frame="icrs", in_coord_epoch=coord_epoch, @@ -3727,12 +3757,15 @@ def calc_app_coords( unique_app_dec = unique_app_dec + np.zeros_like(unique_app_ra) elif coord_type == "ephem": interp_ra, interp_dec, _, _ = interpolate_ephem( - unique_time_array, coord_times, lon_coord, lat_coord + time_array=unique_time_array, + ephem_times=coord_times, + ephem_ra=lon_coord, + ephem_dec=lat_coord, ) if coord_frame != "icrs": icrs_ra, icrs_dec = transform_sidereal_coords( - lon=interp_ra, - lat=interp_dec, + longitude=interp_ra, + latitude=interp_dec, in_coord_frame=coord_frame, out_coord_frame="icrs", in_coord_epoch=coord_epoch, @@ -3839,15 +3872,19 @@ def calc_sidereal_coords( epoch = Time(coord_epoch, format="jyear") icrs_ra, icrs_dec = transform_app_to_icrs( - time_array, app_ra, app_dec, telescope_loc, telescope_frame + time_array=time_array, + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=telescope_loc, + telescope_frame=telescope_frame, ) if coord_frame == "icrs": ref_ra, ref_dec = (icrs_ra, icrs_dec) else: ref_ra, ref_dec = transform_sidereal_coords( - lon=icrs_ra, - lat=icrs_dec, + longitude=icrs_ra, + latitude=icrs_dec, in_coord_frame="icrs", out_coord_frame=coord_frame, out_coord_epoch=epoch, @@ -3858,8 +3895,8 @@ def calc_sidereal_coords( def get_lst_for_time( - *, jd_array=None, + *, latitude=None, longitude=None, altitude=None, @@ -4258,7 +4295,12 @@ def uvw_track_generator( ) frame_pa = calc_frame_pos_angle( - time_array, app_ra, app_dec, site_loc, coord_frame, ref_epoch=coord_epoch + time_array=time_array, + app_ra=app_ra, + app_dec=app_dec, + telescope_loc=site_loc, + ref_frame=coord_frame, + ref_epoch=coord_epoch, ) uvws = calc_uvw( @@ -4346,7 +4388,7 @@ def _find_cliques(adj, strict=False): return loc_gps -def find_clusters(location_ids, location_vectors, tol, strict=False): +def find_clusters(*, location_ids, location_vectors, tol, strict=False): """ Find clusters of vectors (e.g. redundant baselines, times). 
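Note: the `get_lst_for_time` hunk above moves the bare `*` so that `jd_array` may again be passed positionally while the site arguments stay keyword-only. A sketch with a hypothetical site (latitude/longitude in degrees, altitude in meters), assuming this patch:

    import numpy as np
    from pyuvdata import utils as uvutils

    lst_rad = uvutils.get_lst_for_time(
        np.array([2457458.5]),  # jd_array can stay positional after this change
        latitude=-30.72,
        longitude=21.43,
        altitude=1073.0,
    )
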
@@ -4436,7 +4478,9 @@ def get_baseline_redundancies( return bl_gps, vec_bin_centers, lens, baseline_ind_conj try: - bl_gps = find_clusters(baselines, baseline_vecs, tol, strict=True) + bl_gps = find_clusters( + location_ids=baselines, location_vectors=baseline_vecs, tol=tol, strict=True + ) except ValueError as exc: raise ValueError( "Some baselines are falling into multiple" @@ -5124,16 +5168,18 @@ def uvcalibrate( except KeyError: uvcal_ant2_num = None - uvcal_key1 = (uvcal_ant1_num, feed1) - uvcal_key2 = (uvcal_ant2_num, feed2) if (uvcal_ant1_num is None or uvcal_ant2_num is None) or not ( - uvcal_use._has_key(*uvcal_key1) and uvcal_use._has_key(*uvcal_key2) + uvcal_use._key_exists(antnum=uvcal_ant1_num, jpol=feed1) + and uvcal_use._key_exists(antnum=uvcal_ant2_num, jpol=feed2) ): if uvdata.future_array_shapes: uvdata.flag_array[blt_inds, :, pol_ind] = True else: uvdata.flag_array[blt_inds, 0, :, pol_ind] = True continue + + uvcal_key1 = (uvcal_ant1_num, feed1) + uvcal_key2 = (uvcal_ant2_num, feed2) if flip_gain_conj: gain = ( np.conj(uvcal_use.get_gains(uvcal_key1)) @@ -5646,7 +5692,7 @@ def _get_dset_shape(dset, indices): return dset_shape, indices -def _convert_to_slices(indices, max_nslice_frac=0.1): +def _convert_to_slices(indices, *, max_nslice_frac=0.1): """ Convert list of indices to a list of slices. diff --git a/pyuvdata/uvcal/tests/test_uvcal.py b/pyuvdata/uvcal/tests/test_uvcal.py index 083529c245..80c78219a9 100644 --- a/pyuvdata/uvcal/tests/test_uvcal.py +++ b/pyuvdata/uvcal/tests/test_uvcal.py @@ -3930,11 +3930,11 @@ def test_uvcal_get_methods(future_shapes, gain_data): np.testing.assert_array_almost_equal(gain_arr, gain_arr2) # check has_key - assert uvc._has_key(antnum=10) - assert uvc._has_key(jpol="Jee") - assert uvc._has_key(antnum=10, jpol="Jee") - assert not uvc._has_key(antnum=10, jpol="Jnn") - assert not uvc._has_key(antnum=101, jpol="Jee") + assert uvc._key_exists(antnum=10) + assert uvc._key_exists(jpol="Jee") + assert uvc._key_exists(antnum=10, jpol="Jee") + assert not uvc._key_exists(antnum=10, jpol="Jnn") + assert not uvc._key_exists(antnum=101, jpol="Jee") # test exceptions pytest.raises(ValueError, uvc.get_gains, 1) diff --git a/pyuvdata/uvcal/uvcal.py b/pyuvdata/uvcal/uvcal.py index 80457590cd..21b9b506ec 100644 --- a/pyuvdata/uvcal/uvcal.py +++ b/pyuvdata/uvcal/uvcal.py @@ -1262,7 +1262,9 @@ def _check_flex_spw_contiguous(self): """ if self.flex_spw: - uvutils._check_flex_spw_contiguous(self.spw_array, self.flex_spw_id_array) + uvutils._check_flex_spw_contiguous( + spw_array=self.spw_array, flex_spw_id_array=self.flex_spw_id_array + ) def _check_freq_spacing(self, *, raise_errors=True): """ @@ -1501,7 +1503,7 @@ def copy(self, *, metadata_only=False): return uv - def _has_key(self, *, antnum=None, jpol=None): + def _key_exists(self, *, antnum=None, jpol=None): """ Check if this UVCal has the requested antenna or polarization. @@ -1543,7 +1545,7 @@ def ant2ind(self, antnum): int Antenna index in data arrays. 
""" - if not self._has_key(antnum=antnum): + if not self._key_exists(antnum=antnum): raise ValueError("{} not found in ant_array".format(antnum)) return np.argmin(np.abs(self.ant_array - antnum)) @@ -1565,7 +1567,7 @@ def jpol2ind(self, jpol): if isinstance(jpol, (str, np.str_)): jpol = uvutils.jstr2num(jpol, x_orientation=self.x_orientation) - if not self._has_key(jpol=jpol): + if not self._key_exists(jpol=jpol): raise ValueError("{} not found in jones_array".format(jpol)) return np.argmin(np.abs(self.jones_array - jpol)) diff --git a/pyuvdata/uvdata/uvdata.py b/pyuvdata/uvdata/uvdata.py index 9da33afac1..fccf5847cf 100644 --- a/pyuvdata/uvdata/uvdata.py +++ b/pyuvdata/uvdata/uvdata.py @@ -2463,7 +2463,6 @@ def _set_lsts_helper(self, astrometry_library=None): latitude=latitude, longitude=longitude, altitude=altitude, - astrometry_lib=astrometry_library, frame=self._telescope_location.frame, astrometry_library=astrometry_library, ) @@ -2505,9 +2504,9 @@ def _set_app_coords_helper(self, *, pa_only=False): dist = temp_dict.get("cat_dist") app_ra[select_mask], app_dec[select_mask] = uvutils.calc_app_coords( - lon_val, - lat_val, - frame, + lon_coord=lon_val, + lat_coord=lat_val, + coord_frame=frame, coord_epoch=epoch, coord_times=time_val, pm_ra=pm_ra, @@ -2532,11 +2531,11 @@ def _set_app_coords_helper(self, *, pa_only=False): epoch = temp_dict.get("cat_epoch") if not frame == "altaz": frame_pa[select_mask] = uvutils.calc_frame_pos_angle( - self.time_array[select_mask], - app_ra[select_mask], - app_dec[select_mask], - self.telescope_location_lat_lon_alt, - frame, + time_array=self.time_array[select_mask], + app_ra=app_ra[select_mask], + app_dec=app_dec[select_mask], + telescope_loc=self.telescope_location_lat_lon_alt, + ref_frame=frame, ref_epoch=epoch, ) self.phase_center_app_ra = app_ra @@ -2587,7 +2586,9 @@ def _check_flex_spw_contiguous(self): formats cannot, so we just consider it forbidden. """ if self.flex_spw: - uvutils._check_flex_spw_contiguous(self.spw_array, self.flex_spw_id_array) + uvutils._check_flex_spw_contiguous( + spw_array=self.spw_array, flex_spw_id_array=self.flex_spw_id_array + ) def _check_freq_spacing(self, *, raise_errors=True): """ @@ -4374,9 +4375,12 @@ def get_ENU_antpos(self, *, center=False, pick_data_ants=False): Antenna numbers matching ordering of antpos, shape=(Nants,) """ + latitude, longitude, altitude = self.telescope_location_lat_lon_alt antpos = uvutils.ENU_from_ECEF( (self.antenna_positions + self.telescope_location), - *self.telescope_location_lat_lon_alt, + latitude=latitude, + longitude=longitude, + altitude=altitude, frame=self._telescope_location.frame, ) ants = self.antenna_numbers @@ -4880,10 +4884,10 @@ def set_rectangularity(self, *, force: bool = False) -> None: return rect, time = uvutils.determine_rectangularity( - self.time_array, - self.baseline_array, - self.Nbls, - self.Ntimes, + time_array=self.time_array, + baseline_array=self.baseline_array, + nbls=self.Nbls, + ntimes=self.Ntimes, blt_order=self.blt_order, ) self.blts_are_rectangular = rect @@ -5311,7 +5315,7 @@ def remove_eq_coeffs(self): return - def _apply_w_proj(self, new_w_vals, old_w_vals, *, select_mask=None): + def _apply_w_proj(self, *, new_w_vals, old_w_vals, select_mask=None): """ Apply corrections based on changes to w-coord. 
@@ -5462,7 +5466,11 @@ def unproject_phase( to_enu=True, ) - self._apply_w_proj(0.0, self.uvw_array[select_mask_use, 2], select_mask_use) + self._apply_w_proj( + new_w_vals=0.0, + old_w_vals=self.uvw_array[select_mask_use, 2], + select_mask=select_mask_use, + ) self.uvw_array = new_uvw # remove/update phase center @@ -5890,11 +5898,11 @@ def phase( # Now calculate position angles. if not phase_frame == "altaz": new_frame_pa = uvutils.calc_frame_pos_angle( - time_array, - new_app_ra, - new_app_dec, - self.telescope_location_lat_lon_alt, - phase_frame, + time_array=time_array, + app_ra=new_app_ra, + app_dec=new_app_dec, + telescope_loc=self.telescope_location_lat_lon_alt, + ref_frame=phase_frame, ref_epoch=epoch, telescope_frame=self._telescope_location.frame, ) @@ -5941,7 +5949,9 @@ def phase( # Now its time to update the raw data. This will return empty if # metadata_only is set to True. new_w_vals = 0.0 if (cat_type == "unprojected") else new_uvw[:, 2] - self._apply_w_proj(new_w_vals, old_w_vals, select_mask=select_mask) + self._apply_w_proj( + new_w_vals=new_w_vals, old_w_vals=old_w_vals, select_mask=select_mask + ) # Finally, we now take it upon ourselves to update some metadata. What we # do here will depend a little bit on whether or not we have a selection @@ -6065,7 +6075,7 @@ def set_uvws_from_antenna_positions(self, *, update_vis=True): if update_vis: old_w_vals = self.uvw_array[:, 2].copy() old_w_vals[unprojected_blts] = 0.0 - self._apply_w_proj(new_uvw[:, 2], old_w_vals) + self._apply_w_proj(new_w_vals=new_uvw[:, 2], old_w_vals=old_w_vals) else: warnings.warn( "Recalculating uvw_array without adjusting visibility " diff --git a/pyuvdata/uvdata/uvfits.py b/pyuvdata/uvdata/uvfits.py index 24e314872f..b04bb5038d 100644 --- a/pyuvdata/uvdata/uvfits.py +++ b/pyuvdata/uvdata/uvfits.py @@ -223,20 +223,20 @@ def _get_data( """ # figure out what data to read in blt_inds, freq_inds, pol_inds, history_update_string = self._select_preprocess( - antenna_nums, - antenna_names, - ant_str, - bls, - frequencies, - freq_chans, - times, - time_range, - lsts, - lst_range, - polarizations, - blt_inds, - phase_center_ids, - catalog_names, + antenna_nums=antenna_nums, + antenna_names=antenna_names, + ant_str=ant_str, + bls=bls, + frequencies=frequencies, + freq_chans=freq_chans, + times=times, + time_range=time_range, + lsts=lsts, + lst_range=lst_range, + polarizations=polarizations, + blt_inds=blt_inds, + phase_center_ids=phase_center_ids, + catalog_names=catalog_names, ) if blt_inds is not None: diff --git a/pyuvdata/uvdata/uvh5.py b/pyuvdata/uvdata/uvh5.py index 7352a9c048..20bc630191 100644 --- a/pyuvdata/uvdata/uvh5.py +++ b/pyuvdata/uvdata/uvh5.py @@ -1146,20 +1146,20 @@ def _get_data( # figure out what data to read in blt_inds, freq_inds, pol_inds, history_update_string = self._select_preprocess( - antenna_nums, - antenna_names, - ant_str, - bls, - frequencies, - freq_chans, - times, - time_range, - lsts, - lst_range, - polarizations, - blt_inds, - phase_center_ids, - catalog_names, + antenna_nums=antenna_nums, + antenna_names=antenna_names, + ant_str=ant_str, + bls=bls, + frequencies=frequencies, + freq_chans=freq_chans, + times=times, + time_range=time_range, + lsts=lsts, + lst_range=lst_range, + polarizations=polarizations, + blt_inds=blt_inds, + phase_center_ids=phase_center_ids, + catalog_names=catalog_names, ) # figure out which axis is the most selective diff --git a/pyuvdata/uvflag/uvflag.py b/pyuvdata/uvflag/uvflag.py index d82f56621f..c2007a1d48 100644 --- a/pyuvdata/uvflag/uvflag.py 
+++ b/pyuvdata/uvflag/uvflag.py @@ -196,8 +196,8 @@ class UVFlag(UVBase): def __init__( self, - *, indata=None, + *, mode="metric", copy_flags=False, waterfall=False, @@ -964,7 +964,9 @@ def check(self, *, check_extra=True, run_check_acceptability=True): self._flex_spw_id_array.required = False # first run the basic check from UVBase - super().check(check_extra, run_check_acceptability) + super().check( + check_extra=check_extra, run_check_acceptability=run_check_acceptability + ) # Check internal consistency of numbers which don't explicitly correspond # to the shape of another array. From faf09ad9525fdbef5fd431f56bb8165018b21a3c Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Fri, 25 Aug 2023 10:08:46 -0700 Subject: [PATCH 03/12] params by names for beams --- pyuvdata/uvbeam/beamfits.py | 6 +++++- pyuvdata/uvbeam/cst_beam.py | 9 +++++++-- pyuvdata/uvbeam/mwa_beam.py | 26 +++++++++++++++++--------- pyuvdata/uvbeam/uvbeam.py | 18 +++++++++--------- 4 files changed, 38 insertions(+), 21 deletions(-) diff --git a/pyuvdata/uvbeam/beamfits.py b/pyuvdata/uvbeam/beamfits.py index e9ac142eb6..1465ecea33 100644 --- a/pyuvdata/uvbeam/beamfits.py +++ b/pyuvdata/uvbeam/beamfits.py @@ -61,6 +61,7 @@ class BeamFITS(UVBeam): def read_beamfits( self, filename, + *, use_future_array_shapes=False, run_check=True, check_extra=True, @@ -555,6 +556,7 @@ def read_beamfits( def write_beamfits( self, filename, + *, run_check=True, check_extra=True, run_check_acceptability=True, @@ -607,7 +609,9 @@ def write_beamfits( freq_spacing = self.freq_array[1:] - self.freq_array[:-1] else: freq_spacing = self.freq_array[0, 1:] - self.freq_array[0, :-1] - if not uvutils._test_array_constant(freq_spacing, self._freq_array.tols): + if not uvutils._test_array_constant( + freq_spacing, tols=self._freq_array.tols + ): raise ValueError( "The frequencies are not evenly spaced (probably " "because of a select operation). 
The beamfits format " diff --git a/pyuvdata/uvbeam/cst_beam.py b/pyuvdata/uvbeam/cst_beam.py index 59b27e0655..5799b6ec2b 100644 --- a/pyuvdata/uvbeam/cst_beam.py +++ b/pyuvdata/uvbeam/cst_beam.py @@ -54,6 +54,7 @@ def name2freq(self, fname): def read_cst_beam( self, filename, + *, beam_type="power", use_future_array_shapes=False, feed_pol="x", @@ -240,12 +241,16 @@ def read_cst_beam( theta_data = theta_data.reshape((theta_axis.size, phi_axis.size), order="F") phi_data = phi_data.reshape((theta_axis.size, phi_axis.size), order="F") - if not uvutils._test_array_constant_spacing(theta_axis, self._axis2_array.tols): + if not uvutils._test_array_constant_spacing( + theta_axis, tols=self._axis2_array.tols + ): raise ValueError( "Data does not appear to be regularly gridded in zenith angle" ) - if not uvutils._test_array_constant_spacing(phi_axis, self._axis1_array.tols): + if not uvutils._test_array_constant_spacing( + phi_axis, tols=self._axis1_array.tols + ): raise ValueError( "Data does not appear to be regularly gridded in azimuth angle" ) diff --git a/pyuvdata/uvbeam/mwa_beam.py b/pyuvdata/uvbeam/mwa_beam.py index 6753113404..8eed8f30db 100644 --- a/pyuvdata/uvbeam/mwa_beam.py +++ b/pyuvdata/uvbeam/mwa_beam.py @@ -267,6 +267,7 @@ def _read_metadata(self, h5filepath): def _get_beam_modes( self, + *, h5filepath, freqs_hz, pol_names, @@ -379,7 +380,7 @@ def _get_beam_modes( } return beam_modes - def _get_response(self, freqs_hz, pol_names, beam_modes, phi_arr, theta_arr): + def _get_response(self, *, freqs_hz, pol_names, beam_modes, phi_arr, theta_arr): """ Calculate full Jones matrix response (E-field) of beam on a regular az/za grid. @@ -479,6 +480,7 @@ def _get_response(self, freqs_hz, pol_names, beam_modes, phi_arr, theta_arr): def read_mwa_beam( self, h5filepath, + *, use_future_array_shapes=False, delays=None, amplitudes=None, @@ -604,13 +606,13 @@ def read_mwa_beam( freqs_use = freqs_hz beam_modes = self._get_beam_modes( - h5filepath, - freqs_hz, - pol_names, - dipole_names, - max_length, - delays, - amplitudes, + h5filepath=h5filepath, + freqs_hz=freqs_hz, + pol_names=pol_names, + dipole_names=dipole_names, + max_length=max_length, + delays=delays, + amplitudes=amplitudes, ) n_phi = np.floor(360 * pixels_per_deg) @@ -618,7 +620,13 @@ def read_mwa_beam( theta_arr = np.deg2rad(np.arange(0, n_theta) / pixels_per_deg) phi_arr = np.deg2rad(np.arange(0, n_phi) / pixels_per_deg) - jones = self._get_response(freqs_use, pol_names, beam_modes, phi_arr, theta_arr) + jones = self._get_response( + freqs_hz=freqs_use, + pol_names=pol_names, + beam_modes=beam_modes, + phi_arr=phi_arr, + theta_arr=theta_arr, + ) # work out zenith normalization # (MWA beams are peak normalized to 1 when pointed at zenith) diff --git a/pyuvdata/uvbeam/uvbeam.py b/pyuvdata/uvbeam/uvbeam.py index aea8caad34..5a40f19d49 100644 --- a/pyuvdata/uvbeam/uvbeam.py +++ b/pyuvdata/uvbeam/uvbeam.py @@ -1304,7 +1304,7 @@ def _stokes_matrix(self, pol_index): return pauli_mat - def _construct_mueller(self, jones, pol_index1, pol_index2): + def _construct_mueller(self, *, jones, pol_index1, pol_index2): """ Generate Mueller components. 
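Note: the beam readers above now pass tolerances to the spacing helpers by keyword. In isolation the check looks like this sketch; it touches a private helper and is shown only to illustrate the new `tols=` requirement, with made-up frequencies:

    import numpy as np
    from pyuvdata import utils as uvutils

    freqs = 100e6 + 1e6 * np.arange(8)  # evenly spaced channels (hypothetical)
    evenly_spaced = uvutils._test_array_constant_spacing(freqs, tols=(1e-5, 1e-8))
    # passing the tolerances positionally would now raise a TypeError
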
@@ -1426,11 +1426,11 @@ def efield_to_pstokes( for pol_i in range(len(pol_strings)): if beam_object.future_array_shapes: power_data[:, pol_i, fq_i, :] = self._construct_mueller( - jones, pol_i, pol_i + jones=jones, pol_index1=pol_i, pol_index2=pol_i ) else: power_data[:, :, pol_i, fq_i, :] = self._construct_mueller( - jones, pol_i, pol_i + jones=jones, pol_index1=pol_i, pol_index2=pol_i ) assert not np.any(np.iscomplex(power_data)), ( "The calculated pstokes beams are complex but should be real. This is a " @@ -2308,9 +2308,9 @@ def interp( extra_keyword_dict["check_azza_domain"] = check_azza_domain interp_arrays = getattr(self, interp_func)( - az_array_use, - za_array_use, - freq_array, + az_array=az_array_use, + za_array=za_array_use, + freq_array=freq_array, freq_interp_kind=kind_use, polarizations=polarizations, **extra_keyword_dict, @@ -3296,7 +3296,7 @@ def __add__( else: freq_arr_test = this.freq_array[0, :] if not uvutils._test_array_constant_spacing( - freq_arr_test, this._freq_array.tols + freq_arr_test, tols=this._freq_array.tols ): warnings.warn( "Combined frequencies are not evenly spaced. This will " @@ -3565,7 +3565,7 @@ def select( beam_object.freq_array[1:] - beam_object.freq_array[:-1] ) if not uvutils._test_array_constant( - freq_separation, beam_object._freq_array.tols + freq_separation, tols=beam_object._freq_array.tols ): warnings.warn( "Selected frequencies are not evenly spaced. This " @@ -3592,7 +3592,7 @@ def select( beam_object.freq_array[0, 1:] - beam_object.freq_array[0, :-1] ) if not uvutils._test_array_constant( - freq_separation, beam_object._freq_array.tols + freq_separation, tols=beam_object._freq_array.tols ): warnings.warn( "Selected frequencies are not evenly spaced. This " From e941258288e47bccdcee073909d7a6a7749817e8 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Fri, 25 Aug 2023 10:26:36 -0700 Subject: [PATCH 04/12] params by names for cal --- pyuvdata/uvcal/calfits.py | 2 + pyuvdata/uvcal/fhd_cal.py | 1 + pyuvdata/uvcal/tests/test_fhd_cal.py | 8 +- pyuvdata/uvcal/tests/test_uvcal.py | 137 ++++++++++++++++----------- pyuvdata/uvcal/uvcal.py | 26 ++--- 5 files changed, 102 insertions(+), 72 deletions(-) diff --git a/pyuvdata/uvcal/calfits.py b/pyuvdata/uvcal/calfits.py index efb392456c..8d4bde95bf 100644 --- a/pyuvdata/uvcal/calfits.py +++ b/pyuvdata/uvcal/calfits.py @@ -28,6 +28,7 @@ class CALFITS(UVCal): def write_calfits( self, filename, + *, run_check=True, check_extra=True, run_check_acceptability=True, @@ -525,6 +526,7 @@ def write_calfits( def read_calfits( self, filename, + *, read_data=True, background_lsts=True, run_check=True, diff --git a/pyuvdata/uvcal/fhd_cal.py b/pyuvdata/uvcal/fhd_cal.py index cefefa48b1..10ea8821d1 100644 --- a/pyuvdata/uvcal/fhd_cal.py +++ b/pyuvdata/uvcal/fhd_cal.py @@ -30,6 +30,7 @@ class FHDCal(UVCal): @copy_replace_short_description(UVCal.read_fhd_cal, style=DocstringStyle.NUMPYDOC) def read_fhd_cal( self, + *, cal_file, obs_file, layout_file=None, diff --git a/pyuvdata/uvcal/tests/test_fhd_cal.py b/pyuvdata/uvcal/tests/test_fhd_cal.py index 4d77da2964..d4e7dd03ab 100644 --- a/pyuvdata/uvcal/tests/test_fhd_cal.py +++ b/pyuvdata/uvcal/tests/test_fhd_cal.py @@ -308,8 +308,8 @@ def test_break_read_fhdcal(cal_file, obs_file, layout_file, settings_file, nfile with uvtest.check_warnings(warning_list, match=message_list): fhd_cal.read_fhd_cal( - cal_file, - obs_file, + cal_file=cal_file, + obs_file=obs_file, settings_file=settings_file, use_future_array_shapes=True, ) @@ -324,8 +324,8 @@ def 
test_break_read_fhdcal(cal_file, obs_file, layout_file, settings_file, nfile "an error in version 2.5", ): fhd_cal.read_fhd_cal( - cal_file, - obs_file, + cal_file=cal_file, + obs_file=obs_file, layout_file=layout_file, read_data=False, use_future_array_shapes=True, diff --git a/pyuvdata/uvcal/tests/test_uvcal.py b/pyuvdata/uvcal/tests/test_uvcal.py index 80c78219a9..ca60680eb8 100644 --- a/pyuvdata/uvcal/tests/test_uvcal.py +++ b/pyuvdata/uvcal/tests/test_uvcal.py @@ -1891,12 +1891,12 @@ def test_reorder_ants( ant_num_diff = np.diff(calobj2.ant_array) assert np.all(ant_num_diff > 0) - calobj2.reorder_antennas("-number") + calobj2.reorder_antennas(order="-number") ant_num_diff = np.diff(calobj2.ant_array) assert np.all(ant_num_diff < 0) sorted_names = np.sort(calobj.antenna_names) - calobj.reorder_antennas("name") + calobj.reorder_antennas(order="name") temp = np.asarray(calobj.antenna_names) dtype_use = temp.dtype name_array = np.zeros_like(calobj.ant_array, dtype=dtype_use) @@ -1908,10 +1908,10 @@ def test_reorder_ants( assert np.all(sorted_names == name_array) # test sorting with an integer array. First resort back to by number - calobj2.reorder_antennas("number") + calobj2.reorder_antennas(order="number") sorted_nums = [int(name[3:]) for name in sorted_names] index_array = [np.nonzero(calobj2.ant_array == ant)[0][0] for ant in sorted_nums] - calobj2.reorder_antennas(index_array) + calobj2.reorder_antennas(order=index_array) assert calobj2 == calobj @@ -2206,11 +2206,11 @@ def test_reorder_jones( calobj = calobj2.copy() # this is a no-op because it's already sorted this way - calobj2.reorder_jones("-number") + calobj2.reorder_jones(order="-number") jnum_diff = np.diff(calobj2.jones_array) assert np.all(jnum_diff < 0) - calobj2.reorder_jones("number") + calobj2.reorder_jones(order="number") jnum_diff = np.diff(calobj2.jones_array) assert np.all(jnum_diff > 0) @@ -2234,7 +2234,7 @@ def test_reorder_jones( # test sorting with an index array. 
Sort back to number first so indexing works sorted_nums = uvutils.jstr2num(sorted_names, x_orientation=calobj.x_orientation) index_array = [np.nonzero(calobj.jones_array == num)[0][0] for num in sorted_nums] - calobj.reorder_jones(index_array) + calobj.reorder_jones(order=index_array) assert calobj2 == calobj @@ -2325,14 +2325,14 @@ def test_add_different_sorting( cal2 = calobj.select(jones=np.array([-6, -8]), inplace=False) if sort_type == "ant": - cal1.reorder_antennas("number") - cal2.reorder_antennas("-number") - calobj.reorder_antennas("name") + cal1.reorder_antennas(order="number") + cal2.reorder_antennas(order="-number") + calobj.reorder_antennas(order="name") order_check = cal1._ant_array == cal2._ant_array elif sort_type == "time": - cal1.reorder_times("time") - cal2.reorder_times("-time") - calobj.reorder_times("time") + cal1.reorder_times(order="time") + cal2.reorder_times(order="-time") + calobj.reorder_times(order="time") order_check = cal1._time_array == cal2._time_array elif sort_type == "freq": if wide_band: @@ -2346,9 +2346,9 @@ def test_add_different_sorting( calobj.reorder_freqs(channel_order="freq") order_check = cal1._freq_array == cal2._freq_array elif sort_type == "jones": - cal1.reorder_jones("name") - cal2.reorder_jones("-number") - calobj.reorder_jones("number") + cal1.reorder_jones(order="name") + cal2.reorder_jones(order="-number") + calobj.reorder_jones(order="number") order_check = cal1._jones_array == cal2._jones_array # Make sure that the order has actually been scrambled @@ -2359,11 +2359,11 @@ def test_add_different_sorting( cal4 = cal2 + cal1 if sort_type == "ant": - cal3.reorder_antennas("name") - cal4.reorder_antennas("name") + cal3.reorder_antennas(order="name") + cal4.reorder_antennas(order="name") elif sort_type == "time": - cal3.reorder_times("time") - cal4.reorder_times("time") + cal3.reorder_times(order="time") + cal4.reorder_times(order="time") elif sort_type == "freq": if wide_band: cal3.reorder_freqs() @@ -2372,8 +2372,8 @@ def test_add_different_sorting( cal3.reorder_freqs(channel_order="freq") cal4.reorder_freqs(channel_order="freq") elif sort_type == "jones": - cal3.reorder_jones("number") - cal4.reorder_jones("number") + cal3.reorder_jones(order="number") + cal4.reorder_jones(order="number") # Deal with the history separately, since it will be different assert str.startswith(cal3.history, calobj.history) @@ -3918,15 +3918,15 @@ def test_uvcal_get_methods(future_shapes, gain_data): np.testing.assert_array_almost_equal(gain_arr, expected_array) # test variable key input - gain_arr2 = uvc.get_gains(*key) + gain_arr2 = uvc.get_gains(key[0], jpol=key[1]) np.testing.assert_array_almost_equal(gain_arr, gain_arr2) gain_arr2 = uvc.get_gains(key[0]) np.testing.assert_array_almost_equal(gain_arr, gain_arr2) gain_arr2 = uvc.get_gains(key[:1]) np.testing.assert_array_almost_equal(gain_arr, gain_arr2) - gain_arr2 = uvc.get_gains(10, -5) + gain_arr2 = uvc.get_gains(10, jpol=-5) np.testing.assert_array_almost_equal(gain_arr, gain_arr2) - gain_arr2 = uvc.get_gains(10, "x") + gain_arr2 = uvc.get_gains(10, jpol="x") np.testing.assert_array_almost_equal(gain_arr, gain_arr2) # check has_key @@ -3937,10 +3937,15 @@ def test_uvcal_get_methods(future_shapes, gain_data): assert not uvc._key_exists(antnum=101, jpol="Jee") # test exceptions - pytest.raises(ValueError, uvc.get_gains, 1) - pytest.raises(ValueError, uvc.get_gains, (10, "Jnn")) + with pytest.raises(ValueError, match="1 not found in ant_array"): + uvc.get_gains(1) + with pytest.raises(ValueError, 
match="-6 not found in jones_array"): + uvc.get_gains((10, "Jnn")) uvc.cal_type = "delay" - pytest.raises(ValueError, uvc.get_gains, 10) + with pytest.raises( + ValueError, match=re.escape("cal_type must be 'gain' for get_gains() method") + ): + uvc.get_gains(10) @pytest.mark.filterwarnings("ignore:This method will be removed in version 3.0 when") @@ -4195,7 +4200,10 @@ def test_init_from_uvdata( uvc2 = uvc.copy(metadata_only=True) uvc_new = UVCal.initialize_from_uvdata( - uvd, uvc.gain_convention, uvc.cal_style, future_array_shapes=uvcal_future_shapes + uvd, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, + future_array_shapes=uvcal_future_shapes, ) assert np.allclose(uvc2.antenna_positions, uvc_new.antenna_positions, atol=0.1) @@ -4275,8 +4283,8 @@ def test_init_from_uvdata_setfreqs( uvc_new = UVCal.initialize_from_uvdata( uvd, - uvc.gain_convention, - uvc.cal_style, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, future_array_shapes=uvcal_future_shapes, freq_array=freqs_use, channel_width=channel_width, @@ -4289,8 +4297,8 @@ def test_init_from_uvdata_setfreqs( ): UVCal.initialize_from_uvdata( uvd, - uvc.gain_convention, - uvc.cal_style, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, future_array_shapes=uvcal_future_shapes, frequencies=freqs_use, channel_width=channel_width, @@ -4356,8 +4364,8 @@ def test_init_from_uvdata_settimes( uvc_new = UVCal.initialize_from_uvdata( uvd, - uvc.gain_convention, - uvc.cal_style, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, future_array_shapes=uvcal_future_shapes, metadata_only=metadata_only, time_array=times_use, @@ -4370,8 +4378,8 @@ def test_init_from_uvdata_settimes( ): UVCal.initialize_from_uvdata( uvd, - uvc.gain_convention, - uvc.cal_style, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, future_array_shapes=uvcal_future_shapes, metadata_only=metadata_only, times=times_use, @@ -4420,7 +4428,10 @@ def test_init_from_uvdata_setjones(uvcalibrate_data): uvc2 = uvc.copy(metadata_only=True) uvc_new = UVCal.initialize_from_uvdata( - uvd, uvc.gain_convention, uvc.cal_style, jones_array=[-5, -6] + uvd, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, + jones_array=[-5, -6], ) with pytest.warns( @@ -4428,7 +4439,10 @@ def test_init_from_uvdata_setjones(uvcalibrate_data): match="The jones keyword is deprecated in favor of jones_array", ): UVCal.initialize_from_uvdata( - uvd, uvc.gain_convention, uvc.cal_style, jones=[-5, -6] + uvd, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, + jones=[-5, -6], ) # antenna positions are different by ~6cm or less. The ones in the uvcal file @@ -4477,7 +4491,9 @@ def test_init_single_pol(uvcalibrate_data, pol): uvc2 = uvc.copy(metadata_only=True) - uvc_new = UVCal.initialize_from_uvdata(uvd, uvc.gain_convention, uvc.cal_style) + uvc_new = UVCal.initialize_from_uvdata( + uvd, gain_convention=uvc.gain_convention, cal_style=uvc.cal_style + ) # antenna positions are different by ~6cm or less. The ones in the uvcal file # derive from info on our telescope object while the ones in the uvdata file @@ -4519,7 +4535,9 @@ def test_init_from_uvdata_circular_pol(uvcalibrate_data): uvc2 = uvc.copy(metadata_only=True) - uvc_new = UVCal.initialize_from_uvdata(uvd, uvc.gain_convention, uvc.cal_style) + uvc_new = UVCal.initialize_from_uvdata( + uvd, gain_convention=uvc.gain_convention, cal_style=uvc.cal_style + ) # antenna positions are different by ~6cm or less. 
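The test_init_from_uvdata_setjones hunk above expects a DeprecationWarning whenever the old jones keyword is passed instead of jones_array. The usual shape of that kind of alias handling, sketched here with generic names (an assumption about the pattern, not pyuvdata's actual code):

import warnings

def initialize_sketch(*, jones_array=None, jones=None):
    """Accept a deprecated ``jones`` alias and forward it to ``jones_array``."""
    if jones is not None:
        warnings.warn(
            "The jones keyword is deprecated in favor of jones_array",
            DeprecationWarning,
        )
        if jones_array is None:
            jones_array = jones
    return jones_array

initialize_sketch(jones=[-5, -6])  # warns, then behaves like jones_array=[-5, -6]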
The ones in the uvcal file # derive from info on our telescope object while the ones in the uvdata file @@ -4592,8 +4610,8 @@ def test_init_from_uvdata_sky( uvc_new = UVCal.initialize_from_uvdata( uvd, - uvc.gain_convention, - uvc.cal_style, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, future_array_shapes=uvcal_future_shapes, sky_field=uvc_sky.sky_field, sky_catalog=uvc_sky.sky_catalog, @@ -4707,8 +4725,8 @@ def test_init_from_uvdata_delay( uvc_new = UVCal.initialize_from_uvdata( uvd, - uvc.gain_convention, - uvc.cal_style, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, future_array_shapes=uvcal_future_shapes, cal_type="delay", freq_range=freq_range, @@ -4807,8 +4825,8 @@ def test_init_from_uvdata_wideband( uvc_new = UVCal.initialize_from_uvdata( uvd, - uvc.gain_convention, - uvc.cal_style, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, wide_band=True, freq_range=freq_range, spw_array=spw_array, @@ -4849,11 +4867,16 @@ def test_init_from_uvdata_basic_errors(uvcalibrate_data): uvc._set_flex_spw() with pytest.raises(ValueError, match="uvdata must be a UVData object."): - UVCal.initialize_from_uvdata(uvc, uvc.gain_convention, uvc.cal_style) + UVCal.initialize_from_uvdata( + uvc, gain_convention=uvc.gain_convention, cal_style=uvc.cal_style + ) with pytest.raises(ValueError, match="cal_type must be either 'gain' or 'delay'."): UVCal.initialize_from_uvdata( - uvd, uvc.gain_convention, uvc.cal_style, cal_type="unknown" + uvd, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, + cal_type="unknown", ) with pytest.raises( @@ -4861,11 +4884,15 @@ def test_init_from_uvdata_basic_errors(uvcalibrate_data): match="If cal_style is 'sky', ref_antenna_name and sky_catalog must be " "provided.", ): - UVCal.initialize_from_uvdata(uvd, uvc.gain_convention, "sky") + UVCal.initialize_from_uvdata( + uvd, gain_convention=uvc.gain_convention, cal_style="sky" + ) uvd.polarization_array = np.array([1, 2, 3, 4]) with pytest.raises(ValueError, match="you must set jones_array."): - UVCal.initialize_from_uvdata(uvd, uvc.gain_convention, uvc.cal_style) + UVCal.initialize_from_uvdata( + uvd, gain_convention=uvc.gain_convention, cal_style=uvc.cal_style + ) def test_init_from_uvdata_freqrange_errors(uvcalibrate_data): @@ -4880,8 +4907,8 @@ def test_init_from_uvdata_freqrange_errors(uvcalibrate_data): ): UVCal.initialize_from_uvdata( uvd, - uvc.gain_convention, - uvc.cal_style, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, cal_type="delay", freq_range=[1e8, 1.2e8, 1.3e8, 1.5e8], ) @@ -4895,8 +4922,8 @@ def test_init_from_uvdata_freqrange_errors(uvcalibrate_data): ): UVCal.initialize_from_uvdata( uvd, - uvc.gain_convention, - uvc.cal_style, + gain_convention=uvc.gain_convention, + cal_style=uvc.cal_style, cal_type="delay", freq_range=np.asarray([[1e8, 1.2e8], [1.3e8, 1.5e8]]), ) diff --git a/pyuvdata/uvcal/uvcal.py b/pyuvdata/uvcal/uvcal.py index 21b9b506ec..c1814f1f3f 100644 --- a/pyuvdata/uvcal/uvcal.py +++ b/pyuvdata/uvcal/uvcal.py @@ -1196,7 +1196,7 @@ def set_telescope_params(self, *, overwrite=False): f"Telescope {self.telescope_name} is not in known_telescopes." 
) - def _set_lsts_helper(self, astrometry_library=None): + def _set_lsts_helper(self, *, astrometry_library=None): latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees if self.time_array is not None: self.lst_array = uvutils.get_lst_for_time( @@ -2630,7 +2630,7 @@ def __add__( this_ants_ind[other_argsort] ] - this.reorder_antennas(temp_ind) + this.reorder_antennas(order=temp_ind) if len(this_times_ind) != 0: this_argsort = np.argsort(this_times_ind) @@ -2642,7 +2642,7 @@ def __add__( this_times_ind[other_argsort] ] - this.reorder_times(temp_ind) + this.reorder_times(order=temp_ind) if len(this_freq_ind) != 0: this_argsort = np.argsort(this_freq_ind) @@ -2672,7 +2672,7 @@ def __add__( this_jones_ind[other_argsort] ] - this.reorder_jones(temp_ind) + this.reorder_jones(order=temp_ind) # Update filename parameter this.filename = uvutils._combine_filenames(this.filename, other.filename) @@ -4876,7 +4876,7 @@ def read_calfits(self, filename, **kwargs): del calfits_obj def read_fhd_cal( - self, *, cal_file, obs_file, layout_file=None, settings_file=None, **kwargs + self, cal_file, *, obs_file, layout_file=None, settings_file=None, **kwargs ): """ Read data from an FHD cal.sav file. @@ -4980,8 +4980,8 @@ def read_fhd_cal( settings_file_use = None self.read_fhd_cal( - cal_file[0], - obs_file[0], + cal_file=cal_file[0], + obs_file=obs_file[0], layout_file=layout_file_use, settings_file=settings_file_use, **kwargs, @@ -4994,8 +4994,8 @@ def read_fhd_cal( if layout_file is not None: layout_file_use = layout_file[ind + 1] uvcal2.read_fhd_cal( - f, - obs_file[ind + 1], + cal_file=f, + obs_file=obs_file[ind + 1], layout_file=layout_file_use, settings_file=settings_file_use, **kwargs, @@ -5019,8 +5019,8 @@ def read_fhd_cal( fhd_cal_obj = fhd_cal.FHDCal() fhd_cal_obj.read_fhd_cal( - cal_file, - obs_file, + cal_file=cal_file, + obs_file=obs_file, layout_file=layout_file, settings_file=settings_file, **kwargs, @@ -5268,8 +5268,8 @@ def read( elif file_type == "fhd": self.read_fhd_cal( - filename, - obs_file, + cal_file=filename, + obs_file=obs_file, layout_file=layout_file, settings_file=settings_file, raw=raw, From 9ddf62b4aee051ceac01a1590bb0e2b354592d0e Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Mon, 28 Aug 2023 14:01:24 -0700 Subject: [PATCH 05/12] finish up params by name --- pyuvdata/uvcal/fhd_cal.py | 16 +- pyuvdata/uvcal/initializers.py | 23 +- pyuvdata/uvdata/fhd.py | 19 +- pyuvdata/uvdata/initializers.py | 36 +- pyuvdata/uvdata/mir.py | 9 +- pyuvdata/uvdata/miriad.py | 15 +- pyuvdata/uvdata/ms.py | 11 +- pyuvdata/uvdata/mwa_corr_fits.py | 67 +++- pyuvdata/uvdata/tests/test_initializers.py | 4 +- pyuvdata/uvdata/tests/test_mir.py | 8 +- pyuvdata/uvdata/tests/test_miriad.py | 8 +- pyuvdata/uvdata/tests/test_ms.py | 20 +- pyuvdata/uvdata/tests/test_uvdata.py | 377 +++++++++++++-------- pyuvdata/uvdata/tests/test_uvfits.py | 8 +- pyuvdata/uvdata/tests/test_uvh5.py | 222 ++++++++---- pyuvdata/uvdata/uvdata.py | 76 +++-- pyuvdata/uvdata/uvfits.py | 62 ++-- pyuvdata/uvdata/uvh5.py | 84 +++-- pyuvdata/uvflag/tests/test_uvflag.py | 2 +- pyuvdata/uvflag/uvflag.py | 4 +- 20 files changed, 690 insertions(+), 381 deletions(-) diff --git a/pyuvdata/uvcal/fhd_cal.py b/pyuvdata/uvcal/fhd_cal.py index 10ea8821d1..be02b0e6db 100644 --- a/pyuvdata/uvcal/fhd_cal.py +++ b/pyuvdata/uvcal/fhd_cal.py @@ -127,14 +127,14 @@ def read_fhd_cal( ] layout_param_dict = get_fhd_layout_info( - layout_file, - self.telescope_name, - latitude, - longitude, - altitude, - self._lst_array.tols, - 
self._telescope_location.tols, - obs_tile_names, + layout_file=layout_file, + telescope_name=self.telescope_name, + latitude=latitude, + longitude=longitude, + altitude=altitude, + radian_tol=uvutils.RADIAN_TOL, + loc_tols=self._telescope_location.tols, + obs_tile_names=obs_tile_names, run_check_acceptability=True, ) diff --git a/pyuvdata/uvcal/initializers.py b/pyuvdata/uvcal/initializers.py index 21e66e07c2..c1e39af72e 100644 --- a/pyuvdata/uvcal/initializers.py +++ b/pyuvdata/uvcal/initializers.py @@ -163,7 +163,10 @@ def new_uvcal( uvc = UVCal() antenna_positions, antenna_names, antenna_numbers = get_antenna_params( - antenna_positions, antenna_names, antenna_numbers, antname_format + antenna_positions=antenna_positions, + antenna_names=antenna_names, + antenna_numbers=antenna_numbers, + antname_format=antname_format, ) if ant_array is None: ant_array = antenna_numbers @@ -177,16 +180,16 @@ def new_uvcal( if time_array is not None: lst_array, integration_time = get_time_params( - telescope_location, - time_array, - integration_time, + telescope_location=telescope_location, + time_array=time_array, + integration_time=integration_time, astrometry_library=astrometry_library, ) if time_range is not None: lst_range, integration_time = get_time_params( - telescope_location, - time_range, - integration_time, + telescope_location=telescope_location, + time_array=time_range, + integration_time=integration_time, astrometry_library=astrometry_library, ) @@ -201,9 +204,11 @@ def new_uvcal( ) if freq_array is not None: - freq_array, channel_width = get_freq_params(freq_array, channel_width) + freq_array, channel_width = get_freq_params( + freq_array=freq_array, channel_width=channel_width + ) flex_spw_id_array, spw_array = get_spw_params( - flex_spw_id_array, freq_array=freq_array + flex_spw_id_array=flex_spw_id_array, freq_array=freq_array ) wide_band = False freq_range = None diff --git a/pyuvdata/uvdata/fhd.py b/pyuvdata/uvdata/fhd.py index 245d13ff51..c0bb3e4aad 100644 --- a/pyuvdata/uvdata/fhd.py +++ b/pyuvdata/uvdata/fhd.py @@ -19,7 +19,7 @@ __all__ = ["get_fhd_history", "get_fhd_layout_info", "FHD"] -def get_fhd_history(settings_file, return_user=False): +def get_fhd_history(settings_file, *, return_user=False): """ Small function to get the important history from an FHD settings text file. 
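Nearly every hunk in this series inserts a bare * into a signature, as in get_fhd_history(settings_file, *, return_user=False) just above. A minimal sketch of what that buys (illustrative names only):

def read_sketch(filename, *, run_check=True, check_extra=True):
    """Everything after the bare ``*`` can only be passed by keyword."""
    return filename, run_check, check_extra

read_sketch("cal.sav", run_check=False)  # fine
# read_sketch("cal.sav", False)  # TypeError: takes 1 positional argument but 2 were given

Callers that relied on argument order stop working once the * lands, which is why the bulk of these diffs update call sites to pass every optional argument by name.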
@@ -92,6 +92,7 @@ def _latlonalt_close(latlonalt1, latlonalt2, radian_tol, loc_tols): def get_fhd_layout_info( + *, layout_file, telescope_name, latitude, @@ -534,14 +535,14 @@ def read_fhd( for ant in obs_tile_names ] layout_param_dict = get_fhd_layout_info( - layout_file, - self.telescope_name, - latitude, - longitude, - altitude, - uvutils.RADIAN_TOL, - self._telescope_location.tols, - obs_tile_names, + layout_file=layout_file, + telescope_name=self.telescope_name, + latitude=latitude, + longitude=longitude, + altitude=altitude, + radian_tol=uvutils.RADIAN_TOL, + loc_tols=self._telescope_location.tols, + obs_tile_names=obs_tile_names, run_check_acceptability=True, ) diff --git a/pyuvdata/uvdata/initializers.py b/pyuvdata/uvdata/initializers.py index b2ae7c7da3..8e457b4fcf 100644 --- a/pyuvdata/uvdata/initializers.py +++ b/pyuvdata/uvdata/initializers.py @@ -32,6 +32,7 @@ def get_antenna_params( + *, antenna_positions: np.ndarray | dict[str | int, np.ndarray], antenna_names: list[str] | None = None, antenna_numbers: list[int] | None = None, @@ -91,6 +92,7 @@ def get_antenna_params( def get_time_params( + *, telescope_location: Locations, time_array: np.ndarray, integration_time: float | np.ndarray | None = None, @@ -151,7 +153,7 @@ def get_time_params( def get_freq_params( - freq_array: np.ndarray, channel_width: float | np.ndarray | None = None + *, freq_array: np.ndarray, channel_width: float | np.ndarray | None = None ) -> tuple[np.ndarray, np.ndarray]: """Configure frequency parameters for new UVData object.""" if not isinstance(freq_array, np.ndarray): @@ -182,15 +184,16 @@ def get_freq_params( def get_baseline_params( - antenna_positions: np.ndarray, antpairs: np.ndarray + *, antenna_numbers: np.ndarray, antpairs: np.ndarray ) -> np.ndarray: """Configure baseline parameters for new UVData object.""" return utils.antnums_to_baseline( - antpairs[:, 0], antpairs[:, 1], len(antenna_positions) + antpairs[:, 0], antpairs[:, 1], len(antenna_numbers) ) def configure_blt_rectangularity( + *, times: np.ndarray, antpairs: np.ndarray, do_blt_outer: bool | None = None, @@ -292,7 +295,7 @@ def configure_blt_rectangularity( ) -def set_phase_params(obj, phase_center_catalog, phase_center_id_array, time_array): +def set_phase_params(obj, *, phase_center_catalog, phase_center_id_array, time_array): """Configure phase center parameters for new UVData object.""" if phase_center_catalog is None: obj._add_phase_center(cat_name="unprojected", cat_type="unprojected") @@ -315,6 +318,7 @@ def set_phase_params(obj, phase_center_catalog, phase_center_id_array, time_arra def get_spw_params( + *, flex_spw_id_array: np.ndarray | None = None, freq_array: np.ndarray | None = None, spw_array: np.ndarray | None = None, @@ -343,6 +347,7 @@ def get_spw_params( def new_uvdata( + *, freq_array: np.ndarray, polarization_array: np.ndarray | list[str | int] | tuple[str | int], antenna_positions: np.ndarray | dict[str | int, np.ndarray], @@ -501,13 +506,16 @@ def new_uvdata( obj = UVData() antenna_positions, antenna_names, antenna_numbers = get_antenna_params( - antenna_positions, antenna_names, antenna_numbers, antname_format + antenna_positions=antenna_positions, + antenna_names=antenna_names, + antenna_numbers=antenna_numbers, + antname_format=antname_format, ) lst_array, integration_time = get_time_params( - telescope_location, - times, - integration_time, + telescope_location=telescope_location, + time_array=times, + integration_time=integration_time, astrometry_library=astrometry_library, ) @@ -531,14 +539,20 @@ def 
new_uvdata( time_axis_faster_than_bls=time_axis_faster_than_bls, time_sized_arrays=(lst_array, integration_time), ) - baseline_array = get_baseline_params(antenna_numbers, antpairs) + baseline_array = get_baseline_params( + antenna_numbers=antenna_numbers, antpairs=antpairs + ) # Re-get the ant arrays because the baseline array may have changed ant_1_array, ant_2_array = antpairs.T - freq_array, channel_width = get_freq_params(freq_array, channel_width) + freq_array, channel_width = get_freq_params( + freq_array=freq_array, channel_width=channel_width + ) - flex_spw_id_array, spw_array = get_spw_params(flex_spw_id_array, freq_array) + flex_spw_id_array, spw_array = get_spw_params( + flex_spw_id_array=flex_spw_id_array, freq_array=freq_array + ) polarization_array = np.array(polarization_array) if polarization_array.dtype.kind != "i": diff --git a/pyuvdata/uvdata/mir.py b/pyuvdata/uvdata/mir.py index dfa92b6870..2d95c44ccc 100644 --- a/pyuvdata/uvdata/mir.py +++ b/pyuvdata/uvdata/mir.py @@ -33,6 +33,7 @@ class Mir(UVData): def read_mir( self, filepath, + *, antenna_nums=None, antenna_names=None, bls=None, @@ -684,10 +685,10 @@ def _init_from_mir_parser( mir_data.in_data["mjd"][source_mask], scale="tt", format="mjd" ).utc.jd source_ra, source_dec = uvutils.transform_app_to_icrs( - time_arr, - mir_data.in_data["ara"][source_mask], - mir_data.in_data["adec"][source_mask], - self.telescope_location_lat_lon_alt, + time_array=time_arr, + app_ra=mir_data.in_data["ara"][source_mask], + app_dec=mir_data.in_data["adec"][source_mask], + telescope_loc=self.telescope_location_lat_lon_alt, ) self._add_phase_center( source_name, diff --git a/pyuvdata/uvdata/miriad.py b/pyuvdata/uvdata/miriad.py index 8f76e77eec..5badb50197 100644 --- a/pyuvdata/uvdata/miriad.py +++ b/pyuvdata/uvdata/miriad.py @@ -265,7 +265,7 @@ def _load_miriad_variables(self, uv): return default_miriad_variables, other_miriad_variables, extra_miriad_variables - def _load_telescope_coords(self, uv, correct_lat_lon=True): + def _load_telescope_coords(self, uv, *, correct_lat_lon=True): """ Load telescope lat, lon, alt coordinates from aipy.miriad UV descriptor. @@ -380,7 +380,7 @@ def _load_telescope_coords(self, uv, correct_lat_lon=True): ) ) - def _load_antpos(self, uv, sorted_unique_ants=None, correct_lat_lon=True): + def _load_antpos(self, uv, *, sorted_unique_ants=None, correct_lat_lon=True): """ Load antennas and their positions from a Miriad UV descriptor. @@ -634,7 +634,7 @@ def _load_antpos(self, uv, sorted_unique_ants=None, correct_lat_lon=True): self.Nants_telescope, dtype=np.float64 ) - def _read_miriad_metadata(self, uv, correct_lat_lon=True): + def _read_miriad_metadata(self, uv, *, correct_lat_lon=True): """ Read in metadata (parameter info) but not data from a miriad file. 
@@ -720,6 +720,7 @@ def _read_miriad_metadata(self, uv, correct_lat_lon=True): def read_miriad( self, filepath, + *, antenna_nums=None, ant_str=None, bls=None, @@ -1514,7 +1515,7 @@ def read_miriad( self._add_phase_center( name, - cat_type, + cat_type=cat_type, cat_lon=lon_use, cat_lat=lat_use, cat_frame=cat_frame, @@ -1611,6 +1612,7 @@ def read_miriad( def write_miriad( self, filepath, + *, clobber=False, run_check=True, check_extra=True, @@ -1683,7 +1685,10 @@ def write_miriad( if (self.telescope_location is not None) and calc_lst: latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees miriad_lsts = uvutils.get_lst_for_time( - miriad_time_array, latitude, longitude, altitude + miriad_time_array, + latitude=latitude, + longitude=longitude, + altitude=altitude, ) else: # The long float below is the number of sidereal days per day. The below diff --git a/pyuvdata/uvdata/ms.py b/pyuvdata/uvdata/ms.py index 1d33952a53..bdf558b43d 100644 --- a/pyuvdata/uvdata/ms.py +++ b/pyuvdata/uvdata/ms.py @@ -1074,6 +1074,7 @@ def _write_ms_history(self, filepath): def write_ms( self, filepath, + *, force_phase=False, clobber=False, run_check=True, @@ -1350,7 +1351,7 @@ def write_ms( self._write_ms_observation(filepath) self._write_ms_history(filepath) - def _parse_casa_frame_ref(self, ref_name, raise_error=True): + def _parse_casa_frame_ref(self, ref_name, *, raise_error=True): """ Interpret a CASA frame into an astropy-friendly frame and epoch. @@ -1406,7 +1407,7 @@ def _parse_casa_frame_ref(self, ref_name, raise_error=True): return frame_name, epoch_val - def _parse_pyuvdata_frame_ref(self, frame_name, epoch_val, raise_error=True): + def _parse_pyuvdata_frame_ref(self, frame_name, epoch_val, *, raise_error=True): """ Interpret a UVData pair of frame + epoch into a CASA frame name. @@ -1464,6 +1465,7 @@ def _parse_pyuvdata_frame_ref(self, frame_name, epoch_val, raise_error=True): def _read_ms_main( self, filepath, + *, data_column, data_desc_dict, read_weights=True, @@ -1912,6 +1914,7 @@ def _read_ms_main( def read_ms( self, filepath, + *, data_column="DATA", pol_order="AIPS", background_lsts=True, @@ -2012,8 +2015,8 @@ def read_ms( flip_conj = ("importuvfits" in self.history) and (not pyuvdata_written) spw_list, field_list, pol_list, flex_pol = self._read_ms_main( filepath, - data_column, - data_desc_dict, + data_column=data_column, + data_desc_dict=data_desc_dict, read_weights=read_weights, flip_conj=flip_conj, raise_error=raise_error, diff --git a/pyuvdata/uvdata/mwa_corr_fits.py b/pyuvdata/uvdata/mwa_corr_fits.py index 328f25d5be..4fce13150e 100644 --- a/pyuvdata/uvdata/mwa_corr_fits.py +++ b/pyuvdata/uvdata/mwa_corr_fits.py @@ -79,8 +79,9 @@ def read_metafits( # antenna positions are "relative to # the centre of the array in local topocentric \"east\", \"north\", # \"height\". Units are meters." + latitude, longitude, altitude = mwa_telescope_obj.telescope_location_lat_lon_alt antenna_positions_ecef = uvutils.ECEF_from_ENU( - antenna_positions, *mwa_telescope_obj.telescope_location_lat_lon_alt + antenna_positions, latitude=latitude, longitude=longitude, altitude=altitude ) # make antenna positions relative to telescope location antenna_positions = ( @@ -308,7 +309,7 @@ def corrcorrect_simps(rho, sig1, sig2): return integrated_khat -def corrcorrect_vect_prime(rho, sig1, sig2): +def corrcorrect_vect_prime(*, rho, sig1, sig2): """ Calculate the derivative of corrcorrect_simps. 
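In the read_metafits hunk above, the metafits east/north/height offsets are fed to ECEF_from_ENU by keyword and then made relative to the telescope location. The geometry behind that conversion, sketched with stand-in names and radian inputs (the library routine additionally adds the array-center ECEF position derived from the altitude):

import numpy as np

def enu_to_ecef_offsets(enu, *, latitude, longitude):
    """Rotate (N, 3) east/north/up offsets in meters into ECEF-frame offsets.

    ``latitude`` and ``longitude`` are in radians.
    """
    sin_lat, cos_lat = np.sin(latitude), np.cos(latitude)
    sin_lon, cos_lon = np.sin(longitude), np.cos(longitude)
    rot = np.array(
        [
            [-sin_lon, -sin_lat * cos_lon, cos_lat * cos_lon],
            [cos_lon, -sin_lat * sin_lon, cos_lat * sin_lon],
            [0.0, cos_lat, sin_lat],
        ]
    )
    return np.asarray(enu) @ rot.T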
@@ -361,7 +362,7 @@ def van_vleck_autos(sighat_arr): return sighat_arr -def van_vleck_crosses_int(k_arr, sig1_arr, sig2_arr, cheby_approx): +def van_vleck_crosses_int(*, k_arr, sig1_arr, sig2_arr, cheby_approx): """ Use Newton's method to solve the inverse of corrcorrect_simps. @@ -400,11 +401,13 @@ def van_vleck_crosses_int(k_arr, sig1_arr, sig2_arr, cheby_approx): sig2 = sig2_arr[nonzero_inds] x0 = khat / (sig1 * sig2) corr = corrcorrect_simps(x0, sig1, sig2) - khat - x0 -= corr / corrcorrect_vect_prime(x0, sig1, sig2) + x0 -= corr / corrcorrect_vect_prime(rho=x0, sig1=sig1, sig2=sig2) inds = np.where(np.abs(corr) > 1e-8)[0] while len(inds) != 0: corr = corrcorrect_simps(x0[inds], sig1[inds], sig2[inds]) - khat[inds] - x0[inds] -= corr / corrcorrect_vect_prime(x0[inds], sig1[inds], sig2[inds]) + x0[inds] -= corr / corrcorrect_vect_prime( + rho=x0[inds], sig1=sig1[inds], sig2=sig2[inds] + ) inds2 = np.where(np.abs(corr) > 1e-8)[0] inds = inds[inds2] k_arr[nonzero_inds] = x0 * sig1 * sig2 @@ -473,9 +476,15 @@ def van_vleck_crosses_cheby( sig1[broad_inds] * sig2[broad_inds] ) khat[~broad_inds] = van_vleck_crosses_int( - khat.real[~broad_inds], sig1[~broad_inds], sig2[~broad_inds], cheby_approx + k_arr=khat.real[~broad_inds], + sig1_arr=sig1[~broad_inds], + sig2_arr=sig2[~broad_inds], + cheby_approx=cheby_approx, ) + 1j * van_vleck_crosses_int( - khat.imag[~broad_inds], sig1[~broad_inds], sig2[~broad_inds], cheby_approx + k_arr=khat.imag[~broad_inds], + sig1_arr=sig1[~broad_inds], + sig2_arr=sig2[~broad_inds], + cheby_approx=cheby_approx, ) return khat @@ -944,12 +953,22 @@ def van_vleck_correction( k, :, j, np.array([yy, yx, xy, xx]) ].flatten() # correct real - kap = van_vleck_crosses_int(khat.real, sig1, sig2, cheby_approx) + kap = van_vleck_crosses_int( + k_arr=khat.real, + sig1_arr=sig1, + sig2_arr=sig2, + cheby_approx=cheby_approx, + ) self.data_array.real[ k, :, j, np.array([yy, yx, xy, xx]) ] = kap.reshape(self.Npols, self.Ntimes) # correct imaginary - kap = van_vleck_crosses_int(khat.imag, sig1, sig2, cheby_approx) + kap = van_vleck_crosses_int( + k_arr=khat.imag, + sig1_arr=sig1, + sig2_arr=sig2, + cheby_approx=cheby_approx, + ) self.data_array.imag[ k, :, j, np.array([yy, yx, xy, xx]) ] = kap.reshape(self.Npols, self.Ntimes) @@ -961,10 +980,19 @@ def van_vleck_correction( sig2 = self.data_array.real[k, :, j, xx] khat = self.data_array[k, :, j, yx] # correct real - kap = van_vleck_crosses_int(khat.real, sig1, sig2, cheby_approx) - self.data_array.real[k, :, j, yx] = kap + kap = van_vleck_crosses_int( + k_arr=khat.real, + sig1_arr=sig1, + sig2_arr=sig2, + cheby_approx=cheby_approx, + ) # correct imaginary - kap = van_vleck_crosses_int(khat.imag, sig1, sig2, cheby_approx) + kap = van_vleck_crosses_int( + k_arr=khat.imag, + sig1_arr=sig1, + sig2_arr=sig2, + cheby_approx=cheby_approx, + ) self.data_array.imag[k, :, j, yx] = kap # correct xy autos self.data_array[good_autos, :, :, xy] = np.conj( @@ -985,7 +1013,13 @@ def van_vleck_correction( self.history += history_add_string def _flag_small_auto_ants( - self, nsamples, flag_small_auto_ants, ant_1_inds, ant_2_inds, flagged_ant_inds + self, + *, + nsamples, + flag_small_auto_ants, + ant_1_inds, + ant_2_inds, + flagged_ant_inds, ): """ Find and flag autocorrelations below a threshold. 
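The van_vleck_crosses_int hunks above keep the same Newton iteration, x0 -= corr / corrcorrect_vect_prime(...), only spelled with keyword arguments. The underlying root-finding pattern in a generic, self-contained form (not the pyuvdata implementation):

import numpy as np

def newton_invert(*, khat, func, dfunc, x0, tol=1e-8, maxiter=50):
    """Solve func(x) = khat elementwise via x <- x - (func(x) - khat) / dfunc(x)."""
    x = np.array(x0, dtype=float)
    for _ in range(maxiter):
        resid = func(x) - khat
        if np.all(np.abs(resid) <= tol):
            break
        x -= resid / dfunc(x)
    return x

# Check on a simple cubic: solves x**3 = 0.5, giving x ~ 0.7937
newton_invert(khat=np.array([0.5]), func=lambda x: x**3, dfunc=lambda x: 3 * x**2, x0=np.array([1.0]))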
@@ -1219,7 +1253,11 @@ def _apply_corrections( nsamples = self.channel_width[0] * self.integration_time[0] * 2 # look for small auto data and flag flagged_ant_inds = self._flag_small_auto_ants( - nsamples, flag_small_auto_ants, ant_1_inds, ant_2_inds, flagged_ant_inds + nsamples=nsamples, + flag_small_auto_ants=flag_small_auto_ants, + ant_1_inds=ant_1_inds, + ant_2_inds=ant_2_inds, + flagged_ant_inds=flagged_ant_inds, ) else: nsamples = None @@ -1265,6 +1303,7 @@ def _apply_corrections( def read_mwa_corr_fits( self, filelist, + *, use_aoflagger_flags=None, remove_dig_gains=True, remove_coarse_band=True, diff --git a/pyuvdata/uvdata/tests/test_initializers.py b/pyuvdata/uvdata/tests/test_initializers.py index a2302e1906..6981966aac 100644 --- a/pyuvdata/uvdata/tests/test_initializers.py +++ b/pyuvdata/uvdata/tests/test_initializers.py @@ -494,11 +494,11 @@ def test_get_spw_params(): idarray = np.array([0, 0, 0, 0, 0]) freq = np.linspace(0, 1, 5) - _id, spw = get_spw_params(idarray, freq) + _id, spw = get_spw_params(flex_spw_id_array=idarray, freq_array=freq) assert np.all(spw == 0) idarray = np.array([0, 0, 0, 0, 1]) - _id, spw = get_spw_params(idarray, freq) + _id, spw = get_spw_params(flex_spw_id_array=idarray, freq_array=freq) assert np.all(spw == [0, 1]) with pytest.raises( diff --git a/pyuvdata/uvdata/tests/test_mir.py b/pyuvdata/uvdata/tests/test_mir.py index 3d0af7b01d..74afc8a477 100644 --- a/pyuvdata/uvdata/tests/test_mir.py +++ b/pyuvdata/uvdata/tests/test_mir.py @@ -175,7 +175,9 @@ def test_read_mir_write_ms(sma_mir, tmp_path, future_shapes): # fix up the phase center info to match the mir dataset cat_id = list(sma_mir.phase_center_catalog.keys())[0] cat_name = sma_mir.phase_center_catalog[cat_id]["cat_name"] - ms_uv._update_phase_center_id(list(ms_uv.phase_center_catalog.keys())[0], cat_id) + ms_uv._update_phase_center_id( + list(ms_uv.phase_center_catalog.keys())[0], new_id=cat_id + ) ms_uv.phase_center_catalog[cat_id]["cat_name"] = cat_name ms_uv.phase_center_catalog[cat_id]["info_source"] = "file" @@ -341,7 +343,9 @@ def test_read_mir_write_ms_flex_pol(mir_data, tmp_path): # fix up the phase center info to match the mir dataset cat_id = list(mir_uv.phase_center_catalog.keys())[0] cat_name = mir_uv.phase_center_catalog[cat_id]["cat_name"] - ms_uv._update_phase_center_id(list(ms_uv.phase_center_catalog.keys())[0], cat_id) + ms_uv._update_phase_center_id( + list(ms_uv.phase_center_catalog.keys())[0], new_id=cat_id + ) ms_uv.phase_center_catalog[cat_id]["cat_name"] = cat_name ms_uv.phase_center_catalog[cat_id]["info_source"] = "file" diff --git a/pyuvdata/uvdata/tests/test_miriad.py b/pyuvdata/uvdata/tests/test_miriad.py index 8e3f577e23..a01f91555f 100644 --- a/pyuvdata/uvdata/tests/test_miriad.py +++ b/pyuvdata/uvdata/tests/test_miriad.py @@ -938,7 +938,7 @@ def test_miriad_ephem(tmp_path, casa_uvfits, cut_ephem_pts, extrapolate): uv_in.write_miriad(testfile, clobber=True) uv2 = UVData.from_file(testfile, use_future_array_shapes=True) - uv2._update_phase_center_id(0, 1) + uv2._update_phase_center_id(0, new_id=1) uv2.phase_center_catalog[1]["info_source"] = uv_in.phase_center_catalog[1][ "info_source" ] @@ -1855,7 +1855,7 @@ def test_multi_files(casa_uvfits, tmp_path): testfile2 = os.path.join(tmp_path, "uv2") # rename telescope to avoid name warning uv_full.unproject_phase() - uv_full.conjugate_bls("ant1 1: @@ -6312,7 +6315,7 @@ def test_redundancy_contract_expand( # This must be done after reconjugation because reconjugation can alter the index # baseline red_gps, centers, 
lengths, conjugates = uv0.get_redundancies( - tol, include_conjugates=True + tol=tol, include_conjugates=True ) index_bls = [] for gp_ind, gp in enumerate(red_gps): @@ -6941,7 +6944,7 @@ def test_overlapping_data_add(casa_uvfits, tmp_path, future_shapes): with pytest.raises( ValueError, match="These objects have overlapping data and cannot be combined." ): - uv4.__add__(uv4, uvfull) + uv4.__add__(uv4) # write individual objects out, and make sure that we can read in the list uv1_out = str(tmp_path / "uv1.uvfits") @@ -6990,7 +6993,9 @@ def test_lsts_from_time_with_only_unique(paper_uvh5): uv = paper_uvh5 lat, lon, alt = uv.telescope_location_lat_lon_alt_degrees # calculate the lsts for all elements in time array - full_lsts = uvutils.get_lst_for_time(uv.time_array, lat, lon, alt) + full_lsts = uvutils.get_lst_for_time( + uv.time_array, latitude=lat, longitude=lon, altitude=alt + ) # use `set_lst_from_time_array` to set the uv.lst_array using only unique values uv.set_lsts_from_time_array() assert np.array_equal(full_lsts, uv.lst_array) @@ -7004,7 +7009,9 @@ def test_lsts_from_time_with_only_unique_background(paper_uvh5): uv = paper_uvh5 lat, lon, alt = uv.telescope_location_lat_lon_alt_degrees # calculate the lsts for all elements in time array - full_lsts = uvutils.get_lst_for_time(uv.time_array, lat, lon, alt) + full_lsts = uvutils.get_lst_for_time( + uv.time_array, latitude=lat, longitude=lon, altitude=alt + ) # use `set_lst_from_time_array` to set the uv.lst_array using only unique values proc = uv.set_lsts_from_time_array(background=True) proc.join() @@ -8566,7 +8573,7 @@ def test_upsample_downsample_in_time_odd_resample(hera_uvh5, future_shapes): assert np.amax(uv_object.integration_time) <= max_integration_time uv_object.downsample_in_time( - np.amin(uv_object2.integration_time), blt_order="baseline" + min_int_time=np.amin(uv_object2.integration_time), blt_order="baseline" ) # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we @@ -8612,7 +8619,7 @@ def test_upsample_downsample_in_time_metadata_only(hera_uvh5): assert np.amax(uv_object.integration_time) <= max_integration_time uv_object.downsample_in_time( - np.amin(uv_object2.integration_time), blt_order="baseline" + min_int_time=np.amin(uv_object2.integration_time), blt_order="baseline" ) # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we @@ -9045,7 +9052,7 @@ def test_frequency_average_uneven( ) with uvtest.check_warnings(warn, match=msg): uvobj.frequency_average( - n_chan_to_avg, + n_chan_to_avg=n_chan_to_avg, keep_ragged=keep_ragged, summing_correlator_mode=sum_corr, respect_spws=respect_spws, @@ -9279,7 +9286,7 @@ def test_frequency_average_flagging( warn = None msg = "" with uvtest.check_warnings(warn, match=msg): - uvobj.frequency_average(n_chan_to_avg, keep_ragged=keep_ragged) + uvobj.frequency_average(n_chan_to_avg=n_chan_to_avg, keep_ragged=keep_ragged) input_freqs = np.squeeze(uvobj2.freq_array) @@ -9453,7 +9460,7 @@ def test_frequency_average_propagate_flags(casa_uvfits, future_shapes, keep_ragg msg = "" with uvtest.check_warnings(warn, match=msg): uvobj.frequency_average( - n_chan_to_avg, propagate_flags=True, keep_ragged=keep_ragged + n_chan_to_avg=n_chan_to_avg, propagate_flags=True, keep_ragged=keep_ragged ) input_freqs = np.squeeze(uvobj2.freq_array) @@ -10570,24 +10577,66 @@ def test_rename_phase_center_bad_args(carma_miriad, args, err_type, msg): @pytest.mark.filterwarnings("ignore:Altitude is not present in Miriad file,") @pytest.mark.parametrize( - 
"args,err_type,msg", + "kwargs,err_type,msg", ( - [["abc", "xyz", 1], ValueError, "No catalog entries matching the name abc."], - [["3C273", -2, 1], TypeError, "Value provided to new_name must be a string"], - [["3C273", "3c273", 1.5], IndexError, "select_mask must be an array-like,"], - [["3C273", "3c273", 1], ValueError, "Data selected with select_mask includes"], - [[-1], ValueError, "No entry with the ID -1 found in the catalog"], - [[1, None, None, "hi"], TypeError, "Value provided to new_id must be an int"], - [[1, None, None, 2], ValueError, "The ID 2 is already in the catalog"], - [[35.5], TypeError, "catalog_identifier must be a string or an integer."], + [ + {"catalog_identifier": "abc", "new_name": "xyz", "select_mask": 1}, + ValueError, + "No catalog entries matching the name abc.", + ], + [ + {"catalog_identifier": "3C273", "new_name": -2, "select_mask": 1}, + TypeError, + "Value provided to new_name must be a string", + ], + [ + {"catalog_identifier": "3C273", "new_name": "3c273", "select_mask": 1.5}, + IndexError, + "select_mask must be an array-like,", + ], + [ + {"catalog_identifier": "3C273", "new_name": "3c273", "select_mask": 1}, + ValueError, + "Data selected with select_mask includes", + ], + [ + {"catalog_identifier": -1}, + ValueError, + "No entry with the ID -1 found in the catalog", + ], + [ + { + "catalog_identifier": 1, + "new_name": None, + "select_mask": None, + "new_id": "hi", + }, + TypeError, + "Value provided to new_id must be an int", + ], + [ + { + "catalog_identifier": 1, + "new_name": None, + "select_mask": None, + "new_id": 2, + }, + ValueError, + "The ID 2 is already in the catalog", + ], + [ + {"catalog_identifier": 35.5}, + TypeError, + "catalog_identifier must be a string or an integer.", + ], ), ) -def test_split_phase_center_bad_args(carma_miriad, args, err_type, msg): +def test_split_phase_center_bad_args(carma_miriad, kwargs, err_type, msg): """ Verify that split_phase_center will throw an error if supplied with bad args """ with pytest.raises(err_type, match=msg): - carma_miriad.split_phase_center(*args) + carma_miriad.split_phase_center(**kwargs) def test_split_phase_center_err_multiname(carma_miriad): @@ -10642,7 +10691,7 @@ def test_update_id_bad_args(sma_mir, cat_id, new_id, res_id, err_type, msg): Verify that _update_phase_center_id throws errors when supplied with bad args """ with pytest.raises(err_type, match=msg): - sma_mir._update_phase_center_id(cat_id, new_id, reserved_ids=res_id) + sma_mir._update_phase_center_id(cat_id, new_id=new_id, reserved_ids=res_id) def test_add_clear_phase_center(sma_mir): @@ -10727,7 +10776,7 @@ def test_update_id(sma_mir): def test_split_phase_center_warnings(sma_mir, name1, name2, select_mask, msg): # Now let's select no data at all with uvtest.check_warnings(UserWarning, match=msg): - sma_mir.split_phase_center(name1, name2, select_mask) + sma_mir.split_phase_center(name1, new_name=name2, select_mask=select_mask) def test_split_phase_center(hera_uvh5): @@ -10743,7 +10792,7 @@ def test_split_phase_center(hera_uvh5): # integration? 
select_mask = np.isin(hera_uvh5.time_array, np.unique(hera_uvh5.time_array)[::2]) - hera_uvh5.split_phase_center("3c84", "3c84_2", select_mask) + hera_uvh5.split_phase_center("3c84", new_name="3c84_2", select_mask=select_mask) cat_id1 = hera_uvh5._look_for_name("3c84") cat_id2 = hera_uvh5._look_for_name("3c84_2") # Check that the catalog IDs also line up w/ what we expect @@ -10773,13 +10822,16 @@ def test_split_phase_center_downselect(hera_uvh5): # Again, only select the first half of the data select_mask = np.isin(hera_uvh5.time_array, np.unique(hera_uvh5.time_array)[::2]) - hera_uvh5.split_phase_center("3c84", "3c84_2", select_mask) + hera_uvh5.split_phase_center("3c84", new_name="3c84_2", select_mask=select_mask) # Now effectively rename zenith2 as zenith3 by selecting all data and using # the downselect switch with uvtest.check_warnings(UserWarning, "All data for the source selected"): hera_uvh5.split_phase_center( - "3c84_2", "3c84_3", np.arange(hera_uvh5.Nblts), downselect=True + "3c84_2", + new_name="3c84_3", + select_mask=np.arange(hera_uvh5.Nblts), + downselect=True, ) cat_id1 = hera_uvh5._look_for_name("3c84") @@ -10805,16 +10857,20 @@ def test_split_phase_center_downselect(hera_uvh5): @pytest.mark.parametrize( - "val1,val2,val3,err_type,msg", + "new_w_vals,old_w_vals,select_mask,err_type,msg", [ [0.0, 0.0, 1.5, IndexError, "select_mask must be an array-like, either of"], [[0.0, 0.0], 0.0, [0], IndexError, "The length of new_w_vals is wrong"], [0.0, [0.0, 0.0], [0], IndexError, "The length of old_w_vals is wrong"], ], ) -def test_apply_w_arg_errs(hera_uvh5, val1, val2, val3, err_type, msg): +def test_apply_w_arg_errs( + hera_uvh5, new_w_vals, old_w_vals, select_mask, err_type, msg +): with pytest.raises(err_type, match=msg): - hera_uvh5._apply_w_proj(val1, val2, val3) + hera_uvh5._apply_w_proj( + new_w_vals=new_w_vals, old_w_vals=old_w_vals, select_mask=select_mask + ) @pytest.mark.filterwarnings("ignore:This method will be removed in version 3.0 when") @@ -10830,11 +10886,13 @@ def test_apply_w_no_ops(hera_uvh5, future_shapes): # Test to make sure that the following gives us back the same results, # first without a selection mask - hera_uvh5._apply_w_proj(0.0, 0.0) + hera_uvh5._apply_w_proj(new_w_vals=0.0, old_w_vals=0.0) assert hera_uvh5 == hera_copy # And now with a selection mask applied - hera_uvh5._apply_w_proj([0.0, 1.0], [0.0, 1.0], [0, 1]) + hera_uvh5._apply_w_proj( + new_w_vals=[0.0, 1.0], old_w_vals=[0.0, 1.0], select_mask=[0, 1] + ) assert hera_uvh5 == hera_copy @@ -10846,10 +10904,23 @@ def test_phase_dict_helper_err_multi_match(carma_miriad): """ for key in carma_miriad.phase_center_catalog: carma_miriad.phase_center_catalog[key]["cat_name"] = "NOISE" - args = [None] * 13 - args[10:12] = "NOISE", True + with pytest.raises(ValueError, match="Name of object has multiple matches in "): - carma_miriad._phase_dict_helper(*args) + carma_miriad._phase_dict_helper( + lon=None, + lat=None, + epoch=None, + phase_frame=None, + ephem_times=None, + cat_type=None, + pm_ra=None, + pm_dec=None, + dist=None, + vrad=None, + cat_name="NOISE", + lookup_name=True, + time_array=None, + ) def test_phase_dict_helper_simple(hera_uvh5, sma_mir, dummy_phase_dict): @@ -10862,19 +10933,19 @@ def test_phase_dict_helper_simple(hera_uvh5, sma_mir, dummy_phase_dict): # for loop to move through two different datasets. 
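The test_split_phase_center_bad_args rewrite above swaps positional *args tuples for kwargs dicts in the parametrize list and expands them with **kwargs at the call site. The same pattern in miniature, using a toy function rather than the real API:

import pytest

def split_sketch(catalog_identifier, *, new_name=None, select_mask=None):
    """Toy stand-in for a keyword-only method under test."""
    if not isinstance(catalog_identifier, (str, int)):
        raise TypeError("catalog_identifier must be a string or an integer.")

@pytest.mark.parametrize(
    "kwargs,err_type,msg",
    [({"catalog_identifier": 35.5}, TypeError, "must be a string or an integer")],
)
def test_split_sketch_bad_args(kwargs, err_type, msg):
    with pytest.raises(err_type, match=msg):
        split_sketch(**kwargs)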
for uv_object in [hera_uvh5, sma_mir]: phase_dict = uv_object._phase_dict_helper( - dummy_phase_dict["cat_lon"], - dummy_phase_dict["cat_lat"], - dummy_phase_dict["cat_epoch"], - dummy_phase_dict["cat_frame"], - dummy_phase_dict["cat_times"], - dummy_phase_dict["cat_type"], - dummy_phase_dict["cat_pm_ra"], - dummy_phase_dict["cat_pm_dec"], - dummy_phase_dict["cat_dist"], - dummy_phase_dict["cat_vrad"], - dummy_phase_dict["cat_name"], - False, # Don't lookup source - None, # Don't supply a time_array + lon=dummy_phase_dict["cat_lon"], + lat=dummy_phase_dict["cat_lat"], + epoch=dummy_phase_dict["cat_epoch"], + phase_frame=dummy_phase_dict["cat_frame"], + ephem_times=dummy_phase_dict["cat_times"], + cat_type=dummy_phase_dict["cat_type"], + pm_ra=dummy_phase_dict["cat_pm_ra"], + pm_dec=dummy_phase_dict["cat_pm_dec"], + dist=dummy_phase_dict["cat_dist"], + vrad=dummy_phase_dict["cat_vrad"], + cat_name=dummy_phase_dict["cat_name"], + lookup_name=False, + time_array=None, ) assert phase_dict == dummy_phase_dict @@ -10914,19 +10985,19 @@ def test_phase_dict_helper_errs(sma_mir, arg_dict, dummy_phase_dict, msg): # intermittent failures connecting to the JPL-Horizons service. with pytest.raises(Exception) as cm: sma_mir._phase_dict_helper( - arg_dict["cat_lon"], - arg_dict["cat_lat"], - arg_dict["cat_epoch"], - arg_dict["cat_frame"], - arg_dict["cat_times"], - arg_dict["cat_type"], - arg_dict["cat_pm_ra"], - arg_dict["cat_pm_dec"], - arg_dict["cat_dist"], - arg_dict["cat_vrad"], - arg_dict["cat_name"], - arg_dict.get("lookup"), - arg_dict.get("time_arr"), + lon=arg_dict["cat_lon"], + lat=arg_dict["cat_lat"], + epoch=arg_dict["cat_epoch"], + phase_frame=arg_dict["cat_frame"], + ephem_times=arg_dict["cat_times"], + cat_type=arg_dict["cat_type"], + pm_ra=arg_dict["cat_pm_ra"], + pm_dec=arg_dict["cat_pm_dec"], + dist=arg_dict["cat_dist"], + vrad=arg_dict["cat_vrad"], + cat_name=arg_dict["cat_name"], + lookup_name=arg_dict.get("lookup"), + time_array=arg_dict.get("time_arr"), ) if issubclass(cm.type, RequestException) or issubclass(cm.type, SSLError): @@ -10942,19 +11013,19 @@ def test_phase_dict_helper_sidereal_lookup(sma_mir, dummy_phase_dict): a multi-phase-ctr dataset. """ phase_dict = sma_mir._phase_dict_helper( - dummy_phase_dict["cat_lon"], - dummy_phase_dict["cat_lat"], - dummy_phase_dict["cat_epoch"], - dummy_phase_dict["cat_frame"], - dummy_phase_dict["cat_times"], - dummy_phase_dict["cat_type"], - dummy_phase_dict["cat_pm_ra"], - dummy_phase_dict["cat_pm_dec"], - dummy_phase_dict["cat_dist"], - dummy_phase_dict["cat_vrad"], - "3c84", - True, # Do lookup source! 
- None, # Don't supply a time_array + lon=dummy_phase_dict["cat_lon"], + lat=dummy_phase_dict["cat_lat"], + epoch=dummy_phase_dict["cat_epoch"], + phase_frame=dummy_phase_dict["cat_frame"], + ephem_times=dummy_phase_dict["cat_times"], + cat_type=dummy_phase_dict["cat_type"], + pm_ra=dummy_phase_dict["cat_pm_ra"], + pm_dec=dummy_phase_dict["cat_pm_dec"], + dist=dummy_phase_dict["cat_dist"], + vrad=dummy_phase_dict["cat_vrad"], + cat_name="3c84", + lookup_name=True, + time_array=None, ) assert phase_dict.pop("cat_id") == sma_mir._look_for_name("3c84")[0] assert phase_dict == sma_mir.phase_center_catalog[sma_mir._look_for_name("3c84")[0]] @@ -10969,19 +11040,19 @@ def test_phase_dict_helper_jpl_lookup_existing(sma_mir): # actually performing a lookup cat_id = sma_mir._look_for_name("3c84")[0] phase_dict = sma_mir._phase_dict_helper( - sma_mir.phase_center_catalog[cat_id].get("cat_lon"), - sma_mir.phase_center_catalog[cat_id].get("cat_lat"), - sma_mir.phase_center_catalog[cat_id].get("cat_epoch"), - sma_mir.phase_center_catalog[cat_id].get("cat_frame"), - sma_mir.phase_center_catalog[cat_id].get("cat_times"), - sma_mir.phase_center_catalog[cat_id].get("cat_type"), - sma_mir.phase_center_catalog[cat_id].get("cat_pm_ra"), - sma_mir.phase_center_catalog[cat_id].get("cat_pm_dec"), - sma_mir.phase_center_catalog[cat_id].get("cat_dist"), - sma_mir.phase_center_catalog[cat_id].get("cat_vrad"), - "3c84", - False, - sma_mir.time_array, + lon=sma_mir.phase_center_catalog[cat_id].get("cat_lon"), + lat=sma_mir.phase_center_catalog[cat_id].get("cat_lat"), + epoch=sma_mir.phase_center_catalog[cat_id].get("cat_epoch"), + phase_frame=sma_mir.phase_center_catalog[cat_id].get("cat_frame"), + ephem_times=sma_mir.phase_center_catalog[cat_id].get("cat_times"), + cat_type=sma_mir.phase_center_catalog[cat_id].get("cat_type"), + pm_ra=sma_mir.phase_center_catalog[cat_id].get("cat_pm_ra"), + pm_dec=sma_mir.phase_center_catalog[cat_id].get("cat_pm_dec"), + dist=sma_mir.phase_center_catalog[cat_id].get("cat_dist"), + vrad=sma_mir.phase_center_catalog[cat_id].get("cat_vrad"), + cat_name="3c84", + lookup_name=False, + time_array=sma_mir.time_array, ) assert phase_dict.pop("cat_id") == cat_id assert phase_dict == sma_mir.phase_center_catalog[sma_mir._look_for_name("3c84")[0]] @@ -11005,14 +11076,26 @@ def test_phase_dict_helper_jpl_lookup_append(sma_mir): # to reach the JPL-Horizons service. 
try: phase_dict = sma_mir._phase_dict_helper( - 0, 0, None, None, None, None, 0, 0, 0, 0, "Mars", True, obs_time + lon=0, + lat=0, + epoch=None, + phase_frame=None, + ephem_times=None, + cat_type=None, + pm_ra=0, + pm_dec=0, + dist=0, + vrad=0, + cat_name="Mars", + lookup_name=True, + time_array=obs_time, ) except (SSLError, RequestException) as err: pytest.skip("SSL/Connection error w/ JPL Horizons: " + str(err)) cat_id = sma_mir._add_phase_center( phase_dict["cat_name"], - phase_dict["cat_type"], + cat_type=phase_dict["cat_type"], cat_lon=phase_dict["cat_lon"], cat_lat=phase_dict["cat_lat"], cat_frame=phase_dict["cat_frame"], @@ -11036,7 +11119,19 @@ def test_phase_dict_helper_jpl_lookup_append(sma_mir): # Again, just skip if we are unable to reach the JPL-Horizons try: phase_dict = sma_mir._phase_dict_helper( - 0, 0, None, None, None, None, 0, 0, 0, 0, "Mars", True, obs_time + lon=0, + lat=0, + epoch=None, + phase_frame=None, + ephem_times=None, + cat_type=None, + pm_ra=0, + pm_dec=0, + dist=0, + vrad=0, + cat_name="Mars", + lookup_name=True, + time_array=obs_time, ) except (SSLError, RequestException) as err: pytest.skip("SSL/Connection error w/ JPL Horizons: " + str(err)) @@ -11069,7 +11164,7 @@ def test_fix_phase(hera_uvh5, tmp_path, future_shapes, use_ant_pos, phase_frame) phase_dec = uv_in.telescope_location_lat_lon_alt[0] * 0.333 # Do the improved phasing on the data set. - uv_in.phase(phase_ra, phase_dec, phase_frame=phase_frame, cat_name="foo") + uv_in.phase(lon=phase_ra, lat=phase_dec, phase_frame=phase_frame, cat_name="foo") if use_ant_pos: antpos_str = "_antpos" @@ -11194,9 +11289,9 @@ def test_multi_file_ignore_name(hera_uvh5_split, future_shapes): uvfull.use_current_array_shapes() # Phase both targets to the same position with different names - uv1.phase(3.6, -0.5, cat_name="target1") - uv2.phase(3.6, -0.5, cat_name="target2") - uvfull.phase(3.6, -0.5, cat_name="target1") + uv1.phase(lon=3.6, lat=-0.5, cat_name="target1") + uv2.phase(lon=3.6, lat=-0.5, cat_name="target2") + uvfull.phase(lon=3.6, lat=-0.5, cat_name="target1") # Check that you end up with two phase centers if you don't ignore the name uv3 = uv1 + uv2 @@ -11228,15 +11323,15 @@ def test_multi_phase_split_merge_rename(hera_uvh5_split, test_op): uv1, uv2, uvfull = hera_uvh5_split half_mask = np.arange(uvfull.Nblts) < (uvfull.Nblts * 0.5) - uv1.phase(3.6, -0.5, cat_name="target1") - uv2.phase(3.6, -0.5, cat_name="target1" if (test_op is None) else "target2") + uv1.phase(lon=3.6, lat=-0.5, cat_name="target1") + uv2.phase(lon=3.6, lat=-0.5, cat_name="target1" if (test_op is None) else "target2") uv3 = uv1 + uv2 uv3.reorder_blts() uvfull.reorder_blts() - uvfull.phase(3.6, -0.5, cat_name="target1") + uvfull.phase(lon=3.6, lat=-0.5, cat_name="target1") uvfull._update_phase_center_id( - list(uvfull.phase_center_catalog)[0], 1 if (test_op is None) else 0 + list(uvfull.phase_center_catalog)[0], new_id=1 if (test_op is None) else 0 ) # Any of these operations should allow for the objects to become equal to the @@ -11252,7 +11347,7 @@ def test_multi_phase_split_merge_rename(hera_uvh5_split, test_op): elif test_op == "rename": uv3.merge_phase_centers(list(uv3.phase_center_catalog)[::-1], ignore_name=True) uvfull.rename_phase_center("target1", "target2") - uvfull._update_phase_center_id(0, 1) + uvfull._update_phase_center_id(0, new_id=1) elif test_op == "merge": uv3.merge_phase_centers(["target1", "target2"], ignore_name=True) elif test_op == "r+m": @@ -11275,13 +11370,13 @@ def test_multi_phase_add(hera_uvh5_split, catid, 
future_shapes): uvfull.use_current_array_shapes() # Give it a new name, and then rephase half of the "full" object - uv1.phase(3.6, -0.5, cat_name="target1") - uv2.phase(-0.5, 3.6, cat_name="target2") + uv1.phase(lon=3.6, lat=-0.5, cat_name="target1") + uv2.phase(lon=-0.5, lat=3.6, cat_name="target2") # Test that addition handles cat ID collisions correctly for pc_id in list(uv2.phase_center_catalog): if uv2.phase_center_catalog[pc_id]["cat_name"] == "target2": - uv2._update_phase_center_id(pc_id, catid) + uv2._update_phase_center_id(pc_id, new_id=catid) # Add the objects together uv3 = uv1.__add__(uv2) @@ -11289,8 +11384,8 @@ def test_multi_phase_add(hera_uvh5_split, catid, future_shapes): # Separately phase both halves of the full data set half_mask = np.arange(uvfull.Nblts) < (uvfull.Nblts * 0.5) - uvfull.phase(-0.5, 3.6, cat_name="target2", select_mask=~half_mask) - uvfull.phase(3.6, -0.5, cat_name="target1", select_mask=half_mask) + uvfull.phase(lon=-0.5, lat=3.6, cat_name="target2", select_mask=~half_mask) + uvfull.phase(lon=3.6, lat=-0.5, cat_name="target1", select_mask=half_mask) uvfull.reorder_blts() # Check that the histories line up @@ -11313,10 +11408,10 @@ def test_multi_phase_add(hera_uvh5_split, catid, future_shapes): for pc_id, pc_dict in uvfull.phase_center_catalog.items() } - uv3._update_phase_center_id(name_map1["target1"], 100) - uv3._update_phase_center_id(name_map1["target2"], 101) - uv3._update_phase_center_id(100, name_map2["target1"]) - uv3._update_phase_center_id(101, name_map2["target2"]) + uv3._update_phase_center_id(name_map1["target1"], new_id=100) + uv3._update_phase_center_id(name_map1["target2"], new_id=101) + uv3._update_phase_center_id(100, new_id=name_map2["target1"]) + uv3._update_phase_center_id(101, new_id=name_map2["target2"]) assert uv3 == uvfull @@ -11344,10 +11439,10 @@ def test_multi_phase_downselect(hera_uvh5_split, cat_type, future_shapes): # Give it a new name, and then rephase half of the "full" object if cat_type == "sidereal": - uv1.phase(3.6, -0.5, cat_name="target1") - uv2.phase(-0.5, 3.6, cat_name="target2") - uvfull.phase(-0.5, 3.6, cat_name="target2", select_mask=~half_mask) - uvfull.phase(3.6, -0.5, cat_name="target1", select_mask=half_mask) + uv1.phase(lon=3.6, lat=-0.5, cat_name="target1") + uv2.phase(lon=-0.5, lat=3.6, cat_name="target2") + uvfull.phase(lon=-0.5, lat=3.6, cat_name="target2", select_mask=~half_mask) + uvfull.phase(lon=3.6, lat=-0.5, cat_name="target1", select_mask=half_mask) elif cat_type == "ephem": from ssl import SSLError @@ -11359,27 +11454,35 @@ def test_multi_phase_downselect(hera_uvh5_split, cat_type, future_shapes): ra=0, dec=0, epoch="J2000", lookup_name="Jupiter", cat_name="Jupiter" ) uvfull.phase( - 0, 0, lookup_name="Jupiter", cat_name="Jupiter", select_mask=~half_mask + lon=0, + lat=0, + lookup_name="Jupiter", + cat_name="Jupiter", + select_mask=~half_mask, ) uvfull.phase( - 0, 0, lookup_name="Mars", cat_name="Mars", select_mask=half_mask + lon=0, lat=0, lookup_name="Mars", cat_name="Mars", select_mask=half_mask ) except (SSLError, RequestException) as err: pytest.skip("SSL/Connection error w/ JPL Horizons: " + str(err)) elif cat_type == "driftscan": - uv1.phase(3.6, -0.5, cat_type=cat_type, phase_frame=None, cat_name="drift1") - uv2.phase(-0.5, 3.6, cat_type=cat_type, phase_frame="altaz", cat_name="drift2") + uv1.phase( + lon=3.6, lat=-0.5, cat_type=cat_type, phase_frame=None, cat_name="drift1" + ) + uv2.phase( + lon=-0.5, lat=3.6, cat_type=cat_type, phase_frame="altaz", cat_name="drift2" + ) 
uvfull.phase( - -0.5, - 3.6, + lon=-0.5, + lat=3.6, cat_type=cat_type, cat_name="drift2", phase_frame=None, select_mask=~half_mask, ) uvfull.phase( - 3.6, - -0.5, + lon=3.6, + lat=-0.5, cat_type=cat_type, cat_name="drift1", phase_frame="altaz", @@ -11395,7 +11498,9 @@ def test_multi_phase_downselect(hera_uvh5_split, cat_type, future_shapes): # Select does not clear the catalog, so clear the unused source and # update the cat ID so that it matches with the indv datasets uvtemp._clear_unused_phase_centers() - uvtemp._update_phase_center_id(list(uvtemp.phase_center_catalog.keys())[0], 1) + uvtemp._update_phase_center_id( + list(uvtemp.phase_center_catalog.keys())[0], new_id=1 + ) assert uvtemp.history in uvdata.history uvtemp.history = uvdata.history assert uvtemp == uvdata @@ -11896,19 +12001,19 @@ def test_add_pol_sorting_bl(casa_uvfits, add_type, sort_type, future_shapes): ) if sort_type == "blt": - uv1.reorder_blts("time", "ant1") - uv2.reorder_blts("time", "ant2") - casa_uvfits.reorder_blts("bda") + uv1.reorder_blts(order="time", minor_order="ant1") + uv2.reorder_blts(order="time", minor_order="ant2") + casa_uvfits.reorder_blts(order="bda") order_check = uv1.ant_1_array == uv2.ant_1_array elif sort_type == "freq": uv1.reorder_freqs(channel_order="freq") uv2.reorder_freqs(channel_order="-freq") - casa_uvfits.reorder_freqs("freq") + casa_uvfits.reorder_freqs(spw_order="freq") order_check = uv1.freq_array == uv2.freq_array elif sort_type == "pol": - uv1.reorder_pols("AIPS") - uv2.reorder_pols("CASA") - casa_uvfits.reorder_pols("CASA") + uv1.reorder_pols(order="AIPS") + uv2.reorder_pols(order="CASA") + casa_uvfits.reorder_pols(order="CASA") order_check = uv1.polarization_array == uv2.polarization_array # Make sure that the order has actually been scrambled @@ -11918,11 +12023,11 @@ def test_add_pol_sorting_bl(casa_uvfits, add_type, sort_type, future_shapes): uv3 = uv1 + uv2 if sort_type == "blt": - uv3.reorder_blts("bda") + uv3.reorder_blts(order="bda") elif sort_type == "freq": uv3.reorder_freqs(channel_order="freq") elif sort_type == "pol": - uv3.reorder_pols("CASA") + uv3.reorder_pols(order="CASA") # Deal with the history separately, since it will be different assert str.startswith(uv3.history, casa_uvfits.history) @@ -12188,14 +12293,14 @@ def test_make_flex_pol_errs(sma_mir, err_msg, param, param_val): sma_copy = sma_mir.copy() with pytest.raises(ValueError, match=err_msg): - sma_mir._make_flex_pol(True, True) + sma_mir._make_flex_pol(raise_error=True, raise_warning=True) with uvtest.check_warnings(UserWarning, err_msg): - sma_mir._make_flex_pol(False, True) + sma_mir._make_flex_pol(raise_error=False, raise_warning=True) assert sma_copy == sma_mir with uvtest.check_warnings(None): - sma_mir._make_flex_pol(False, False) + sma_mir._make_flex_pol(raise_error=False, raise_warning=False) assert sma_copy == sma_mir diff --git a/pyuvdata/uvdata/tests/test_uvfits.py b/pyuvdata/uvdata/tests/test_uvfits.py index 37e574d5a9..7a76ce64b6 100644 --- a/pyuvdata/uvdata/tests/test_uvfits.py +++ b/pyuvdata/uvdata/tests/test_uvfits.py @@ -161,7 +161,7 @@ def test_time_precision(tmp_path): latitude, longitude, altitude = uvd2.telescope_location_lat_lon_alt_degrees unique_times, inverse_inds = np.unique(uvd2.time_array, return_inverse=True) unique_lst_array = uvutils.get_lst_for_time( - unique_times, latitude, longitude, altitude + unique_times, latitude=latitude, longitude=longitude, altitude=altitude ) calc_lst_array = unique_lst_array[inverse_inds] @@ -626,7 +626,9 @@ def 
test_uvw_coordinate_suffixes(casa_uvfits, tmp_path, uvw_suffix): ): uv2 = UVData.from_file(write_file2, use_future_array_shapes=True) uv2.uvw_array = uvutils._rotate_one_axis( - uv2.uvw_array[:, :, None], -1 * (uv2.phase_center_app_dec - np.pi / 2), 0 + xyz_array=uv2.uvw_array[:, :, None], + rot_amount=-1 * (uv2.phase_center_app_dec - np.pi / 2), + rot_axis=0, )[:, :, 0] else: uv2 = UVData.from_file(write_file2, use_future_array_shapes=True) @@ -1057,7 +1059,7 @@ def test_roundtrip_blt_order(casa_uvfits, order, tmp_path): uv_out = UVData() write_file = str(tmp_path / "blt_order_test.uvfits") - uv_in.reorder_blts(order) + uv_in.reorder_blts(order=order) uv_in.write_uvfits(write_file) uv_out.read(write_file, use_future_array_shapes=True) diff --git a/pyuvdata/uvdata/tests/test_uvh5.py b/pyuvdata/uvdata/tests/test_uvh5.py index da43f3453d..4e20164efe 100644 --- a/pyuvdata/uvdata/tests/test_uvh5.py +++ b/pyuvdata/uvdata/tests/test_uvh5.py @@ -53,8 +53,9 @@ def uv_partial_write(casa_uvfits, tmp_path): # convert a uvfits file to uvh5, cutting down the amount of data uv_uvfits = casa_uvfits uv_uvfits.select(antenna_nums=[3, 7, 24]) + lat, lon, alt = uv_uvfits.telescope_location_lat_lon_alt_degrees uv_uvfits.lst_array = uvutils.get_lst_for_time( - uv_uvfits.time_array, *uv_uvfits.telescope_location_lat_lon_alt_degrees + uv_uvfits.time_array, latitude=lat, longitude=lon, altitude=alt ) testfile = str(tmp_path / "outtest.uvh5") @@ -996,8 +997,13 @@ def test_uvh5_partial_write_antpairs(uv_partial_write, future_shapes, tmp_path): data = full_uvh5.get_data(key, squeeze="none") flags = full_uvh5.get_flags(key, squeeze="none") nsamples = full_uvh5.get_nsamples(key, squeeze="none") - partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples, bls=key) - + partial_uvh5.write_uvh5_part( + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + bls=key, + ) # now read in the full file and make sure that it matches the original partial_uvh5.read(partial_testfile, use_future_array_shapes=future_shapes) @@ -1014,7 +1020,12 @@ def test_uvh5_partial_write_antpairs(uv_partial_write, future_shapes, tmp_path): flags = full_uvh5.get_flags(key, squeeze="none") nsamples = full_uvh5.get_nsamples(key, squeeze="none") partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, bls=key, add_to_history="foo" + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + bls=key, + add_to_history="foo", ) partial_uvh5.read(partial_testfile, read_data=False) assert "foo" in partial_uvh5.history @@ -1060,7 +1071,11 @@ def test_uvh5_partial_write_frequencies(uv_partial_write, future_shapes, tmp_pat flags = full_uvh5.flag_array[:, :, freqs1, :] nsamples = full_uvh5.nsample_array[:, :, freqs1, :] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, freq_chans=freqs1 + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + freq_chans=freqs1, ) if future_shapes: data = full_uvh5.data_array[:, freqs2, :] @@ -1071,7 +1086,11 @@ def test_uvh5_partial_write_frequencies(uv_partial_write, future_shapes, tmp_pat flags = full_uvh5.flag_array[:, :, freqs2, :] nsamples = full_uvh5.nsample_array[:, :, freqs2, :] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, freq_chans=freqs2 + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + freq_chans=freqs2, ) # read in the full file and make sure it matches @@ -1120,13 +1139,21 @@ def 
test_uvh5_partial_write_blts(uv_partial_write, future_shapes, tmp_path): flags = full_uvh5.flag_array[blts1] nsamples = full_uvh5.nsample_array[blts1] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, blt_inds=blts1 + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + blt_inds=blts1, ) data = full_uvh5.data_array[blts2] flags = full_uvh5.flag_array[blts2] nsamples = full_uvh5.nsample_array[blts2] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, blt_inds=blts2 + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + blt_inds=blts2, ) # read in the full file and make sure it matches @@ -1181,9 +1208,9 @@ def test_uvh5_partial_write_pols(uv_partial_write, future_shapes, tmp_path): nsamples = full_uvh5.nsample_array[:, :, :, pols1] partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, polarizations=full_uvh5.polarization_array[:Hpols], ) if future_shapes: @@ -1196,9 +1223,9 @@ def test_uvh5_partial_write_pols(uv_partial_write, future_shapes, tmp_path): nsamples = full_uvh5.nsample_array[:, :, :, pols2] partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, polarizations=full_uvh5.polarization_array[Hpols:], ) @@ -1249,7 +1276,11 @@ def test_uvh5_partial_write_irregular_blt(uv_partial_write, tmp_path): flags = full_uvh5.flag_array[blt_inds] nsamples = full_uvh5.nsample_array[blt_inds] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, blt_inds=blt_inds + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + blt_inds=blt_inds, ) # also write the arrays to the partial object @@ -1305,7 +1336,11 @@ def test_uvh5_partial_write_irregular_freq(uv_partial_write, tmp_path): flags = full_uvh5.flag_array[:, freq_inds, :] nsamples = full_uvh5.nsample_array[:, freq_inds, :] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, freq_chans=freq_inds + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + freq_chans=freq_inds, ) # also write the arrays to the partial object @@ -1362,9 +1397,9 @@ def test_uvh5_partial_write_irregular_pol(uv_partial_write, tmp_path): nsamples = full_uvh5.nsample_array[:, :, pol_inds] partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, polarizations=partial_uvh5.polarization_array[pol_inds], ) @@ -1447,9 +1482,9 @@ def test_uvh5_partial_write_irregular_multi1(uv_partial_write, future_shapes, tm ): partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, blt_inds=blt_inds, freq_chans=freq_inds, ) @@ -1552,9 +1587,9 @@ def test_uvh5_partial_write_irregular_multi2(uv_partial_write, future_shapes, tm ): partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, freq_chans=freq_inds, polarizations=full_uvh5.polarization_array[pol_inds], ) @@ -1653,9 +1688,9 @@ def test_uvh5_partial_write_irregular_multi3(uv_partial_write, future_shapes, tm ): partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, blt_inds=blt_inds, 
polarizations=full_uvh5.polarization_array[pol_inds], ) @@ -1765,9 +1800,9 @@ def test_uvh5_partial_write_irregular_multi4(uv_partial_write, future_shapes, tm ): partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, blt_inds=blt_inds, freq_chans=freq_inds, polarizations=full_uvh5.polarization_array[pol_inds], @@ -1843,7 +1878,13 @@ def test_uvh5_partial_write_errors(uv_partial_write, tmp_path): with pytest.raises( AssertionError, match=re.escape(f"{partial_testfile} does not exist") ): - partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples, bls=key) + partial_uvh5.write_uvh5_part( + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + bls=key, + ) # initialize file on disk partial_uvh5.initialize_uvh5_file(partial_testfile, clobber=True) @@ -1853,20 +1894,31 @@ def test_uvh5_partial_write_errors(uv_partial_write, tmp_path): AssertionError, match="data_array and flag_array must have the same shape" ): partial_uvh5.write_uvh5_part( - partial_testfile, data, flags[:, :, 0], nsamples, bls=key + partial_testfile, + data_array=data, + flag_array=flags[:, :, 0], + nsample_array=nsamples, + bls=key, ) with pytest.raises( AssertionError, match="data_array and nsample_array must have the same shape" ): partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples[:, :, 0], bls=key + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples[:, :, 0], + bls=key, ) # pass in arrays that are the same size, but don't match expected shape with pytest.raises(AssertionError, match="data_array has shape"): partial_uvh5.write_uvh5_part( - partial_testfile, data[:, :, 0], flags[:, :, 0], nsamples[:, :, 0] + partial_testfile, + data_array=data[:, :, 0], + flag_array=flags[:, :, 0], + nsample_array=nsamples[:, :, 0], ) # initialize a file on disk, and pass in a different object so check_header fails @@ -1877,7 +1929,13 @@ def test_uvh5_partial_write_errors(uv_partial_write, tmp_path): AssertionError, match="The object metadata in memory and metadata on disk are different", ): - small_uvd.write_uvh5_part(partial_testfile, data, flags, nsamples, bls=key) + small_uvd.write_uvh5_part( + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + bls=key, + ) # clean up os.remove(partial_testfile) @@ -2386,7 +2444,13 @@ def test_uvh5_partial_write_ints_antpairs(uv_uvh5, tmp_path): data = full_uvh5.get_data(key, squeeze="none") flags = full_uvh5.get_flags(key, squeeze="none") nsamples = full_uvh5.get_nsamples(key, squeeze="none") - partial_uvh5.write_uvh5_part(partial_testfile, data, flags, nsamples, bls=key) + partial_uvh5.write_uvh5_part( + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + bls=key, + ) # now read in the full file and make sure that it matches the original partial_uvh5.read(partial_testfile, use_future_array_shapes=True) @@ -2431,13 +2495,21 @@ def test_uvh5_partial_write_ints_frequencies(uv_uvh5, tmp_path): flags = full_uvh5.flag_array[:, freqs1, :] nsamples = full_uvh5.nsample_array[:, freqs1, :] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, freq_chans=freqs1 + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + freq_chans=freqs1, ) data = full_uvh5.data_array[:, freqs2, :] flags = full_uvh5.flag_array[:, freqs2, :] nsamples = full_uvh5.nsample_array[:, freqs2, :] partial_uvh5.write_uvh5_part( - partial_testfile, 
data, flags, nsamples, freq_chans=freqs2 + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + freq_chans=freqs2, ) # read in the full file and make sure it matches @@ -2483,13 +2555,21 @@ def test_uvh5_partial_write_ints_blts(uv_uvh5, tmp_path): flags = full_uvh5.flag_array[blts1] nsamples = full_uvh5.nsample_array[blts1] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, blt_inds=blts1 + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + blt_inds=blts1, ) data = full_uvh5.data_array[blts2] flags = full_uvh5.flag_array[blts2] nsamples = full_uvh5.nsample_array[blts2] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, blt_inds=blts2 + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + blt_inds=blts2, ) # read in the full file and make sure it matches @@ -2536,9 +2616,9 @@ def test_uvh5_partial_write_ints_pols(uv_uvh5, tmp_path): nsamples = full_uvh5.nsample_array[:, :, pols1] partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, polarizations=full_uvh5.polarization_array[:Hpols], ) data = full_uvh5.data_array[:, :, pols2] @@ -2546,9 +2626,9 @@ def test_uvh5_partial_write_ints_pols(uv_uvh5, tmp_path): nsamples = full_uvh5.nsample_array[:, :, pols2] partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, polarizations=full_uvh5.polarization_array[Hpols:], ) @@ -2701,7 +2781,11 @@ def test_uvh5_partial_write_ints_irregular_blt(uv_uvh5, tmp_path): flags = full_uvh5.flag_array[blt_inds] nsamples = full_uvh5.nsample_array[blt_inds] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, blt_inds=blt_inds + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + blt_inds=blt_inds, ) # also write the arrays to the partial object @@ -2754,7 +2838,11 @@ def test_uvh5_partial_write_ints_irregular_freq(uv_uvh5, tmp_path): flags = full_uvh5.flag_array[:, freq_inds, :] nsamples = full_uvh5.nsample_array[:, freq_inds, :] partial_uvh5.write_uvh5_part( - partial_testfile, data, flags, nsamples, freq_chans=freq_inds + partial_testfile, + data_array=data, + flag_array=flags, + nsample_array=nsamples, + freq_chans=freq_inds, ) # also write the arrays to the partial object @@ -2808,9 +2896,9 @@ def test_uvh5_partial_write_ints_irregular_pol(uv_uvh5, tmp_path): nsamples = full_uvh5.nsample_array[:, :, pol_inds] partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, polarizations=partial_uvh5.polarization_array[pol_inds], ) @@ -2892,9 +2980,9 @@ def test_uvh5_partial_write_ints_irregular_multi1(uv_uvh5, future_shapes, tmp_pa ): partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, blt_inds=blt_inds, freq_chans=freq_inds, ) @@ -2996,9 +3084,9 @@ def test_uvh5_partial_write_ints_irregular_multi2(uv_uvh5, future_shapes, tmp_pa ): partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, freq_chans=freq_inds, polarizations=full_uvh5.polarization_array[pol_inds], ) @@ -3095,9 +3183,9 @@ def test_uvh5_partial_write_ints_irregular_multi3(uv_uvh5, future_shapes, tmp_pa ): partial_uvh5.write_uvh5_part( 
partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, blt_inds=blt_inds, polarizations=full_uvh5.polarization_array[pol_inds], ) @@ -3208,9 +3296,9 @@ def test_uvh5_partial_write_ints_irregular_multi4(uv_uvh5, future_shapes, tmp_pa ): partial_uvh5.write_uvh5_part( partial_testfile, - data, - flags, - nsamples, + data_array=data, + flag_array=flags, + nsample_array=nsamples, blt_inds=blt_inds, freq_chans=freq_inds, polarizations=full_uvh5.polarization_array[pol_inds], @@ -3472,9 +3560,9 @@ def test_write_uvh5_part_fix_autos(uv_uvh5, tmp_path): initialize_with_zeros_ints(uv_uvh5, testfile) uv_uvh5.write_uvh5_part( testfile, - uv_uvh5.data_array, - uv_uvh5.flag_array, - uv_uvh5.nsample_array, + data_array=uv_uvh5.data_array, + flag_array=uv_uvh5.flag_array, + nsample_array=uv_uvh5.nsample_array, fix_autos=True, ) diff --git a/pyuvdata/uvdata/uvdata.py b/pyuvdata/uvdata/uvdata.py index fccf5847cf..fdbc5ccf85 100644 --- a/pyuvdata/uvdata/uvdata.py +++ b/pyuvdata/uvdata/uvdata.py @@ -1612,7 +1612,7 @@ def split_phase_center( warnings.warn( "All data for the source selected - updating the cat_id instead." ) - self._update_phase_center_id(cat_id, new_id) + self._update_phase_center_id(cat_id, new_id=new_id) if new_name is not None: self.rename_phase_center(new_id, new_name) else: @@ -2455,7 +2455,7 @@ def _calc_single_integration_time(self): # seconds, so we need to convert. return np.diff(np.sort(list(set(self.time_array))))[0] * 86400 - def _set_lsts_helper(self, astrometry_library=None): + def _set_lsts_helper(self, *, astrometry_library=None): latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees # the utility function is efficient -- it only calculates unique times self.lst_array = uvutils.get_lst_for_time( @@ -4526,7 +4526,7 @@ def set_data(self, data, key1, key2=None, key3=None): """ dshape = data.shape inds = self._set_method_helper(dshape, key1, key2, key3) - uvutils._index_dset(self.data_array, inds, data) + uvutils._index_dset(self.data_array, inds, input_array=data) return @@ -4571,7 +4571,7 @@ def set_flags(self, flags, key1, key2=None, key3=None): """ dshape = flags.shape inds = self._set_method_helper(dshape, key1, key2, key3) - uvutils._index_dset(self.flag_array, inds, flags) + uvutils._index_dset(self.flag_array, inds, input_array=flags) return @@ -4618,7 +4618,7 @@ def set_nsamples(self, nsamples, key1, key2=None, key3=None): """ dshape = nsamples.shape inds = self._set_method_helper(dshape, key1, key2, key3) - uvutils._index_dset(self.nsample_array, inds, nsamples) + uvutils._index_dset(self.nsample_array, inds, input_array=nsamples) return @@ -5701,6 +5701,7 @@ def _phase_dict_helper( def phase( self, + *, lon=None, lat=None, epoch="J2000", @@ -5879,8 +5880,8 @@ def phase( # We got the meta-data, now handle calculating the apparent coordinates. 
# First, check if we need to look up the phase center in question new_app_ra, new_app_dec = uvutils.calc_app_coords( - phase_dict["cat_lon"], - phase_dict["cat_lat"], + lon_coord=phase_dict["cat_lon"], + lat_coord=phase_dict["cat_lat"], coord_frame=phase_dict["cat_frame"], coord_epoch=phase_dict["cat_epoch"], coord_times=phase_dict["cat_times"], @@ -5930,8 +5931,8 @@ def phase( # With all operations complete, we now start manipulating the UVData object cat_id = self._add_phase_center( - phase_dict["cat_name"], - phase_dict["cat_type"], + cat_name=phase_dict["cat_name"], + cat_type=phase_dict["cat_type"], cat_lon=phase_dict["cat_lon"], cat_lat=phase_dict["cat_lat"], cat_frame=phase_dict["cat_frame"], @@ -6032,8 +6033,8 @@ def phase_to_time( zenith_dec = obs_zenith_coord.dec.rad self.phase( - zenith_ra, - zenith_dec, + lon=zenith_ra, + lat=zenith_dec, epoch="J2000", phase_frame=phase_frame, use_ant_pos=use_ant_pos, @@ -6218,10 +6219,13 @@ def fix_phase(self, *, use_ant_pos=True): ) itrs_uvw_coord = frame_uvw_coord.transform_to("itrs") - + lat, lon, alt = itrs_lat_lon_alt # now convert them to ENU, which is the space uvws are in self.uvw_array[inds, :] = uvutils.ENU_from_ECEF( - itrs_uvw_coord.cartesian.get_xyz().value.T, *itrs_lat_lon_alt + itrs_uvw_coord.cartesian.get_xyz().value.T, + latitude=lat, + longitude=lon, + altitude=alt, ) # remove/add phase center @@ -6244,8 +6248,8 @@ def fix_phase(self, *, use_ant_pos=True): # And rephase the data using the new algorithm self.phase( - phase_dict["cat_lon"], - phase_dict["cat_lat"], + lon=phase_dict["cat_lon"], + lat=phase_dict["cat_lat"], phase_frame=phase_dict["cat_frame"], epoch=phase_dict["cat_epoch"], cat_name=phase_dict["cat_name"], @@ -6676,7 +6680,7 @@ def __add__( this_blts_ind[other_argsort] ] - this.reorder_blts(temp_ind) + this.reorder_blts(order=temp_ind) if len(this_freq_ind) != 0: this_argsort = np.argsort(this_freq_ind) @@ -6698,7 +6702,7 @@ def __add__( this_pol_ind[other_argsort] ] - this.reorder_pols(temp_ind) + this.reorder_pols(order=temp_ind) # Pad out self to accommodate new data blt_order = None @@ -8403,6 +8407,7 @@ def _select_preprocess( def _select_by_index( self, + *, blt_inds, freq_inds, pol_inds, @@ -8507,6 +8512,7 @@ def _select_by_index( def select( self, + *, antenna_nums=None, antenna_names=None, ant_str=None, @@ -8673,7 +8679,11 @@ def select( # Call the low-level selection method. 
uv_obj._select_by_index( - blt_inds, freq_inds, pol_inds, history_update_string, keep_all_metadata + blt_inds=blt_inds, + freq_inds=freq_inds, + pol_inds=pol_inds, + history_update_string=history_update_string, + keep_all_metadata=keep_all_metadata, ) # Update the rectangularity attributes @@ -8767,6 +8777,7 @@ def _harmonize_resample_arrays( def upsample_in_time( self, max_int_time, + *, blt_order="time", minor_order="baseline", summing_correlator_mode=False, @@ -9000,8 +9011,8 @@ def upsample_in_time( else: select_mask = None self.phase( - cat_dict["cat_lon"], - cat_dict["cat_lat"], + lon=cat_dict["cat_lon"], + lat=cat_dict["cat_lat"], cat_name=cat_dict["cat_name"], cat_type=cat_dict["cat_type"], phase_frame=cat_dict["cat_frame"], @@ -9026,6 +9037,7 @@ def upsample_in_time( def downsample_in_time( self, + *, min_int_time=None, n_times_to_avg=None, blt_order="time", @@ -9202,7 +9214,7 @@ def downsample_in_time( if len(np.unique(int_times)) == 1: # this baseline has all the same integration times if len(np.unique(dtime)) > 1 and not uvutils._test_array_constant( - dtime, self._integration_time.tols + dtime, tols=self._integration_time.tols ): warnings.warn( "There is a gap in the times of baseline {bl}. " @@ -9498,8 +9510,8 @@ def downsample_in_time( else: select_mask = None self.phase( - cat_dict["cat_lon"], - cat_dict["cat_lat"], + lon=cat_dict["cat_lon"], + lat=cat_dict["cat_lat"], cat_name=cat_dict["cat_name"], cat_type=cat_dict["cat_type"], phase_frame=cat_dict["cat_frame"], @@ -9609,7 +9621,7 @@ def resample_in_time( if downsample: self.downsample_in_time( - target_time, + min_int_time=target_time, blt_order=blt_order, minor_order=minor_order, keep_ragged=keep_ragged, @@ -10102,7 +10114,7 @@ def compress_by_redundancy( raise ValueError(f"method must be one of {allowed_methods}") red_gps, centers, lengths, conjugates = self.get_redundancies( - tol, include_conjugates=True + tol=tol, include_conjugates=True ) bl_ants = [self.baseline_to_antnums(gp[0]) for gp in red_gps] @@ -10172,9 +10184,9 @@ def compress_by_redundancy( # so we can average over them. 
time_inds = np.arange(len(group_times + conj_group_times)) time_gps = uvutils.find_clusters( - time_inds, - np.array(group_times + conj_group_times), - self._time_array.tols[1], + location_ids=time_inds, + location_vectors=np.array(group_times + conj_group_times), + tol=self._time_array.tols[1], ) # average over the same times @@ -13024,7 +13036,7 @@ def write_uvh5_part( filename, *, data_array, - flags_array, + flag_array, nsample_array, check_header=True, antenna_nums=None, @@ -13153,9 +13165,9 @@ def write_uvh5_part( uvh5_obj = self._convert_to_filetype("uvh5") uvh5_obj.write_uvh5_part( filename, - data_array, - flags_array, - nsample_array, + data_array=data_array, + flag_array=flag_array, + nsample_array=nsample_array, check_header=check_header, antenna_nums=antenna_nums, antenna_names=antenna_names, diff --git a/pyuvdata/uvdata/uvfits.py b/pyuvdata/uvdata/uvfits.py index b04bb5038d..50bf2006f7 100644 --- a/pyuvdata/uvdata/uvfits.py +++ b/pyuvdata/uvdata/uvfits.py @@ -31,6 +31,7 @@ class UVFITS(UVData): def _get_parameter_data( self, vis_hdu, + *, read_source, run_check_acceptability, background_lsts=True, @@ -197,6 +198,7 @@ def _get_parameter_data( def _get_data( self, + *, vis_hdu, antenna_nums, antenna_names, @@ -271,7 +273,11 @@ def _get_data( # do select operations on everything except data_array, flag_array # and nsample_array self._select_by_index( - blt_inds, freq_inds, pol_inds, history_update_string, keep_all_metadata + blt_inds=blt_inds, + freq_inds=freq_inds, + pol_inds=pol_inds, + history_update_string=history_update_string, + keep_all_metadata=keep_all_metadata, ) # just read in the right portions of the data and flag arrays @@ -348,6 +354,7 @@ def _get_data( def read_uvfits( self, filename, + *, antenna_nums=None, antenna_names=None, ant_str=None, @@ -676,8 +683,8 @@ def read_uvfits( # Now read in the random parameter info self._get_parameter_data( vis_hdu, - read_source, - run_check_acceptability, + read_source=read_source, + run_check_acceptability=run_check_acceptability, background_lsts=background_lsts, astrometry_library=astrometry_library, ) @@ -739,30 +746,32 @@ def read_uvfits( # Must be done here because it requires the phase_center_app_dec if "UU---NCP" in vis_hdu.data.parnames: self.uvw_array = uvutils._rotate_one_axis( - self.uvw_array[:, :, None], self.phase_center_app_dec - np.pi / 2, 0 + xyz_array=self.uvw_array[:, :, None], + rot_amount=self.phase_center_app_dec - np.pi / 2, + rot_axis=0, )[:, :, 0] if read_data: # Now read in the data self._get_data( - vis_hdu, - antenna_nums, - antenna_names, - ant_str, - bls, - frequencies, - freq_chans, - times, - time_range, - lsts, - lst_range, - polarizations, - blt_inds, - phase_center_ids, - catalog_names, - keep_all_metadata, - fix_old_proj, - fix_use_ant_pos, + vis_hdu=vis_hdu, + antenna_nums=antenna_nums, + antenna_names=antenna_names, + ant_str=ant_str, + bls=bls, + frequencies=frequencies, + freq_chans=freq_chans, + times=times, + time_range=time_range, + lsts=lsts, + lst_range=lst_range, + polarizations=polarizations, + blt_inds=blt_inds, + phase_center_ids=phase_center_ids, + catalog_names=catalog_names, + keep_all_metadata=keep_all_metadata, + fix_old_proj=fix_old_proj, + fix_use_ant_pos=fix_use_ant_pos, ) if use_future_array_shapes: self.use_future_array_shapes() @@ -783,6 +792,7 @@ def read_uvfits( def write_uvfits( self, filename, + *, write_lst=True, force_phase=False, run_check=True, @@ -1435,10 +1445,10 @@ def write_uvfits( # objects to share the same frame. 
So we want to make sure that # everything lines up with the coordinate frame listed. new_ra, new_dec = uvutils.transform_sidereal_coords( - phase_dict["cat_lon"], - phase_dict["cat_lat"], - phase_dict["cat_frame"], - hdu.header["RADESYS"], + longitude=phase_dict["cat_lon"], + latitude=phase_dict["cat_lat"], + in_coord_frame=phase_dict["cat_frame"], + out_coord_frame=hdu.header["RADESYS"], in_coord_epoch=phase_dict.get("cat_epoch"), out_coord_epoch=phase_dict.get("cat_epoch"), time_array=np.mean(self.time_array), diff --git a/pyuvdata/uvdata/uvh5.py b/pyuvdata/uvdata/uvh5.py index 20bc630191..59958dd011 100644 --- a/pyuvdata/uvdata/uvh5.py +++ b/pyuvdata/uvdata/uvh5.py @@ -303,6 +303,7 @@ class FastUVH5Meta: def __init__( self, path: str | Path | h5py.File | h5py.Group, + *, blt_order: Literal["determine"] | tuple[str] | None = None, blts_are_rectangular: bool | None = None, time_axis_faster_than_bls: bool | None = None, @@ -787,9 +788,12 @@ def pols(self) -> list[str]: @cached_property def antpos_enu(self) -> np.ndarray: """The antenna positions in ENU coordinates, in meters.""" + lat, lon, alt = self.telescope_location_lat_lon_alt return uvutils.ENU_from_ECEF( self.antenna_positions + self.telescope_location, - *self.telescope_location_lat_lon_alt, + latitude=lat, + longitude=lon, + altitude=alt, frame="itrs", ) @@ -844,7 +848,7 @@ def vis_units(self) -> str: return vis_units def to_uvdata( - self, check_lsts: bool = False, astrometry_library: str | None = None + self, *, check_lsts: bool = False, astrometry_library: str | None = None ) -> UVData: """Convert the file to a UVData object. @@ -886,6 +890,7 @@ class UVH5(UVData): def _read_header_with_fast_meta( self, filename: str | Path | FastUVH5Meta, + *, run_check_acceptability: bool = True, blt_order: tuple[str] | None | Literal["determine"] = None, blts_are_rectangular: bool | None = None, @@ -1092,6 +1097,7 @@ def _read_header( def _get_data( self, dgrp, + *, antenna_nums, antenna_names, ant_str, @@ -1219,7 +1225,11 @@ def _get_data( # do select operations on everything except data_array, flag_array # and nsample_array self._select_by_index( - blt_inds, freq_inds, pol_inds, history_update_string, keep_all_metadata + blt_inds=blt_inds, + freq_inds=freq_inds, + pol_inds=pol_inds, + history_update_string=history_update_string, + keep_all_metadata=keep_all_metadata, ) # determine which axes can be sliced, rather than fancy indexed @@ -1409,6 +1419,7 @@ def _get_data( def read_uvh5( self, filename, + *, antenna_nums=None, antenna_names=None, ant_str=None, @@ -1476,23 +1487,23 @@ def read_uvh5( # Now read in the data self._get_data( meta.datagrp, - antenna_nums, - antenna_names, - ant_str, - bls, - frequencies, - freq_chans, - times, - time_range, - lsts, - lst_range, - polarizations, - blt_inds, - phase_center_ids, - catalog_names, - data_array_dtype, - keep_all_metadata, - multidim_index, + antenna_nums=antenna_nums, + antenna_names=antenna_names, + ant_str=ant_str, + bls=bls, + frequencies=frequencies, + freq_chans=freq_chans, + times=times, + time_range=time_range, + lsts=lsts, + lst_range=lst_range, + polarizations=polarizations, + blt_inds=blt_inds, + phase_center_ids=phase_center_ids, + catalog_names=catalog_names, + data_array_dtype=data_array_dtype, + keep_all_metadata=keep_all_metadata, + multidim_index=multidim_index, ) if close_meta: meta.close() @@ -1682,6 +1693,7 @@ def _write_header(self, header): def write_uvh5( self, filename, + *, clobber=False, chunks=True, data_compression=None, @@ -1858,6 +1870,7 @@ def write_uvh5( def 
initialize_uvh5_file( self, filename, + *, clobber=False, chunks=True, data_compression=None, @@ -1995,7 +2008,7 @@ def initialize_uvh5_file( return def _check_header( - self, filename, run_check_acceptability=True, background_lsts=True + self, filename, *, run_check_acceptability=True, background_lsts=True ): """ Check that the metadata in a file header matches the object's metadata. @@ -2074,6 +2087,7 @@ def _check_header( def write_uvh5_part( self, filename, + *, data_array, flag_array, nsample_array, @@ -2227,20 +2241,20 @@ def write_uvh5_part( # figure out which "full file" indices to write data to blt_inds, freq_inds, pol_inds, _ = self._select_preprocess( - antenna_nums, - antenna_names, - ant_str, - bls, - frequencies, - freq_chans, - times, - time_range, - lsts, - lst_range, - polarizations, - blt_inds, - phase_center_ids, - catalog_names, + antenna_nums=antenna_nums, + antenna_names=antenna_names, + ant_str=ant_str, + bls=bls, + frequencies=frequencies, + freq_chans=freq_chans, + times=times, + time_range=time_range, + lsts=lsts, + lst_range=lst_range, + polarizations=polarizations, + blt_inds=blt_inds, + phase_center_ids=phase_center_ids, + catalog_names=catalog_names, ) # make sure that the dimensions of the data to write are correct diff --git a/pyuvdata/uvflag/tests/test_uvflag.py b/pyuvdata/uvflag/tests/test_uvflag.py index 47b454406c..cc1410f51f 100644 --- a/pyuvdata/uvflag/tests/test_uvflag.py +++ b/pyuvdata/uvflag/tests/test_uvflag.py @@ -3368,7 +3368,7 @@ def test_flags2waterfall_errors(uvdata_obj): uv = uvdata_obj # Flag array must have same shape as uv.flag_array with pytest.raises(ValueError) as cm: - flags2waterfall(uv, np.array([4, 5])) + flags2waterfall(uv, flag_array=np.array([4, 5])) assert str(cm.value).startswith("Flag array must align with UVData or UVCal") diff --git a/pyuvdata/uvflag/uvflag.py b/pyuvdata/uvflag/uvflag.py index c2007a1d48..a76e5fdcbe 100644 --- a/pyuvdata/uvflag/uvflag.py +++ b/pyuvdata/uvflag/uvflag.py @@ -645,7 +645,7 @@ def __init__( # Given a path, read indata self.read( indata, - history, + history=history, telescope_name=telescope_name, mwa_metafits_file=mwa_metafits_file, use_future_array_shapes=use_future_array_shapes, @@ -1507,7 +1507,7 @@ def to_waterfall( warnings.warn("This object is already a waterfall. Nothing to change.") return if (not keep_pol) and (len(self.polarization_array) > 1): - self.collapse_pol(method) + self.collapse_pol(method=method) if self.mode == "flag": darr = self.flag_array From c78b1d614be033536498410de3e4e9f7904ac95c Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Mon, 28 Aug 2023 15:43:35 -0700 Subject: [PATCH 06/12] update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b29331405f..834c05ca41 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ All notable changes to this project will be documented in this file. time for each time range or the time_array (if there's a time_array and no time_range). ### Changed +- Require keyword arguments rather than allowing for passing arguments by position for +functions and methods with many parameters. - Only one of `time_array` and `time_range` (and similarly `lst_array` and `lst_range`) can be set on a UVCal object. 
- If `time_range` is set it must be 2D with a shape of (Ntimes, 2) where the first axis From 1eaf8f0f3fadd98eef5312b6fa5e60e773b8a290 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Mon, 28 Aug 2023 15:43:42 -0700 Subject: [PATCH 07/12] fix tutorials --- docs/uvcal_tutorial.rst | 8 ++++---- docs/uvdata_tutorial.rst | 29 +++++++++++++++++++++-------- pyuvdata/uvcal/tests/test_uvcal.py | 6 +++--- pyuvdata/uvcal/uvcal.py | 6 +++--- 4 files changed, 31 insertions(+), 18 deletions(-) diff --git a/docs/uvcal_tutorial.rst b/docs/uvcal_tutorial.rst index b404e17038..f43c0e1db8 100644 --- a/docs/uvcal_tutorial.rst +++ b/docs/uvcal_tutorial.rst @@ -190,7 +190,7 @@ data-like arrays as well, filled with zeros. >>> from pyuvdata.data import DATA_PATH >>> uvd_file = os.path.join(DATA_PATH, "zen.2458098.45361.HH.uvh5_downselected") >>> uvd = UVData.from_file(uvd_file, file_type="uvh5", use_future_array_shapes=True) - >>> uvc = UVCal.initialize_from_uvdata(uvd, "multiply", "redundant") + >>> uvc = UVCal.initialize_from_uvdata(uvd, gain_convention="multiply", cal_style="redundant") >>> print(uvc.ant_array) [ 0 1 11 12 13 23 24 25] @@ -214,7 +214,7 @@ a) Data for a single antenna and instrumental polarization >>> from pyuvdata.data import DATA_PATH >>> filename = os.path.join(DATA_PATH, 'zen.2457555.42443.HH.uvcA.omni.calfits') >>> uvc = UVCal.from_file(filename, use_future_array_shapes=True) - >>> gain = uvc.get_gains(9, 'Jxx') # gain for ant=9, pol='Jxx' + >>> gain = uvc.get_gains(9, jpol='Jxx') # gain for ant=9, pol='Jxx' >>> # One can equivalently make any of these calls with the input wrapped in a tuple. >>> gain = uvc.get_gains((9, 'Jxx')) @@ -587,7 +587,7 @@ specified by passing an index array. True >>> # Prepend a ``-`` to the sort string to sort in descending order. - >>> cal.reorder_antennas('-number') + >>> cal.reorder_antennas(order='-number') >>> print(np.min(np.diff(cal.ant_array)) <= 0) True @@ -671,7 +671,7 @@ array for the time axis. True >>> # Prepend a ``-`` to the sort string to sort in descending order. - >>> cal.reorder_times('-time') + >>> cal.reorder_times(order='-time') >>> print(np.min(np.diff(cal.time_array)) <= 0) True diff --git a/docs/uvdata_tutorial.rst b/docs/uvdata_tutorial.rst index 644a534dce..1b977a9498 100644 --- a/docs/uvdata_tutorial.rst +++ b/docs/uvdata_tutorial.rst @@ -601,7 +601,7 @@ Phasing/unphasing data >>> # center, though it does not need to be unique. We are specifying that the type >>> # here is "sidereal", which means that the position is represented by a fixed set >>> # of coordinates in a sidereal coordinate frame (e.g., ICRS, FK5, etc). - >>> uvd.phase(5.23368, 0.710940, epoch="J2000", cat_name='target1', cat_type="sidereal") + >>> uvd.phase(lon=5.23368, lat=0.710940, epoch="J2000", cat_name='target1', cat_type="sidereal") >>> uvd.print_phase_center_info() ID Cat Entry Type Az/Lon/RA El/Lat/Dec Frame Epoch # Name hours deg @@ -623,7 +623,7 @@ Phasing/unphasing data >>> # You can also now phase to "ephem" objects, which >>> # move with time, e.g. solar system bodies. 
The phase method has a `lookup_name` >>> # option which, if set to true, will allow you to search JPL-Horizons for coords - >>> uvd.phase(0, 0, epoch="J2000", cat_name="Sun", lookup_name=True) + >>> uvd.phase(lon=0, lat=0, epoch="J2000", cat_name="Sun", lookup_name=True) >>> uvd.print_phase_center_info() ID Cat Entry Type Az/Lon/RA El/Lat/Dec Frame Epoch Ephem Range Dist V_rad # Name hours deg Start-MJD End-MJD pc km/s @@ -640,7 +640,7 @@ Phasing/unphasing data >>> # used to be designated with phase_type="drift" -- in that it is still phased and >>> # can be to any azimuth and elevation, not just zenith). Note that we need to >>> # supply `phase_frame` as "altaz", since driftscans are always in that frame. - >>> uvd.phase(0, pi/2, cat_name="zenith", phase_frame='altaz', cat_type="driftscan", select_mask=select_mask) + >>> uvd.phase(lon=0, lat=pi/2, cat_name="zenith", phase_frame='altaz', cat_type="driftscan", select_mask=select_mask) >>> # Now when using `print_phase_center_info`, we'll see that there are multiple >>> # phase centers present in the data @@ -857,7 +857,8 @@ a) Getting antenna positions in topocentric frame in units of meters >>> antpos = uvd.antenna_positions + uvd.telescope_location >>> # convert to topocentric (East, North, Up or ENU) coords. - >>> antpos = utils.ENU_from_ECEF(antpos, *uvd.telescope_location_lat_lon_alt) + >>> lat, lon, alt = uvd.telescope_location_lat_lon_alt + >>> antpos = utils.ENU_from_ECEF(antpos, latitude=lat, longitude=lon, altitude=alt) UVData: Selecting data ---------------------- @@ -1523,13 +1524,25 @@ are written to the appropriate parts of the file on disk. >>> data_array = 0.5 * uvd2.data_array >>> flag_array = uvd2.flag_array >>> nsample_array = uvd2.nsample_array - >>> uvd.write_uvh5_part(partfile, data_array, flag_array, nsample_array, freq_chans=freq_inds1) + >>> uvd.write_uvh5_part( + ... partfile, + ... data_array=data_array, + ... flag_array=flag_array, + ... nsample_array=nsample_array, + ... freq_chans=freq_inds1 + ... ) >>> uvd2 = UVData.from_file(filename, freq_chans=freq_inds2, use_future_array_shapes=True) >>> data_array = 2.0 * uvd2.data_array >>> flag_array = uvd2.flag_array >>> nsample_array = uvd2.nsample_array - >>> uvd.write_uvh5_part(partfile, data_array, flag_array, nsample_array, freq_chans=freq_inds2) + >>> uvd.write_uvh5_part( + ... partfile, + ... data_array=data_array, + ... flag_array=flag_array, + ... nsample_array=nsample_array, + ... freq_chans=freq_inds2 + ... ) .. _uvdata_sorting_data: @@ -1554,7 +1567,7 @@ various conventions (``'ant10'``, ``' >>> from pyuvdata.data import DATA_PATH >>> uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits') >>> uvd = UVData.from_file(uvfits_file, use_future_array_shapes=True) - >>> uvd.conjugate_bls('ant1>> uvd.conjugate_bls(convention='ant1>> print(np.min(uvd.ant_2_array - uvd.ant_1_array) >= 0) True @@ -1667,7 +1680,7 @@ ordering set by the user. 
>>> print(uvutils.polnum2str(uvd.polarization_array)) ['rr', 'll', 'rl', 'lr'] - >>> uvd.reorder_pols('CASA') + >>> uvd.reorder_pols(order='CASA') >>> print(uvutils.polnum2str(uvd.polarization_array)) ['rr', 'rl', 'lr', 'll'] diff --git a/pyuvdata/uvcal/tests/test_uvcal.py b/pyuvdata/uvcal/tests/test_uvcal.py index ca60680eb8..678a1244ee 100644 --- a/pyuvdata/uvcal/tests/test_uvcal.py +++ b/pyuvdata/uvcal/tests/test_uvcal.py @@ -3918,15 +3918,15 @@ def test_uvcal_get_methods(future_shapes, gain_data): np.testing.assert_array_almost_equal(gain_arr, expected_array) # test variable key input - gain_arr2 = uvc.get_gains(key[0], jpol=key[1]) + gain_arr2 = uvc.get_gains(key[0], key[1]) np.testing.assert_array_almost_equal(gain_arr, gain_arr2) gain_arr2 = uvc.get_gains(key[0]) np.testing.assert_array_almost_equal(gain_arr, gain_arr2) gain_arr2 = uvc.get_gains(key[:1]) np.testing.assert_array_almost_equal(gain_arr, gain_arr2) - gain_arr2 = uvc.get_gains(10, jpol=-5) + gain_arr2 = uvc.get_gains(10, -5) np.testing.assert_array_almost_equal(gain_arr, gain_arr2) - gain_arr2 = uvc.get_gains(10, jpol="x") + gain_arr2 = uvc.get_gains(10, "x") np.testing.assert_array_almost_equal(gain_arr, gain_arr2) # check has_key diff --git a/pyuvdata/uvcal/uvcal.py b/pyuvdata/uvcal/uvcal.py index c1814f1f3f..34743e98de 100644 --- a/pyuvdata/uvcal/uvcal.py +++ b/pyuvdata/uvcal/uvcal.py @@ -1644,7 +1644,7 @@ def _parse_key(self, ant, *, jpol=None): return key - def get_gains(self, ant, *, jpol=None, squeeze_pol=True): + def get_gains(self, ant, jpol=None, *, squeeze_pol=True): """ Get the gain associated with an antenna and/or polarization. @@ -1672,7 +1672,7 @@ def get_gains(self, ant, *, jpol=None, squeeze_pol=True): self._parse_key(ant, jpol=jpol), self.gain_array, squeeze_pol=squeeze_pol ) - def get_flags(self, ant, *, jpol=None, squeeze_pol=True): + def get_flags(self, ant, jpol=None, *, squeeze_pol=True): """ Get the flags associated with an antenna and/or polarization. @@ -1697,7 +1697,7 @@ def get_flags(self, ant, *, jpol=None, squeeze_pol=True): self._parse_key(ant, jpol=jpol), self.flag_array, squeeze_pol=squeeze_pol ) - def get_quality(self, ant, *, jpol=None, squeeze_pol=True): + def get_quality(self, ant, jpol=None, *, squeeze_pol=True): """ Get the qualities associated with an antenna and/or polarization. 
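For reference, the pattern these patches rely on is Python's bare `*` separator in a signature: parameters listed after it become keyword-only, while parameters before it (such as `ant` and `jpol` in the `get_gains` change above) can still be passed positionally. The snippet below is a minimal standalone sketch of that behavior only; `example_gains` is a hypothetical stand-in and not part of pyuvdata.

    def example_gains(ant, jpol=None, *, squeeze_pol=True):
        # ant and jpol may be given positionally; squeeze_pol must be named.
        return ant, jpol, squeeze_pol

    example_gains(9, "Jxx")                     # allowed: jpol stays positional
    example_gains(9, "Jxx", squeeze_pol=False)  # allowed: keyword-only argument passed by name
    # example_gains(9, "Jxx", False)            # would raise TypeError: takes at most 2 positional arguments
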
From aaaaec5fd295e54bea14e10aec5e73de81929531 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Sun, 15 Oct 2023 19:08:49 -0700 Subject: [PATCH 08/12] fix min_deps tests --- pyuvdata/tests/test_utils.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/pyuvdata/tests/test_utils.py b/pyuvdata/tests/test_utils.py index ea78cdb788..c4bb638035 100644 --- a/pyuvdata/tests/test_utils.py +++ b/pyuvdata/tests/test_utils.py @@ -268,21 +268,35 @@ def test_no_moon(): with pytest.raises(ValueError, match=msg): uvutils.XYZ_from_LatLonAlt(lat, lon, alt, frame="mcmf") with pytest.raises(ValueError, match=msg): - uvutils.get_lst_for_time([2451545.0], 0, 0, 0, frame="mcmf") + uvutils.get_lst_for_time( + [2451545.0], latitude=0, longitude=0, altitude=0, frame="mcmf" + ) with pytest.raises(ValueError, match=msg): - uvutils.ENU_from_ECEF(None, 0.0, 1.0, 10.0, frame="mcmf") + uvutils.ENU_from_ECEF( + None, latitude=0.0, longitude=1.0, altitude=10.0, frame="mcmf" + ) with pytest.raises(ValueError, match=msg): - uvutils.ECEF_from_ENU(None, 0.0, 1.0, 10.0, frame="mcmf") + uvutils.ECEF_from_ENU( + None, latitude=0.0, longitude=1.0, altitude=10.0, frame="mcmf" + ) with pytest.raises(ValueError, match=msg): uvutils.transform_icrs_to_app( - [2451545.0], 0, 0, (0, 0, 0), telescope_frame="mcmf" + time_array=[2451545.0], + ra=0, + dec=0, + telescope_loc=(0, 0, 0), + telescope_frame="mcmf", ) with pytest.raises(ValueError, match=msg): uvutils.transform_app_to_icrs( - [2451545.0], 0, 0, (0, 0, 0), telescope_frame="mcmf" + time_array=[2451545.0], + app_ra=0, + app_dec=0, + telescope_loc=(0, 0, 0), + telescope_frame="mcmf", ) with pytest.raises(ValueError, match=msg): - uvutils.calc_app_coords(0, 0, telescope_frame="mcmf") + uvutils.calc_app_coords(lon_coord=0.0, lat_coord=0.0, telescope_frame="mcmf") def test_lla_xyz_lla_roundtrip(): From 07c0333f02a8bcf57b9f703376ac6c5ba5650463 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Thu, 31 Aug 2023 13:35:14 -0700 Subject: [PATCH 09/12] address review comments --- pyuvdata/uvdata/initializers.py | 2 +- pyuvdata/uvdata/tests/test_initializers.py | 2 +- pyuvdata/uvdata/tests/test_uvdata.py | 4 +--- pyuvdata/uvdata/uvdata.py | 2 +- pyuvdata/uvdata/uvfits.py | 4 ++-- 5 files changed, 6 insertions(+), 8 deletions(-) diff --git a/pyuvdata/uvdata/initializers.py b/pyuvdata/uvdata/initializers.py index 8e457b4fcf..475f4b037e 100644 --- a/pyuvdata/uvdata/initializers.py +++ b/pyuvdata/uvdata/initializers.py @@ -153,7 +153,7 @@ def get_time_params( def get_freq_params( - *, freq_array: np.ndarray, channel_width: float | np.ndarray | None = None + freq_array: np.ndarray, *, channel_width: float | np.ndarray | None = None ) -> tuple[np.ndarray, np.ndarray]: """Configure frequency parameters for new UVData object.""" if not isinstance(freq_array, np.ndarray): diff --git a/pyuvdata/uvdata/tests/test_initializers.py b/pyuvdata/uvdata/tests/test_initializers.py index 6981966aac..0f4494a671 100644 --- a/pyuvdata/uvdata/tests/test_initializers.py +++ b/pyuvdata/uvdata/tests/test_initializers.py @@ -295,7 +295,7 @@ def test_alternate_freq_inputs(): freq_array = np.linspace(1e8, 2e8, 15) channel_width = freq_array[1] - freq_array[0] - freqs, widths = get_freq_params(freq_array=freq_array, channel_width=channel_width) + freqs, widths = get_freq_params(freq_array, channel_width=channel_width) freqs2, widths2 = get_freq_params( freq_array=freq_array, channel_width=channel_width * np.ones_like(freq_array) diff --git a/pyuvdata/uvdata/tests/test_uvdata.py 
b/pyuvdata/uvdata/tests/test_uvdata.py index 7e610a44b2..be843d6c66 100644 --- a/pyuvdata/uvdata/tests/test_uvdata.py +++ b/pyuvdata/uvdata/tests/test_uvdata.py @@ -8937,9 +8937,7 @@ def test_frequency_average(casa_uvfits, future_shapes, flex_spw, sum_corr): n_chan_to_avg = 2 with uvtest.check_warnings(UserWarning, "eq_coeffs vary by frequency"): - uvobj.frequency_average( - n_chan_to_avg=2, keep_ragged=True, summing_correlator_mode=sum_corr - ) + uvobj.frequency_average(2, keep_ragged=True, summing_correlator_mode=sum_corr) assert uvobj.Nfreqs == (uvobj2.Nfreqs / n_chan_to_avg) diff --git a/pyuvdata/uvdata/uvdata.py b/pyuvdata/uvdata/uvdata.py index fdbc5ccf85..b3e07674f8 100644 --- a/pyuvdata/uvdata/uvdata.py +++ b/pyuvdata/uvdata/uvdata.py @@ -9643,8 +9643,8 @@ def resample_in_time( def frequency_average( self, - *, n_chan_to_avg, + *, summing_correlator_mode=False, propagate_flags=False, respect_spws=True, diff --git a/pyuvdata/uvdata/uvfits.py b/pyuvdata/uvdata/uvfits.py index 50bf2006f7..b24dd7ac12 100644 --- a/pyuvdata/uvdata/uvfits.py +++ b/pyuvdata/uvdata/uvfits.py @@ -198,8 +198,8 @@ def _get_parameter_data( def _get_data( self, - *, vis_hdu, + *, antenna_nums, antenna_names, ant_str, @@ -754,7 +754,7 @@ def read_uvfits( if read_data: # Now read in the data self._get_data( - vis_hdu=vis_hdu, + vis_hdu, antenna_nums=antenna_nums, antenna_names=antenna_names, ant_str=ant_str, From 294dfb3807338f1834d2364d78f759b2ae252f5e Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Wed, 6 Sep 2023 17:32:53 -0700 Subject: [PATCH 10/12] address review comments --- docs/uvcal_tutorial.rst | 2 +- pyuvdata/tests/test_utils.py | 12 ++++---- pyuvdata/utils.py | 42 ++++++++++++---------------- pyuvdata/uvdata/initializers.py | 6 ++-- pyuvdata/uvdata/tests/test_uvdata.py | 10 ++++--- pyuvdata/uvdata/uvdata.py | 6 ++-- pyuvdata/uvdata/uvh5.py | 4 +-- pyuvdata/uvflag/tests/test_uvflag.py | 3 +- pyuvdata/uvflag/uvflag.py | 6 ++-- 9 files changed, 48 insertions(+), 43 deletions(-) diff --git a/docs/uvcal_tutorial.rst b/docs/uvcal_tutorial.rst index f43c0e1db8..af0b2751b8 100644 --- a/docs/uvcal_tutorial.rst +++ b/docs/uvcal_tutorial.rst @@ -214,7 +214,7 @@ a) Data for a single antenna and instrumental polarization >>> from pyuvdata.data import DATA_PATH >>> filename = os.path.join(DATA_PATH, 'zen.2457555.42443.HH.uvcA.omni.calfits') >>> uvc = UVCal.from_file(filename, use_future_array_shapes=True) - >>> gain = uvc.get_gains(9, jpol='Jxx') # gain for ant=9, pol='Jxx' + >>> gain = uvc.get_gains(9, 'Jxx') # gain for ant=9, pol='Jxx' >>> # One can equivalently make any of these calls with the input wrapped in a tuple. 
>>> gain = uvc.get_gains((9, 'Jxx')) diff --git a/pyuvdata/tests/test_utils.py b/pyuvdata/tests/test_utils.py index c4bb638035..81cc5933c4 100644 --- a/pyuvdata/tests/test_utils.py +++ b/pyuvdata/tests/test_utils.py @@ -2784,7 +2784,9 @@ def test_redundancy_finder(): for gi, gp in enumerate(bl_gps_unconj): for bi, bl in enumerate(gp): if bl in conjugates: - bl_gps_unconj[gi][bi] = uvutils.baseline_index_flip(bl, len(antnums)) + bl_gps_unconj[gi][bi] = uvutils.baseline_index_flip( + bl, Nants_telescope=len(antnums) + ) bl_gps_unconj = [sorted(bgp) for bgp in bl_gps_unconj] bl_gps_ants = [sorted(bgp) for bgp in baseline_groups_ants] assert np.all(sorted(bl_gps_ants) == sorted(bl_gps_unconj)) @@ -2841,7 +2843,7 @@ def test_redundancy_conjugates(): ant1_arr = np.tile(np.arange(Nants), Nants) ant2_arr = np.repeat(np.arange(Nants), Nants) Nbls = ant1_arr.size - bl_inds = uvutils.antnums_to_baseline(ant1_arr, ant2_arr, Nants) + bl_inds = uvutils.antnums_to_baseline(ant1_arr, ant2_arr, Nants_telescope=Nants) maxbl = 100.0 bl_vecs = np.random.uniform(-maxbl, maxbl, (Nbls, 3)) @@ -4262,7 +4264,7 @@ def test_determine_blt_order(blt_order): ntime = 2 def getbl(ant1, ant2): - return uvutils.antnums_to_baseline(ant1, ant2, nant) + return uvutils.antnums_to_baseline(ant1, ant2, Nants_telescope=nant) def getantbls(): # Arrange them backwards so by default they are NOT sorted @@ -4334,7 +4336,7 @@ def test_determine_blt_order_size_1(): times = np.array([2458119.5]) ant1 = np.array([0]) ant2 = np.array([1]) - bl = uvutils.antnums_to_baseline(ant1, ant2, 2) + bl = uvutils.antnums_to_baseline(ant1, ant2, Nants_telescope=2) order = uvutils.determine_blt_order( time_array=times, @@ -4357,7 +4359,7 @@ def test_determine_rect_time_first(): ant1 = np.arange(3) ant2 = np.arange(3) ANT1, ANT2 = np.meshgrid(ant1, ant2) - bls = uvutils.antnums_to_baseline(ANT1.flatten(), ANT2.flatten(), 3) + bls = uvutils.antnums_to_baseline(ANT1.flatten(), ANT2.flatten(), Nants_telescope=3) rng = np.random.default_rng(12345) diff --git a/pyuvdata/utils.py b/pyuvdata/utils.py index 170e04a8c5..a355762d9f 100644 --- a/pyuvdata/utils.py +++ b/pyuvdata/utils.py @@ -755,7 +755,7 @@ def _sort_freq_helper( return index_array -def baseline_to_antnums(baseline, Nants_telescope): +def baseline_to_antnums(baseline, *, Nants_telescope): """ Get the antenna numbers corresponding to a given baseline number. @@ -793,7 +793,7 @@ def baseline_to_antnums(baseline, Nants_telescope): return ant1.item(0), ant2.item(0) -def antnums_to_baseline(ant1, ant2, Nants_telescope, *, attempt256=False): +def antnums_to_baseline(ant1, ant2, *, Nants_telescope, attempt256=False): """ Get the baseline number corresponding to two given antenna numbers. 
@@ -851,10 +851,10 @@ def antnums_to_baseline(ant1, ant2, Nants_telescope, *, attempt256=False): return baseline.item(0) -def baseline_index_flip(baseline, Nants_telescope): +def baseline_index_flip(baseline, *, Nants_telescope): """Change baseline number to reverse antenna order.""" - ant1, ant2 = baseline_to_antnums(baseline, Nants_telescope) - return antnums_to_baseline(ant2, ant1, Nants_telescope) + ant1, ant2 = baseline_to_antnums(baseline, Nants_telescope=Nants_telescope) + return antnums_to_baseline(ant2, ant1, Nants_telescope=Nants_telescope) def _x_orientation_rep_dict(x_orientation): @@ -1792,7 +1792,7 @@ def _rotate_matmul_wrapper(*, xyz_array, rot_matrix, n_rot): return rotated_xyz -def _rotate_one_axis(*, xyz_array, rot_amount, rot_axis): +def _rotate_one_axis(xyz_array, *, rot_amount, rot_axis): """ Rotate an array of 3D positions around the a single axis (x, y, or z). @@ -1877,7 +1877,7 @@ def _rotate_one_axis(*, xyz_array, rot_amount, rot_axis): ) -def _rotate_two_axis(*, xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2): +def _rotate_two_axis(xyz_array, *, rot_amount1, rot_amount2, rot_axis1, rot_axis2): """ Rotate an array of 3D positions sequentially around a pair of axes (x, y, or z). @@ -1925,21 +1925,15 @@ def _rotate_two_axis(*, xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis return deepcopy(xyz_array) elif no_rot1: # If rot_amount1 is None, then ignore it and just work w/ the 2nd rotation - return _rotate_one_axis( - xyz_array=xyz_array, rot_amount=rot_amount2, rot_axis=rot_axis2 - ) + return _rotate_one_axis(xyz_array, rot_amount=rot_amount2, rot_axis=rot_axis2) elif no_rot2: # If rot_amount2 is None, then ignore it and just work w/ the 1st rotation - return _rotate_one_axis( - xyz_array=xyz_array, rot_amount=rot_amount1, rot_axis=rot_axis1 - ) + return _rotate_one_axis(xyz_array, rot_amount=rot_amount1, rot_axis=rot_axis1) elif rot_axis1 == rot_axis2: # Capture the case where someone wants to do a sequence of rotations on the same # axis. Also known as just rotating a single axis. return _rotate_one_axis( - xyz_array=xyz_array, - rot_amount=rot_amount1 + rot_amount2, - rot_axis=rot_axis1, + xyz_array, rot_amount=rot_amount1 + rot_amount2, rot_axis=rot_axis1 ) # Figure out how many individual rotation matricies we need, accounting for the @@ -2214,8 +2208,8 @@ def calc_uvw( ant_rot_vectors = np.reshape( np.transpose( _rotate_one_axis( - xyz_array=_rotate_two_axis( - xyz_array=ant_vectors, + _rotate_two_axis( + ant_vectors, rot_amount1=unique_gha, rot_amount2=unique_dec, rot_axis1=2, @@ -2288,14 +2282,14 @@ def calc_uvw( # up on the map). This is a much easier transform to handle. 
if np.all(gha_delta_array == 0.0) and np.all(old_app_dec == app_dec): new_coords = _rotate_one_axis( - xyz_array=uvw_array[:, [2, 0, 1], np.newaxis], + uvw_array[:, [2, 0, 1], np.newaxis], rot_amount=frame_pa - (0.0 if old_frame_pa is None else old_frame_pa), rot_axis=0, )[:, :, 0] else: new_coords = _rotate_two_axis( - xyz_array=_rotate_two_axis( - xyz_array=uvw_array[:, [2, 0, 1], np.newaxis], + _rotate_two_axis( + uvw_array[:, [2, 0, 1], np.newaxis], rot_amount1=0.0 if (from_enu or old_frame_pa is None) else (-old_frame_pa), @@ -2314,7 +2308,7 @@ def calc_uvw( # the chosen frame, if we not in ENU coordinates if not to_enu: new_coords = _rotate_one_axis( - xyz_array=new_coords, rot_amount=frame_pa, rot_axis=0 + new_coords, rot_amount=frame_pa, rot_axis=0 ) # Finally drop the now-vestigal last axis of the array @@ -4551,7 +4545,7 @@ def get_antenna_redundancies( mini = aj for ai in range(mini, Nants): anti, antj = antenna_numbers[ai], antenna_numbers[aj] - bidx = antnums_to_baseline(antj, anti, Nants) + bidx = antnums_to_baseline(antj, anti, Nants_telescope=Nants) bv = antenna_positions[ai] - antenna_positions[aj] bl_vecs.append(bv) bls.append(bidx) @@ -4564,7 +4558,7 @@ def get_antenna_redundancies( for gi, gp in enumerate(gps): for bi, bl in enumerate(gp): if bl in conjs: - gps[gi][bi] = baseline_index_flip(bl, Nants) + gps[gi][bi] = baseline_index_flip(bl, Nants_telescope=Nants) return gps, vecs, lens diff --git a/pyuvdata/uvdata/initializers.py b/pyuvdata/uvdata/initializers.py index 475f4b037e..ed39965dd3 100644 --- a/pyuvdata/uvdata/initializers.py +++ b/pyuvdata/uvdata/initializers.py @@ -188,7 +188,7 @@ def get_baseline_params( ) -> np.ndarray: """Configure baseline parameters for new UVData object.""" return utils.antnums_to_baseline( - antpairs[:, 0], antpairs[:, 1], len(antenna_numbers) + antpairs[:, 0], antpairs[:, 1], Nants_telescope=len(antenna_numbers) ) @@ -274,7 +274,9 @@ def configure_blt_rectangularity( # We don't know if it's rectangular or not. # Let's try to figure it out. baselines = utils.antnums_to_baseline( - antpairs[:, 0], antpairs[:, 1], len(np.unique(unique_antpairs)) + antpairs[:, 0], + antpairs[:, 1], + Nants_telescope=len(np.unique(unique_antpairs)), ) ( diff --git a/pyuvdata/uvdata/tests/test_uvdata.py b/pyuvdata/uvdata/tests/test_uvdata.py index be843d6c66..6f930e0f08 100644 --- a/pyuvdata/uvdata/tests/test_uvdata.py +++ b/pyuvdata/uvdata/tests/test_uvdata.py @@ -4896,8 +4896,10 @@ def test_key2inds_conj_all_pols_bl_fringe(casa_uvfits): # Mix one instance of this baseline. 
uv.ant_1_array[0] = ant2 uv.ant_2_array[0] = ant1 - uv.baseline_array[0] = uvutils.antnums_to_baseline(ant2, ant1, uv.Nants_telescope) - bl = uvutils.antnums_to_baseline(ant1, ant2, uv.Nants_telescope) + uv.baseline_array[0] = uvutils.antnums_to_baseline( + ant2, ant1, Nants_telescope=uv.Nants_telescope + ) + bl = uvutils.antnums_to_baseline(ant1, ant2, Nants_telescope=uv.Nants_telescope) bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0] ind1, ind2, indp = uv._key2inds(bl) @@ -4926,7 +4928,7 @@ def test_key2inds_conj_all_pols_bls(casa_uvfits): ant1 = uv.ant_1_array[0] ant2 = uv.ant_2_array[0] - bl = uvutils.antnums_to_baseline(ant2, ant1, uv.Nants_telescope) + bl = uvutils.antnums_to_baseline(ant2, ant1, Nants_telescope=uv.Nants_telescope) bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0] ind1, ind2, indp = uv._key2inds(bl) @@ -4944,7 +4946,7 @@ def test_key2inds_conj_all_pols_missing_data_bls(casa_uvfits): uv.select(polarizations=["rl"]) ant1 = uv.ant_1_array[0] ant2 = uv.ant_2_array[0] - bl = uvutils.antnums_to_baseline(ant2, ant1, uv.Nants_telescope) + bl = uvutils.antnums_to_baseline(ant2, ant1, Nants_telescope=uv.Nants_telescope) with pytest.raises( KeyError, match="Baseline 81924 not found for polarization array in data." diff --git a/pyuvdata/uvdata/uvdata.py b/pyuvdata/uvdata/uvdata.py index b3e07674f8..c6155ac4f2 100644 --- a/pyuvdata/uvdata/uvdata.py +++ b/pyuvdata/uvdata/uvdata.py @@ -3600,7 +3600,9 @@ def baseline_to_antnums(self, baseline): int or array_like of int second antenna number(s) """ - return uvutils.baseline_to_antnums(baseline, self.Nants_telescope) + return uvutils.baseline_to_antnums( + baseline, Nants_telescope=self.Nants_telescope + ) def antnums_to_baseline(self, ant1, ant2, *, attempt256=False): """ @@ -3622,7 +3624,7 @@ def antnums_to_baseline(self, ant1, ant2, *, attempt256=False): baseline number corresponding to the two antenna numbers. 
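A short usage sketch for these object-level wrappers, which simply forward to the
utility functions (the uvfits file below is the sample file already used in the
tutorial; reading it is assumed to succeed in your environment):

import os
from pyuvdata import UVData
from pyuvdata.data import DATA_PATH

uvd = UVData.from_file(
    os.path.join(DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits"),
    use_future_array_shapes=True,
)

# round-trip the first baseline number through the object-level wrappers
bl = uvd.baseline_array[0]
ant1, ant2 = uvd.baseline_to_antnums(bl)
print(uvd.antnums_to_baseline(ant1, ant2) == bl)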
""" return uvutils.antnums_to_baseline( - ant1, ant2, self.Nants_telescope, attempt256=attempt256 + ant1, ant2, Nants_telescope=self.Nants_telescope, attempt256=attempt256 ) def antpair2ind(self, ant1, ant2=None, *, ordered=True): diff --git a/pyuvdata/uvdata/uvh5.py b/pyuvdata/uvdata/uvh5.py index 59958dd011..0a600446ec 100644 --- a/pyuvdata/uvdata/uvh5.py +++ b/pyuvdata/uvdata/uvh5.py @@ -740,7 +740,7 @@ def unique_ants(self) -> set: def baseline_array(self) -> np.ndarray: """The baselines in the file, as unique integers.""" return uvutils.antnums_to_baseline( - self.ant_1_array, self.ant_2_array, self.Nants_telescope + self.ant_1_array, self.ant_2_array, Nants_telescope=self.Nants_telescope ) @cached_property @@ -749,7 +749,7 @@ def unique_baseline_array(self) -> np.ndarray: return uvutils.antnums_to_baseline( self.unique_antpair_1_array, self.unique_antpair_2_array, - self.Nants_telescope, + Nants_telescope=self.Nants_telescope, ) @cached_property diff --git a/pyuvdata/uvflag/tests/test_uvflag.py b/pyuvdata/uvflag/tests/test_uvflag.py index cc1410f51f..fa89db6d51 100644 --- a/pyuvdata/uvflag/tests/test_uvflag.py +++ b/pyuvdata/uvflag/tests/test_uvflag.py @@ -4177,7 +4177,8 @@ def test_select_parse_ants(uvf_from_data, uvf_mode): assert np.array_equiv( np.unique(uvf.baseline_array), uvutils.antnums_to_baseline( - *np.transpose([(88, 97), (97, 104), (97, 105)]), uvf.Nants_telescope + *np.transpose([(88, 97), (97, 104), (97, 105)]), + Nants_telescope=uvf.Nants_telescope, ), ) diff --git a/pyuvdata/uvflag/uvflag.py b/pyuvdata/uvflag/uvflag.py index a76e5fdcbe..76b49954eb 100644 --- a/pyuvdata/uvflag/uvflag.py +++ b/pyuvdata/uvflag/uvflag.py @@ -1279,7 +1279,9 @@ def baseline_to_antnums(self, baseline): """ assert self.type == "baseline", "Must be 'baseline' type UVFlag object." - return uvutils.baseline_to_antnums(baseline, self.Nants_telescope) + return uvutils.baseline_to_antnums( + baseline, Nants_telescope=self.Nants_telescope + ) def antnums_to_baseline(self, ant1, ant2, *, attempt256=False): """ @@ -1302,7 +1304,7 @@ def antnums_to_baseline(self, ant1, ant2, *, attempt256=False): """ assert self.type == "baseline", "Must be 'baseline' type UVFlag object." 
return uvutils.antnums_to_baseline( - ant1, ant2, self.Nants_telescope, attempt256=attempt256 + ant1, ant2, Nants_telescope=self.Nants_telescope, attempt256=attempt256 ) def get_baseline_nums(self): From e1aa3ca220371a48033992408952335f5b19d714 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Thu, 7 Sep 2023 10:09:41 -0700 Subject: [PATCH 11/12] address reviewer comments --- docs/uvdata_tutorial.rst | 4 +-- pyuvdata/tests/test_utils.py | 4 +-- pyuvdata/uvdata/miriad.py | 2 +- pyuvdata/uvdata/tests/test_miriad.py | 2 +- pyuvdata/uvdata/tests/test_uvdata.py | 42 ++++++++++++++-------------- pyuvdata/uvdata/uvdata.py | 10 +++---- 6 files changed, 31 insertions(+), 33 deletions(-) diff --git a/docs/uvdata_tutorial.rst b/docs/uvdata_tutorial.rst index 1b977a9498..bfd846bf66 100644 --- a/docs/uvdata_tutorial.rst +++ b/docs/uvdata_tutorial.rst @@ -1567,11 +1567,11 @@ various conventions (``'ant10'``, ``' >>> from pyuvdata.data import DATA_PATH >>> uvfits_file = os.path.join(DATA_PATH, 'day2_TDEM0003_10s_norx_1src_1spw.uvfits') >>> uvd = UVData.from_file(uvfits_file, use_future_array_shapes=True) - >>> uvd.conjugate_bls(convention='ant1>> uvd.conjugate_bls('ant1>> print(np.min(uvd.ant_2_array - uvd.ant_1_array) >= 0) True - >>> uvd2.conjugate_bls(convention='u<0', use_enu=False) + >>> uvd2.conjugate_bls('u<0', use_enu=False) >>> print(np.max(uvd2.uvw_array[:, 0]) <= 0) True diff --git a/pyuvdata/tests/test_utils.py b/pyuvdata/tests/test_utils.py index 81cc5933c4..5f9eafe50d 100644 --- a/pyuvdata/tests/test_utils.py +++ b/pyuvdata/tests/test_utils.py @@ -2675,7 +2675,7 @@ def test_redundancy_finder(): uvd.select(times=uvd.time_array[0]) uvd.unproject_phase(use_ant_pos=True) # uvw_array is now equivalent to baseline positions - uvd.conjugate_bls(convention="ant1i which we call ant1= 0 assert np.allclose(uv1.ant_1_array, uv2.ant_2_array) @@ -2929,13 +2929,13 @@ def test_conjugate_bls(casa_uvfits, metadata_only, future_shapes): ) # check everything returned to original values with original convention - uv2.conjugate_bls(convention="ant10", use_enu=False) assert np.min(uv2.uvw_array[:, 0]) >= 0 - uv2.conjugate_bls(convention="v<0", use_enu=False) + uv2.conjugate_bls("v<0", use_enu=False) assert np.max(uv2.uvw_array[:, 1]) <= 0 - uv2.conjugate_bls(convention="v>0", use_enu=False) + uv2.conjugate_bls("v>0", use_enu=False) assert np.min(uv2.uvw_array[:, 1]) >= 0 # unphase to drift to test using ENU positions uv2.unproject_phase(use_ant_pos=True) - uv2.conjugate_bls(convention="u<0") + uv2.conjugate_bls("u<0") assert np.max(uv2.uvw_array[:, 0]) <= 0 - uv2.conjugate_bls(convention="u>0") + uv2.conjugate_bls("u>0") assert np.min(uv2.uvw_array[:, 0]) >= 0 - uv2.conjugate_bls(convention="v<0") + uv2.conjugate_bls("v<0") assert np.max(uv2.uvw_array[:, 1]) <= 0 - uv2.conjugate_bls(convention="v>0") + uv2.conjugate_bls("v>0") assert np.min(uv2.uvw_array[:, 1]) >= 0 # test errors with pytest.raises(ValueError, match="convention must be one of"): - uv2.conjugate_bls(convention="foo") + uv2.conjugate_bls("foo") with pytest.raises(ValueError, match="If convention is an index array"): - uv2.conjugate_bls(convention=np.arange(5) - 1) + uv2.conjugate_bls(np.arange(5) - 1) with pytest.raises(ValueError, match="If convention is an index array"): - uv2.conjugate_bls(convention=[uv2.Nblts]) + uv2.conjugate_bls([uv2.Nblts]) @pytest.mark.filterwarnings("ignore:This method will be removed in version 3.0 when") @@ -6256,7 +6256,7 @@ def test_get_antenna_redundancies(pyuvsim_redundant): assert bl in uv0.baseline_array # 
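# Usage sketch for conjugate_bls with the convention passed positionally, as in
# the tutorial change above (assumes `uvd` is a populated UVData object, e.g.
# the tutorial uvfits sample):
import numpy as np

uvd.conjugate_bls("ant1<ant2")
print(np.min(uvd.ant_2_array - uvd.ant_1_array) >= 0)

uvd.conjugate_bls("u<0", use_enu=False)
print(np.max(uvd.uvw_array[:, 0]) <= 0)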
conjugate data differently - uv0.conjugate_bls(convention="ant10", use_enu=True) tol = 0.05 # a quick and dirty redundancy calculation unique_bls, baseline_inds = np.unique(uv.baseline_array, return_index=True) @@ -6841,7 +6841,7 @@ def test_redundancy_finder_when_nblts_not_nbls_times_ntimes(casa_uvfits): """Test the redundancy finder functions when Nblts != Nbls * Ntimes.""" tol = 1 # meter uv = casa_uvfits - uv.conjugate_bls(convention="u>0", use_enu=True) + uv.conjugate_bls("u>0", use_enu=True) # check that Nblts != Nbls * Ntimes assert uv.Nblts != uv.Nbls * uv.Ntimes @@ -11227,7 +11227,7 @@ def test_fix_phase(hera_uvh5, tmp_path, future_shapes, use_ant_pos, phase_frame) uv_in_bad_copy = uv_in_bad.copy() if file_type == "miriad": - uv_in_bad_copy.conjugate_bls(convention="ant10", uvw_tol=tol) if include_conjugates: result = result + (None,) @@ -10333,7 +10331,7 @@ def inflate_by_redundancy(self, *, tol=1.0, blt_order="time", blt_minor_order=No string specifying minor order along the blt axis (see `reorder_blts`) """ - self.conjugate_bls(convention="u>0") + self.conjugate_bls("u>0") red_gps, centers, lengths = self.get_redundancies( tol=tol, use_antpos=True, conjugate_bls=True ) From f7f3adab3285a294657e7a4fbaf1526c03d0b283 Mon Sep 17 00:00:00 2001 From: Bryna Hazelton Date: Thu, 7 Sep 2023 16:01:41 -0700 Subject: [PATCH 12/12] don't require order to be passed by name for reorder methods --- docs/uvcal_tutorial.rst | 4 +- docs/uvdata_tutorial.rst | 10 +-- pyuvdata/uvcal/tests/test_uvcal.py | 66 ++++++++-------- pyuvdata/uvcal/uvcal.py | 12 +-- pyuvdata/uvdata/miriad.py | 2 +- pyuvdata/uvdata/ms.py | 2 +- pyuvdata/uvdata/tests/test_miriad.py | 2 +- pyuvdata/uvdata/tests/test_uvdata.py | 110 +++++++++++++-------------- pyuvdata/uvdata/tests/test_uvh5.py | 6 +- pyuvdata/uvdata/uvdata.py | 6 +- 10 files changed, 110 insertions(+), 110 deletions(-) diff --git a/docs/uvcal_tutorial.rst b/docs/uvcal_tutorial.rst index af0b2751b8..2a95e3eddc 100644 --- a/docs/uvcal_tutorial.rst +++ b/docs/uvcal_tutorial.rst @@ -587,7 +587,7 @@ specified by passing an index array. True >>> # Prepend a ``-`` to the sort string to sort in descending order. - >>> cal.reorder_antennas(order='-number') + >>> cal.reorder_antennas('-number') >>> print(np.min(np.diff(cal.ant_array)) <= 0) True @@ -671,7 +671,7 @@ array for the time axis. True >>> # Prepend a ``-`` to the sort string to sort in descending order. 
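An explicit index array of length ``Ntimes`` is also accepted as the order; a small
sketch continuing the example above, reversing the current time ordering (``cal`` is
the populated UVCal object from the preceding lines):

    >>> import numpy as np
    >>> cal.reorder_times(np.arange(cal.Ntimes)[::-1])
    >>> print(np.min(np.diff(cal.time_array)) <= 0)
    True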
- >>> cal.reorder_times(order='-time') + >>> cal.reorder_times('-time') >>> print(np.min(np.diff(cal.time_array)) <= 0) True diff --git a/docs/uvdata_tutorial.rst b/docs/uvdata_tutorial.rst index bfd846bf66..7c634cf4d2 100644 --- a/docs/uvdata_tutorial.rst +++ b/docs/uvdata_tutorial.rst @@ -1600,17 +1600,17 @@ an option to sort the auto visibilities before the cross visibilities (``autos_f >>> # Explicity sorting by 'time' then 'baseline' gets the same result >>> uvd2 = uvd.copy() - >>> uvd2.reorder_blts(order='time', minor_order='baseline') + >>> uvd2.reorder_blts('time', minor_order='baseline') >>> print(uvd == uvd2) True - >>> uvd.reorder_blts(order='ant1', minor_order='ant2') + >>> uvd.reorder_blts('ant1', minor_order='ant2') >>> print(np.min(np.diff(uvd.ant_1_array)) >= 0) True >>> # You can also sort and conjugate in a single step for the purposes of comparing two objects - >>> uvd.reorder_blts(order='bda', conj_convention='ant1>> uvd2.reorder_blts(order='bda', conj_convention='ant1>> uvd.reorder_blts('bda', conj_convention='ant1>> uvd2.reorder_blts('bda', conj_convention='ant1>> print(uvd == uvd2) True @@ -1680,7 +1680,7 @@ ordering set by the user. >>> print(uvutils.polnum2str(uvd.polarization_array)) ['rr', 'll', 'rl', 'lr'] - >>> uvd.reorder_pols(order='CASA') + >>> uvd.reorder_pols('CASA') >>> print(uvutils.polnum2str(uvd.polarization_array)) ['rr', 'rl', 'lr', 'll'] diff --git a/pyuvdata/uvcal/tests/test_uvcal.py b/pyuvdata/uvcal/tests/test_uvcal.py index 678a1244ee..1444a1836a 100644 --- a/pyuvdata/uvcal/tests/test_uvcal.py +++ b/pyuvdata/uvcal/tests/test_uvcal.py @@ -1891,12 +1891,12 @@ def test_reorder_ants( ant_num_diff = np.diff(calobj2.ant_array) assert np.all(ant_num_diff > 0) - calobj2.reorder_antennas(order="-number") + calobj2.reorder_antennas("-number") ant_num_diff = np.diff(calobj2.ant_array) assert np.all(ant_num_diff < 0) sorted_names = np.sort(calobj.antenna_names) - calobj.reorder_antennas(order="name") + calobj.reorder_antennas("name") temp = np.asarray(calobj.antenna_names) dtype_use = temp.dtype name_array = np.zeros_like(calobj.ant_array, dtype=dtype_use) @@ -1908,10 +1908,10 @@ def test_reorder_ants( assert np.all(sorted_names == name_array) # test sorting with an integer array. 
First resort back to by number - calobj2.reorder_antennas(order="number") + calobj2.reorder_antennas("number") sorted_nums = [int(name[3:]) for name in sorted_names] index_array = [np.nonzero(calobj2.ant_array == ant)[0][0] for ant in sorted_nums] - calobj2.reorder_antennas(order=index_array) + calobj2.reorder_antennas(index_array) assert calobj2 == calobj @@ -1921,21 +1921,21 @@ def test_reorder_ants_errors(gain_data): match="order must be one of 'number', 'name', '-number', '-name' or an " "index array of length Nants_data", ): - gain_data.reorder_antennas(order="foo") + gain_data.reorder_antennas("foo") with pytest.raises( ValueError, match="If order is an index array, it must contain all indicies for the" "ant_array, without duplicates.", ): - gain_data.reorder_antennas(order=gain_data.antenna_numbers.astype(float)) + gain_data.reorder_antennas(gain_data.antenna_numbers.astype(float)) with pytest.raises( ValueError, match="If order is an index array, it must contain all indicies for the" "ant_array, without duplicates.", ): - gain_data.reorder_antennas(order=gain_data.antenna_numbers[:8]) + gain_data.reorder_antennas(gain_data.antenna_numbers[:8]) @pytest.mark.filterwarnings("ignore:The input_flag_array is deprecated") @@ -2118,7 +2118,7 @@ def test_reorder_times( calobj.reorder_times() assert calobj == calobj2 - calobj2.reorder_times(order="-time") + calobj2.reorder_times("-time") if time_range: time_diff = np.diff(calobj2.time_range[:, 0]) else: @@ -2133,7 +2133,7 @@ def test_reorder_times( total_quality_diff = np.diff(calobj2.total_quality_array, axis=2) assert np.all(total_quality_diff < 0) - calobj.reorder_times(order=np.flip(np.arange(calobj.Ntimes))) + calobj.reorder_times(np.flip(np.arange(calobj.Ntimes))) assert calobj == calobj2 @@ -2142,21 +2142,21 @@ def test_reorder_times_errors(gain_data): ValueError, match="order must be one of 'time', '-time' or an index array of length Ntimes", ): - gain_data.reorder_times(order="foo") + gain_data.reorder_times("foo") with pytest.raises( ValueError, match="If order is an array, it must contain all indicies for the time axis, " "without duplicates.", ): - gain_data.reorder_times(order=np.arange(gain_data.Ntimes) * 2) + gain_data.reorder_times(np.arange(gain_data.Ntimes) * 2) with pytest.raises( ValueError, match="If order is an array, it must contain all indicies for the time axis, " "without duplicates.", ): - gain_data.reorder_times(order=np.arange(7)) + gain_data.reorder_times(np.arange(7)) gain_data = time_array_to_time_range(gain_data, keep_time_array=True) with uvtest.check_warnings( @@ -2206,11 +2206,11 @@ def test_reorder_jones( calobj = calobj2.copy() # this is a no-op because it's already sorted this way - calobj2.reorder_jones(order="-number") + calobj2.reorder_jones("-number") jnum_diff = np.diff(calobj2.jones_array) assert np.all(jnum_diff < 0) - calobj2.reorder_jones(order="number") + calobj2.reorder_jones("number") jnum_diff = np.diff(calobj2.jones_array) assert np.all(jnum_diff > 0) @@ -2234,7 +2234,7 @@ def test_reorder_jones( # test sorting with an index array. 
Sort back to number first so indexing works sorted_nums = uvutils.jstr2num(sorted_names, x_orientation=calobj.x_orientation) index_array = [np.nonzero(calobj.jones_array == num)[0][0] for num in sorted_nums] - calobj.reorder_jones(order=index_array) + calobj.reorder_jones(index_array) assert calobj2 == calobj @@ -2248,21 +2248,21 @@ def test_reorder_jones_errors(gain_data): match="order must be one of 'number', 'name', '-number', '-name' or an " "index array of length Njones", ): - calobj.reorder_jones(order="foo") + calobj.reorder_jones("foo") with pytest.raises( ValueError, match="If order is an array, it must contain all indicies for " "the jones axis, without duplicates.", ): - calobj.reorder_jones(order=np.arange(gain_data.Njones) * 2) + calobj.reorder_jones(np.arange(gain_data.Njones) * 2) with pytest.raises( ValueError, match="If order is an array, it must contain all indicies for " "the jones axis, without duplicates.", ): - calobj.reorder_jones(order=np.arange(2)) + calobj.reorder_jones(np.arange(2)) @pytest.mark.filterwarnings("ignore:The input_flag_array is deprecated") @@ -2325,14 +2325,14 @@ def test_add_different_sorting( cal2 = calobj.select(jones=np.array([-6, -8]), inplace=False) if sort_type == "ant": - cal1.reorder_antennas(order="number") - cal2.reorder_antennas(order="-number") - calobj.reorder_antennas(order="name") + cal1.reorder_antennas("number") + cal2.reorder_antennas("-number") + calobj.reorder_antennas("name") order_check = cal1._ant_array == cal2._ant_array elif sort_type == "time": - cal1.reorder_times(order="time") - cal2.reorder_times(order="-time") - calobj.reorder_times(order="time") + cal1.reorder_times("time") + cal2.reorder_times("-time") + calobj.reorder_times("time") order_check = cal1._time_array == cal2._time_array elif sort_type == "freq": if wide_band: @@ -2346,9 +2346,9 @@ def test_add_different_sorting( calobj.reorder_freqs(channel_order="freq") order_check = cal1._freq_array == cal2._freq_array elif sort_type == "jones": - cal1.reorder_jones(order="name") - cal2.reorder_jones(order="-number") - calobj.reorder_jones(order="number") + cal1.reorder_jones("name") + cal2.reorder_jones("-number") + calobj.reorder_jones("number") order_check = cal1._jones_array == cal2._jones_array # Make sure that the order has actually been scrambled @@ -2359,11 +2359,11 @@ def test_add_different_sorting( cal4 = cal2 + cal1 if sort_type == "ant": - cal3.reorder_antennas(order="name") - cal4.reorder_antennas(order="name") + cal3.reorder_antennas("name") + cal4.reorder_antennas("name") elif sort_type == "time": - cal3.reorder_times(order="time") - cal4.reorder_times(order="time") + cal3.reorder_times("time") + cal4.reorder_times("time") elif sort_type == "freq": if wide_band: cal3.reorder_freqs() @@ -2372,8 +2372,8 @@ def test_add_different_sorting( cal3.reorder_freqs(channel_order="freq") cal4.reorder_freqs(channel_order="freq") elif sort_type == "jones": - cal3.reorder_jones(order="number") - cal4.reorder_jones(order="number") + cal3.reorder_jones("number") + cal4.reorder_jones("number") # Deal with the history separately, since it will be different assert str.startswith(cal3.history, calobj.history) diff --git a/pyuvdata/uvcal/uvcal.py b/pyuvdata/uvcal/uvcal.py index 34743e98de..a5d47f3b6c 100644 --- a/pyuvdata/uvcal/uvcal.py +++ b/pyuvdata/uvcal/uvcal.py @@ -1738,8 +1738,8 @@ def get_time_array(self): def reorder_antennas( self, - *, order="number", + *, run_check=True, check_extra=True, run_check_acceptability=True, @@ -1983,8 +1983,8 @@ def reorder_freqs( def 
reorder_times( self, - *, order="time", + *, run_check=True, check_extra=True, run_check_acceptability=True, @@ -2086,8 +2086,8 @@ def reorder_times( def reorder_jones( self, - *, order="name", + *, run_check=True, check_extra=True, run_check_acceptability=True, @@ -2630,7 +2630,7 @@ def __add__( this_ants_ind[other_argsort] ] - this.reorder_antennas(order=temp_ind) + this.reorder_antennas(temp_ind) if len(this_times_ind) != 0: this_argsort = np.argsort(this_times_ind) @@ -2642,7 +2642,7 @@ def __add__( this_times_ind[other_argsort] ] - this.reorder_times(order=temp_ind) + this.reorder_times(temp_ind) if len(this_freq_ind) != 0: this_argsort = np.argsort(this_freq_ind) @@ -2672,7 +2672,7 @@ def __add__( this_jones_ind[other_argsort] ] - this.reorder_jones(order=temp_ind) + this.reorder_jones(temp_ind) # Update filename parameter this.filename = uvutils._combine_filenames(this.filename, other.filename) diff --git a/pyuvdata/uvdata/miriad.py b/pyuvdata/uvdata/miriad.py index 455a7c590a..4480290aaf 100644 --- a/pyuvdata/uvdata/miriad.py +++ b/pyuvdata/uvdata/miriad.py @@ -1586,7 +1586,7 @@ def read_miriad( else: order = self.blt_order[0] minor_order = None - self.reorder_blts(order=order, minor_order=minor_order) + self.reorder_blts(order, minor_order=minor_order) # If the data set was recorded using the old phasing method, fix that now. if fix_old_proj and projected: diff --git a/pyuvdata/uvdata/ms.py b/pyuvdata/uvdata/ms.py index bdf558b43d..ff3dfd50f3 100644 --- a/pyuvdata/uvdata/ms.py +++ b/pyuvdata/uvdata/ms.py @@ -2332,7 +2332,7 @@ def read_ms( self.freq_array = np.expand_dims(self.freq_array, 0) # order polarizations - self.reorder_pols(order=pol_order, run_check=False) + self.reorder_pols(pol_order, run_check=False) if use_future_array_shapes: self.use_future_array_shapes() diff --git a/pyuvdata/uvdata/tests/test_miriad.py b/pyuvdata/uvdata/tests/test_miriad.py index 9e1db262d0..cceeae9cfb 100644 --- a/pyuvdata/uvdata/tests/test_miriad.py +++ b/pyuvdata/uvdata/tests/test_miriad.py @@ -1147,7 +1147,7 @@ def test_roundtrip_optional_params(uv_in_paper, tmp_path): assert uv_in == uv_out # test with bda as well (single entry in tuple) - uv_in.reorder_blts(order="bda") + uv_in.reorder_blts("bda") uv_in.write_miriad(testfile, clobber=True) uv_out.read(testfile, use_future_array_shapes=True) diff --git a/pyuvdata/uvdata/tests/test_uvdata.py b/pyuvdata/uvdata/tests/test_uvdata.py index 007227cd3c..a1c9934bc4 100644 --- a/pyuvdata/uvdata/tests/test_uvdata.py +++ b/pyuvdata/uvdata/tests/test_uvdata.py @@ -351,8 +351,8 @@ def uv_phase_time_split(hera_uvh5): uv_phase = hera_uvh5.copy uv_raw = hera_uvh5.copy - uv_phase.reorder_blts(order="time", minor_order="baseline") - uv_raw.reorder_blts(order="time", minor_order="baseline") + uv_phase.reorder_blts("time", minor_order="baseline") + uv_raw.reorder_blts("time", minor_order="baseline") uv_phase.phase(ra=0, dec=0, cat_name="npole", epoch="J2000", use_ant_pos=True) times = np.unique(uv_phase.time_array) @@ -1439,7 +1439,7 @@ def test_select_phase_center_id(tmp_path, carma_miriad): @pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values") def test_select_phase_center_id_blts(carma_miriad): uv_obj = carma_miriad - uv_obj.reorder_blts(order="baseline") + uv_obj.reorder_blts("baseline") uv1 = uv_obj.select( phase_center_ids=0, blt_inds=np.arange(uv_obj.Nblts // 2), inplace=False @@ -3106,7 +3106,7 @@ def test_reorder_pols(casa_uvfits, future_shapes): uv2.data_array = uv2.data_array[:, :, :, order] uv2.nsample_array = 
uv2.nsample_array[:, :, :, order] uv2.flag_array = uv2.flag_array[:, :, :, order] - uv1.reorder_pols(order=order) + uv1.reorder_pols(order) assert uv1 == uv2 # Restore original order @@ -3114,13 +3114,13 @@ def test_reorder_pols(casa_uvfits, future_shapes): uv2.reorder_pols() assert uv1 == uv2 - uv1.reorder_pols(order="AIPS") + uv1.reorder_pols("AIPS") # check that we have aips ordering aips_pols = np.array([-1, -2, -3, -4]).astype(int) assert np.all(uv1.polarization_array == aips_pols) uv2 = uv1.copy() - uv2.reorder_pols(order="CASA") + uv2.reorder_pols("CASA") # check that we have casa ordering casa_pols = np.array([-1, -3, -4, -2]).astype(int) assert np.all(uv2.polarization_array == casa_pols) @@ -3132,7 +3132,7 @@ def test_reorder_pols(casa_uvfits, future_shapes): assert np.all(uv2.data_array == uv1.data_array[:, :, :, order]) assert np.all(uv2.flag_array == uv1.flag_array[:, :, :, order]) - uv2.reorder_pols(order="AIPS") + uv2.reorder_pols("AIPS") # check that we have aips ordering again assert uv1 == uv2 @@ -3141,11 +3141,11 @@ def test_reorder_pols(casa_uvfits, future_shapes): ValueError, match="order must be one of: 'AIPS', 'CASA', or an index array of length Npols", ): - uv2.reorder_pols(order={"order": "foo"}) + uv2.reorder_pols({"order": "foo"}) # check error if order is an array of the wrong length with pytest.raises(ValueError, match="If order is an index array, it must"): - uv2.reorder_pols(order=[3, 2, 1]) + uv2.reorder_pols([3, 2, 1]) @pytest.mark.filterwarnings("ignore:Telescope EVLA is not") @@ -3167,7 +3167,7 @@ def test_reorder_blts_errs(casa_uvfits, order, minor_order, msg): Verify that reorder_blts throws expected errors when supplied with bad args """ with pytest.raises(ValueError, match=msg): - casa_uvfits.reorder_blts(order=order, minor_order=minor_order) + casa_uvfits.reorder_blts(order, minor_order=minor_order) @pytest.mark.filterwarnings("ignore:This method will be removed in version 3.0 when") @@ -3270,7 +3270,7 @@ def test_reorder_blts_equiv(casa_uvfits, args1, args2, future_shapes): def test_reorder_blts_sort_order( hera_uvh5, order, m_order, check_tuple, check_attr, autos_first ): - hera_uvh5.reorder_blts(order=order, minor_order=m_order, autos_first=autos_first) + hera_uvh5.reorder_blts(order, minor_order=m_order, autos_first=autos_first) assert hera_uvh5.blt_order == check_tuple if isinstance(order, str) and autos_first: auto_inds = np.nonzero(hera_uvh5.ant_1_array == hera_uvh5.ant_2_array)[0] @@ -7060,7 +7060,7 @@ def test_upsample_in_time(hera_uvh5, future_shapes): uv_object.phase_center_catalog[0] = init_phase_dict # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline") + uv_object.reorder_blts("baseline") # save some values for later init_data_size = uv_object.data_array.size @@ -7096,7 +7096,7 @@ def test_upsample_in_time_with_flags(hera_uvh5): uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline") + uv_object.reorder_blts("baseline") # save some values for later init_wf = uv_object.get_data(0, 1) @@ -7132,7 +7132,7 @@ def test_upsample_in_time_noninteger_resampling(hera_uvh5): uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline") + uv_object.reorder_blts("baseline") # save some values for later init_data_size = uv_object.data_array.size @@ -7193,7 +7193,7 @@ def 
test_upsample_in_time_summing_correlator_mode(hera_uvh5): uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline") + uv_object.reorder_blts("baseline") # save some values for later init_data_size = uv_object.data_array.size @@ -7231,7 +7231,7 @@ def test_upsample_in_time_summing_correlator_mode_with_flags(hera_uvh5): uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline") + uv_object.reorder_blts("baseline") # save some values for later init_wf = uv_object.get_data(0, 1) @@ -7269,7 +7269,7 @@ def test_upsample_in_time_summing_correlator_mode_nonint_resampling(hera_uvh5): uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline") + uv_object.reorder_blts("baseline") # save some values for later init_data_size = uv_object.data_array.size @@ -7312,7 +7312,7 @@ def test_partial_upsample_in_time(hera_uvh5): uv_object.integration_time[bl_inds] = uv_object.integration_time[0] / 2.0 # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline") + uv_object.reorder_blts("baseline") # save some values for later init_wf_01 = uv_object.get_data(0, 1) @@ -7351,7 +7351,7 @@ def test_upsample_in_time_drift(hera_uvh5): uv_object = hera_uvh5 # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline") + uv_object.reorder_blts("baseline") # save some values for later init_data_size = uv_object.data_array.size @@ -7408,7 +7408,7 @@ def test_upsample_in_time_drift_no_phasing(hera_uvh5, driftscan, partial_phase): ) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline") + uv_object.reorder_blts("baseline") # save some values for later init_data_size = uv_object.data_array.size @@ -7454,7 +7454,7 @@ def test_downsample_in_time(hera_uvh5, future_shapes): uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") uv_object2 = uv_object.copy() # save some values for later @@ -7505,7 +7505,7 @@ def test_downsample_in_time_partial_flags(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # save some values for later init_wf = uv_object.get_data(0, 1) original_int_time = np.amax(uv_object.integration_time) @@ -7559,7 +7559,7 @@ def test_downsample_in_time_totally_flagged(hera_uvh5, future_shapes): uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") uv_object2 = uv_object.copy() # save some values for later @@ -7613,7 +7613,7 @@ def test_downsample_in_time_uneven_samples(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", 
minor_order="time") uv_object2 = uv_object.copy() # save some values for later @@ -7667,7 +7667,7 @@ def test_downsample_in_time_uneven_samples_keep_ragged(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") uv_object2 = uv_object.copy() # save some values for later @@ -7709,7 +7709,7 @@ def test_downsample_in_time_summing_correlator_mode(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # save some values for later init_data_size = uv_object.data_array.size @@ -7754,7 +7754,7 @@ def test_downsample_in_time_summing_correlator_mode_partial_flags(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # save some values for later init_wf = uv_object.get_data(0, 1) @@ -7800,7 +7800,7 @@ def test_downsample_in_time_summing_correlator_mode_totally_flagged(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # save some values for later init_wf = uv_object.get_data(0, 1) @@ -7847,7 +7847,7 @@ def test_downsample_in_time_summing_correlator_mode_uneven_samples(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # save some values for later init_wf = uv_object.get_data(0, 1) @@ -7900,7 +7900,7 @@ def test_downsample_in_time_summing_correlator_mode_uneven_samples_drop_ragged( uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # save some values for later init_wf = uv_object.get_data(0, 1) @@ -7947,7 +7947,7 @@ def test_partial_downsample_in_time(hera_uvh5): uv_object.integration_time[bl_inds] = uv_object.integration_time[0] * 2.0 # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline") + uv_object.reorder_blts("baseline") # save some values for later init_wf_01 = uv_object.get_data(0, 1) @@ -7994,7 +7994,7 @@ def test_downsample_in_time_drift(hera_uvh5): uv_object = hera_uvh5 # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") uv_object2 = uv_object.copy() # save some values for later @@ -8062,7 +8062,7 @@ def test_downsample_in_time_drift_no_phasing(hera_uvh5, driftscan, partial_phase ) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", 
minor_order="time") uv_object2 = uv_object.copy() # save some values for later @@ -8116,7 +8116,7 @@ def test_downsample_in_time_nsample_precision(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # save some values for later init_wf = uv_object.get_data(0, 1) original_int_time = np.amax(uv_object.integration_time) @@ -8170,7 +8170,7 @@ def test_downsample_in_time_errors(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # raise an error if set neither min_int_time and n_times_to_avg with pytest.raises( @@ -8202,7 +8202,7 @@ def test_downsample_in_time_errors(hera_uvh5): # raise an error if phase centers change within an downsampling window uv_object2 = uv_object.copy() - uv_object2.reorder_blts(order="time") + uv_object2.reorder_blts("time") mask = np.full(uv_object2.Nblts, False) mask[: uv_object2.Nblts // 3] = True uv_object2.phase(ra=0, dec=0, phase_frame="icrs", select_mask=mask, cat_name="foo") @@ -8280,7 +8280,7 @@ def test_downsample_in_time_int_time_mismatch_warning(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # save some values for later init_data_size = uv_object.data_array.size @@ -8324,7 +8324,7 @@ def test_downsample_in_time_varying_integration_time(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # save some values for later init_wf = uv_object.get_data(0, 1) @@ -8381,7 +8381,7 @@ def test_downsample_in_time_varying_int_time_partial_flags(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # downselect to 14 times and one baseline uv_object.select(times=np.unique(uv_object.time_array)[:14]) @@ -8430,7 +8430,7 @@ def test_downsample_in_time_varying_integration_time_warning(hera_uvh5): uv_object = hera_uvh5 uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") # save some values for later init_wf = uv_object.get_data(0, 1) @@ -8475,7 +8475,7 @@ def test_upsample_downsample_in_time(hera_uvh5): uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") uv_object2 = uv_object.copy() max_integration_time = np.amin(uv_object.integration_time) / 2.0 @@ -8566,7 +8566,7 @@ def 
test_upsample_downsample_in_time_odd_resample(hera_uvh5, future_shapes): uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") uv_object2 = uv_object.copy() # try again with a resampling factor of 3 (test odd numbers) @@ -8613,7 +8613,7 @@ def test_upsample_downsample_in_time_metadata_only(hera_uvh5): uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd")) # reorder to make sure we get the right value later - uv_object.reorder_blts(order="baseline", minor_order="time") + uv_object.reorder_blts("baseline", minor_order="time") uv_object2 = uv_object.copy() max_integration_time = np.amin(uv_object.integration_time) / 2.0 @@ -12001,9 +12001,9 @@ def test_add_pol_sorting_bl(casa_uvfits, add_type, sort_type, future_shapes): ) if sort_type == "blt": - uv1.reorder_blts(order="time", minor_order="ant1") - uv2.reorder_blts(order="time", minor_order="ant2") - casa_uvfits.reorder_blts(order="bda") + uv1.reorder_blts("time", minor_order="ant1") + uv2.reorder_blts("time", minor_order="ant2") + casa_uvfits.reorder_blts("bda") order_check = uv1.ant_1_array == uv2.ant_1_array elif sort_type == "freq": uv1.reorder_freqs(channel_order="freq") @@ -12011,9 +12011,9 @@ def test_add_pol_sorting_bl(casa_uvfits, add_type, sort_type, future_shapes): casa_uvfits.reorder_freqs(spw_order="freq") order_check = uv1.freq_array == uv2.freq_array elif sort_type == "pol": - uv1.reorder_pols(order="AIPS") - uv2.reorder_pols(order="CASA") - casa_uvfits.reorder_pols(order="CASA") + uv1.reorder_pols("AIPS") + uv2.reorder_pols("CASA") + casa_uvfits.reorder_pols("CASA") order_check = uv1.polarization_array == uv2.polarization_array # Make sure that the order has actually been scrambled @@ -12023,11 +12023,11 @@ def test_add_pol_sorting_bl(casa_uvfits, add_type, sort_type, future_shapes): uv3 = uv1 + uv2 if sort_type == "blt": - uv3.reorder_blts(order="bda") + uv3.reorder_blts("bda") elif sort_type == "freq": uv3.reorder_freqs(channel_order="freq") elif sort_type == "pol": - uv3.reorder_pols(order="CASA") + uv3.reorder_pols("CASA") # Deal with the history separately, since it will be different assert str.startswith(uv3.history, casa_uvfits.history) @@ -12651,7 +12651,7 @@ def test_setting_time_axis_wrongly(casa_uvfits): with pytest.raises(ValueError, match="time_axis_faster_than_bls is True but"): casa_uvfits.check() - casa_uvfits.reorder_blts(order="time", minor_order="baseline") + casa_uvfits.reorder_blts("time", minor_order="baseline") casa_uvfits.blts_are_rectangular = True casa_uvfits.time_axis_faster_than_bls = True with pytest.raises( @@ -12659,7 +12659,7 @@ def test_setting_time_axis_wrongly(casa_uvfits): ): casa_uvfits.check() - casa_uvfits.reorder_blts(order="baseline", minor_order="time") + casa_uvfits.reorder_blts("baseline", minor_order="time") assert not casa_uvfits.time_axis_faster_than_bls casa_uvfits.blts_are_rectangular = True assert not casa_uvfits.time_axis_faster_than_bls @@ -12676,12 +12676,12 @@ def test_set_rectangularity(casa_uvfits, hera_uvh5): assert casa_uvfits.blts_are_rectangular is False assert casa_uvfits.time_axis_faster_than_bls is False - hera_uvh5.reorder_blts(order="time", minor_order="baseline") + hera_uvh5.reorder_blts("time", minor_order="baseline") hera_uvh5.set_rectangularity(force=True) assert hera_uvh5.blts_are_rectangular is True assert hera_uvh5.time_axis_faster_than_bls is False - 
hera_uvh5.reorder_blts(order=np.random.permutation(hera_uvh5.Nblts)) + hera_uvh5.reorder_blts(np.random.permutation(hera_uvh5.Nblts)) hera_uvh5.set_rectangularity(force=True) assert hera_uvh5.blts_are_rectangular is False assert hera_uvh5.time_axis_faster_than_bls is False diff --git a/pyuvdata/uvdata/tests/test_uvh5.py b/pyuvdata/uvdata/tests/test_uvh5.py index 4e20164efe..bfef79cd44 100644 --- a/pyuvdata/uvdata/tests/test_uvh5.py +++ b/pyuvdata/uvdata/tests/test_uvh5.py @@ -351,7 +351,7 @@ def test_uvh5_optional_parameters(casa_uvfits, tmp_path): assert uv_in == uv_out # test with blt_order = bda as well (single entry in tuple) - uv_in.reorder_blts(order="bda") + uv_in.reorder_blts("bda") uv_in.write_uvh5(testfile, clobber=True) uv_out.read(testfile, use_future_array_shapes=True) @@ -3621,7 +3621,7 @@ def setup_class(self): meta = uvh5.FastUVH5Meta(self.fl) uvd = meta.to_uvdata() - uvd.reorder_blts(order="baseline", minor_order="time") + uvd.reorder_blts("baseline", minor_order="time") self.fltime_axis_faster_than_bls = os.path.join( self.tmp_path.name, "time_axis_faster_than_bls.uvh5" ) @@ -3728,7 +3728,7 @@ def test_getting_lsts(self): # Now test a different ordering. uvd = meta.to_uvdata() - uvd.reorder_blts(order="baseline", minor_order="time") + uvd.reorder_blts("baseline", minor_order="time") uvd.initialize_uvh5_file( os.path.join(self.tmp_path.name, "time_axis_faster_than_bls.uvh5"), clobber=True, diff --git a/pyuvdata/uvdata/uvdata.py b/pyuvdata/uvdata/uvdata.py index 889ebd28d3..0f2c1129dc 100644 --- a/pyuvdata/uvdata/uvdata.py +++ b/pyuvdata/uvdata/uvdata.py @@ -4782,8 +4782,8 @@ def conjugate_bls(self, convention="ant1 None: def reorder_blts( self, - *, order="time", + *, minor_order=None, autos_first=False, conj_convention=None, @@ -6702,7 +6702,7 @@ def __add__( this_pol_ind[other_argsort] ] - this.reorder_pols(order=temp_ind) + this.reorder_pols(temp_ind) # Pad out self to accommodate new data blt_order = None
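# Usage sketch for the reorder methods after this change: the order itself is
# passed positionally, while the remaining options are passed by keyword
# (assumes `uvd` is a populated UVData object, e.g. the tutorial uvfits sample):
uvd.reorder_blts("time", minor_order="baseline")  # time-major, baseline-minor
uvd.reorder_blts("bda")                           # BDA-style ordering
uvd.reorder_pols("CASA")                          # or "AIPS", or an index array
uvd.reorder_freqs(channel_order="freq")           # channel_order stays a keyword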