diff --git a/.circleci/config.yml b/.circleci/config.yml index 1aafa61441..2bba91e7c4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -140,7 +140,7 @@ workflows: - pyuvdata: name: pyuvdata_3.11 python_version: "3.11" - env_name: "pyuvdata_tests" + env_name: "pyuvdata_tests_311" - pyuvdata: name: pyuvdata_min_deps python_version: "3.9" diff --git a/.github/workflows/macosx_windows_ci.yaml b/.github/workflows/macosx_windows_ci.yaml index 60528160d7..e4f03c10d7 100644 --- a/.github/workflows/macosx_windows_ci.yaml +++ b/.github/workflows/macosx_windows_ci.yaml @@ -36,6 +36,8 @@ jobs: run: | if [[ "${{ runner.os }}" = "Windows" ]]; then echo "::set-output name=ENV_NAME::pyuvdata_tests_windows" + elif [[ "${{ matrix.python-version }}" == "3.11" ]]; then + echo "::set-output name=ENV_NAME::pyuvdata_tests_311" else echo "::set-output name=ENV_NAME::pyuvdata_tests" fi diff --git a/ci/pyuvdata_tests_311.yml b/ci/pyuvdata_tests_311.yml new file mode 100644 index 0000000000..b5a4e9e62c --- /dev/null +++ b/ci/pyuvdata_tests_311.yml @@ -0,0 +1,30 @@ +# Need a special environment file for python 3.11 because h5py <3.9 throws lots of +# deprecation warnings with numpy >= 1.25. But h5py 3.9 cannot be resolved with other +# dependencies on python 3.11. So we're restricting numpy to <1.25 on python 3.11 for now. 
+name: pyuvdata_tests_311 +channels: + - conda-forge +dependencies: + - astropy>=5.0.4 + - astropy-healpix>=0.6 + - astroquery>=0.4.4 + - docstring_parser>=0.15 + - h5py>=3.1 + - hdf5plugin>=3.1.0 + - numpy>=1.20,<1.25 + - pyerfa>=2.0 + - python-casacore>=3.3.1 + - pyyaml>=5.1 + - scipy>=1.5 + - coverage + - pytest>=6.2.0 + - pytest-cases>=3.6.9 + - pytest-cov + - pytest-xdist + - cython + - setuptools_scm<7.0|>=7.0.3 + - pip + - pip: + - lunarsky>=0.2.1 + - novas + - novas_de405 diff --git a/pyuvdata/tests/__init__.py b/pyuvdata/tests/__init__.py index a9aaece89a..e6732ee784 100644 --- a/pyuvdata/tests/__init__.py +++ b/pyuvdata/tests/__init__.py @@ -150,14 +150,27 @@ def __exit__(self, exc_type=None, exc_val=None, exc_tb=None): # only check if we're not currently handling an exception if exc_type is None and exc_val is None and exc_tb is None: if self.expected_warning is None: - assert len(self) == 0 + expected_length = 0 else: - assert len(self) == len(self.expected_warning), ( - f"{len(self.expected_warning)} warnings expected, " + expected_length = len(self.expected_warning) + + if len(self) != expected_length: + warn_file_line = [] + msg_list = [] + for each in self: + warn_file_line.append(f"{each.filename}: {each.lineno}") + msg_list.append(each.message) + if self.expected_warning is None: + err_msg = "No warnings expected, " + else: + err_msg = f"{len(self.expected_warning)} warnings expected, " + err_msg += ( f"{len(self)} warnings issued. The list of emitted warnings is: " - f"{[each.message for each in self]}." + f"{msg_list}. 
The filenames and line numbers are: {warn_file_line}" ) + raise AssertionError(err_msg) + if expected_length > 0: for warn_i, exp_warn in enumerate(self.expected_warning): if not any(issubclass(r.category, exp_warn) for r in self): __tracebackhide__ = True diff --git a/pyuvdata/tests/test_utils.py b/pyuvdata/tests/test_utils.py index 191a99d845..d3e28abd87 100644 --- a/pyuvdata/tests/test_utils.py +++ b/pyuvdata/tests/test_utils.py @@ -1851,7 +1851,8 @@ def test_calc_app_fk4_roundtrip(astrometry_args, telescope_frame): @pytest.mark.filterwarnings('ignore:ERFA function "pmsafe" yielded 4 of') @pytest.mark.filterwarnings('ignore:ERFA function "utcut1" yielded 2 of') @pytest.mark.filterwarnings('ignore:ERFA function "d2dtf" yielded 1 of') -def test_astrometry_icrs_to_app(astrometry_args): +@pytest.mark.parametrize("use_extra", [True, False]) +def test_astrometry_icrs_to_app(astrometry_args, use_extra): """ Check for consistency beteen astrometry libraries when converting ICRS -> TOPP @@ -1881,6 +1882,15 @@ def test_astrometry_icrs_to_app(astrometry_args): coord_results[3] = (precalc_ra, precalc_dec) + kwargs = {} + extra_args = ["pm_ra", "pm_dec", "vrad", "dist"] + if use_extra: + for key in extra_args: + kwargs[key] = astrometry_args[key] + else: + # don't compare to precalc if not using extra arguments + coord_results = coord_results[:-1] + for idx, name in enumerate(astrometry_list): coord_results[idx] = uvutils.transform_icrs_to_app( astrometry_args["time_array"], @@ -1888,11 +1898,8 @@ def test_astrometry_icrs_to_app(astrometry_args): astrometry_args["icrs_dec"], astrometry_args["telescope_loc"], epoch=astrometry_args["epoch"], - pm_ra=astrometry_args["pm_ra"], - pm_dec=astrometry_args["pm_dec"], - vrad=astrometry_args["vrad"], - dist=astrometry_args["dist"], astrometry_library=name, + **kwargs, ) for idx in range(len(coord_results) - 1): diff --git a/pyuvdata/utils.py b/pyuvdata/utils.py index e8fade05e7..458eb66421 100644 --- a/pyuvdata/utils.py +++ 
b/pyuvdata/utils.py @@ -2768,23 +2768,38 @@ def transform_icrs_to_app( for idx in range(len(app_ra)): if multi_coord or (idx == 0): # Create a catalog entry for the source in question + if pm_ra is None: + pm_ra_use = 0.0 + else: + pm_ra_use = pm_ra_coord.to_value("mas/yr") * np.cos( + dec_coord[idx].to_value("rad") + ) + + if pm_dec is None: + pm_dec_use = 0.0 + else: + pm_dec_use = pm_dec_coord.to_value("mas/yr") + + if dist is None or np.any(dist == 0.0): + parallax = 0.0 + else: + parallax = d_coord[idx].kiloparsec ** -1.0 + + if vrad is None: + vrad_use = 0.0 + else: + vrad_use = v_coord[idx].to_value("km/s") + cat_entry = novas.make_cat_entry( "dummy_name", # Dummy source name "GKK", # Catalog ID, fixed for now 156, # Star ID number, fixed for now ra_coord[idx].to_value("hourangle"), dec_coord[idx].to_value("deg"), - 0.0 - if pm_ra is None - else ( - pm_ra_coord.to_value("mas/yr") - * np.cos(dec_coord[idx].to_value("rad")) - ), - 0.0 if pm_dec is None else pm_dec_coord.to_value("mas/yr"), - 0.0 - if (dist is None or np.any(dist == 0.0)) - else (d_coord.kiloparsec**-1.0), - 0.0 if (vrad is None) else v_coord.to_value("km/s"), + pm_ra_use, + pm_dec_use, + parallax, + vrad_use, ) # Update polar wobble parameters for a given timestamp @@ -2810,13 +2825,34 @@ def transform_icrs_to_app( # liberfa wants things in radians pm_x_array *= np.pi / (3600.0 * 180.0) pm_y_array *= np.pi / (3600.0 * 180.0) + + if pm_ra is None: + pm_ra_use = 0.0 + else: + pm_ra_use = pm_ra_coord.to_value("rad/yr") + + if pm_dec is None: + pm_dec_use = 0.0 + else: + pm_dec_use = pm_dec_coord.to_value("rad/yr") + + if dist is None or np.any(dist == 0.0): + parallax = 0.0 + else: + parallax = d_coord.pc**-1.0 + + if vrad is None: + vrad_use = 0 + else: + vrad_use = v_coord.to_value("km/s") + [_, _, _, app_dec, app_ra, eqn_org] = erfa.atco13( ra_coord.to_value("rad"), dec_coord.to_value("rad"), - 0.0 if (pm_ra is None) else pm_ra_coord.to_value("rad/yr"), - 0.0 if (pm_dec is None) else 
pm_dec_coord.to_value("rad/yr"), - 0.0 if (dist is None or np.any(dist == 0.0)) else (d_coord.pc**-1.0), - 0.0 if (vrad is None) else v_coord.to_value("km/s"), + pm_ra_use, + pm_dec_use, + parallax, + vrad_use, time_obj_array.utc.jd, 0.0, time_obj_array.delta_ut1_utc, diff --git a/pyuvdata/uvbeam/mwa_beam.py b/pyuvdata/uvbeam/mwa_beam.py index 166cadd0ed..6753113404 100644 --- a/pyuvdata/uvbeam/mwa_beam.py +++ b/pyuvdata/uvbeam/mwa_beam.py @@ -94,7 +94,7 @@ def P1sin(nmax, theta): # Pn(cos x)/sin x = -dPn(cos_th)/dcos_th Pm_cos_delta_cos = lpmv(orders, n, cos_th - delta_cos) # backward difference - Pm_sin[1, 0] = -(P[0] - Pm_cos_delta_cos[0]) / delta_cos + Pm_sin[1, 0] = -(P[0, 0] - Pm_cos_delta_cos[0, 0]) / delta_cos elif cos_th == -1: # The first approach, to just use the analytical derivative @@ -103,7 +103,7 @@ def P1sin(nmax, theta): # Pn(cos x)/sin x = -dPn(cos_th)/dcos_th Pm_cos_delta_cos = lpmv(orders, n, cos_th - delta_cos) # forward difference - Pm_sin[1, 0] = -(Pm_cos_delta_cos[0] - P[0]) / delta_cos + Pm_sin[1, 0] = -(Pm_cos_delta_cos[0, 0] - P[0, 0]) / delta_cos else: Pm_sin = P / sin_th diff --git a/pyuvdata/uvbeam/uvbeam.py b/pyuvdata/uvbeam/uvbeam.py index d2137144c9..2d5de47d47 100644 --- a/pyuvdata/uvbeam/uvbeam.py +++ b/pyuvdata/uvbeam/uvbeam.py @@ -950,7 +950,7 @@ def _fix_auto_power(self): self.data_array[:, :, pol_screen] ) - def _check_auto_power(self, fix_auto_power=False): + def _check_auto_power(self, fix_auto_power=False, warn_tols=(0, 0)): """ Check for complex auto polarization power beams. @@ -959,6 +959,11 @@ def _check_auto_power(self, fix_auto_power=False): fix_auto_power : bool If auto polarization power beams with imaginary values are found, fix those values so that they are real-only in data_array. + warn_tols : tuple of float + Tolerances (relative, absolute) to use in comparing max imaginary part of + auto polarization power beams to zero (passed to numpy.isclose). 
If the max + imaginary part is close to zero within the tolerances and fix_auto_power is + True, silently fix them to be zero and do not warn. """ if self.beam_type != "power" or self.polarization_array is None: @@ -988,11 +993,12 @@ def _check_auto_power(self, fix_auto_power=False): np.abs(np.imag(np.rollaxis(self.data_array, pol_axis)[pol_screen])) ) if fix_auto_power: - warnings.warn( - "Fixing auto polarization power beams to be be real-only, " - "after some imaginary values were detected in data_array. " - f"Largest imaginary component was {max_imag}." - ) + if not np.isclose(max_imag, 0, rtol=warn_tols[0], atol=warn_tols[1]): + warnings.warn( + "Fixing auto polarization power beams to be be real-only, " + "after some imaginary values were detected in data_array. " + f"Largest imaginary component was {max_imag}." + ) self._fix_auto_power() else: raise ValueError( @@ -1277,8 +1283,11 @@ def efield_to_power( if calc_cross_pols: # Sometimes the auto pol beams can have a small complex part due to - # numerical precision errors. Fix that (with warnings). - beam_object._check_auto_power(fix_auto_power=True) + # numerical precision errors. Fix that (with warnings if the complex part + # is larger than the tolerances). + beam_object._check_auto_power( + fix_auto_power=True, warn_tols=beam_object._data_array.tols + ) history_update_string = " Converted from efield to power using pyuvdata." 
diff --git a/pyuvdata/uvcal/fhd_cal.py b/pyuvdata/uvcal/fhd_cal.py index 506fecbec9..123ec92c00 100644 --- a/pyuvdata/uvcal/fhd_cal.py +++ b/pyuvdata/uvcal/fhd_cal.py @@ -243,7 +243,7 @@ def read_fhd_cal( self.history += "\n" + self.pyuvdata_version_str if not read_data: - n_pols = int(obs_data["N_POL"]) + n_pols = int(obs_data["N_POL"][0]) # FHD only has the diagonal elements (jxx, jyy), so limit to 2 self.Njones = int(np.min([n_pols, 2])) diff --git a/pyuvdata/uvdata/mir.py b/pyuvdata/uvdata/mir.py index df5b22816c..15255d7819 100644 --- a/pyuvdata/uvdata/mir.py +++ b/pyuvdata/uvdata/mir.py @@ -452,9 +452,9 @@ def _init_from_mir_parser( assert len(spw_nchan) == 1 # Get the data in the right units and dtype - spw_fsky = float(spw_fsky * 1e9) # GHz -> Hz - spw_fres = float(spw_fres * 1e6) # MHz -> Hz - spw_nchan = int(spw_nchan) + spw_fsky = float(spw_fsky[0] * 1e9) # GHz -> Hz + spw_fres = float(spw_fres[0] * 1e6) # MHz -> Hz + spw_nchan = int(spw_nchan[0]) # We need to do a some extra handling here, because a single correlator # can produce multiple spectral windows (e.g., LSB/USB). The scheme below diff --git a/pyuvdata/uvdata/mir_meta_data.py b/pyuvdata/uvdata/mir_meta_data.py index 423abaa673..240a2fdf51 100644 --- a/pyuvdata/uvdata/mir_meta_data.py +++ b/pyuvdata/uvdata/mir_meta_data.py @@ -3101,7 +3101,9 @@ def read(self, filepath, nchunks=8): if (file_size % (rec_size + hdr_dtype.itemsize)) != 0: # If the file size doesn't go in evenly, then read in just the first # record and try to figure it out. 
- nchunks = int(np.fromfile(old_ac_file, dtype=hdr_dtype, count=1)["nChunks"]) + nchunks = int( + np.fromfile(old_ac_file, dtype=hdr_dtype, count=1)["nChunks"][0] + ) rec_size = 4 * 16384 * nchunks * 2 assert ( file_size % (rec_size + hdr_dtype.itemsize) diff --git a/pyuvdata/uvdata/tests/test_uvdata.py b/pyuvdata/uvdata/tests/test_uvdata.py index 4e6060c7bd..55b421327a 100644 --- a/pyuvdata/uvdata/tests/test_uvdata.py +++ b/pyuvdata/uvdata/tests/test_uvdata.py @@ -12426,7 +12426,7 @@ def test_flex_pol_uvh5(future_shapes, multispw, sorting, uv_phase_comp, tmp_path spw_final_order = [1, 4, 5, 0, 3, 2, 6, 7, 8, 9, 10, 11] spw_order = np.zeros_like(uvd.spw_array) for idx, spw in enumerate(spw_final_order): - spw_order[idx] = np.nonzero(uvd.spw_array == spw)[0] + spw_order[idx] = np.nonzero(uvd.spw_array == spw)[0][0] uvd.reorder_freqs(spw_order=spw_order) uvd.check(check_autos=True) diff --git a/pyuvdata/uvdata/tests/test_uvfits.py b/pyuvdata/uvdata/tests/test_uvfits.py index f9a099d42a..d4bb4bb74d 100644 --- a/pyuvdata/uvdata/tests/test_uvfits.py +++ b/pyuvdata/uvdata/tests/test_uvfits.py @@ -1675,10 +1675,14 @@ def test_no_spoof(sma_mir, tmp_path, spoof): sma_mir._set_app_coords_helper() filename = os.path.join(tmp_path, "spoof.uvfits" if spoof else "no_spoof.uvfits") - with uvtest.check_warnings( - DeprecationWarning if spoof else None, - "UVFITS-required metadata are now set automatically to " if spoof else None, - ): + if spoof: + warn_type = DeprecationWarning + warn_msg = "UVFITS-required metadata are now set automatically to " + else: + warn_type = None + warn_msg = "" + + with uvtest.check_warnings(warn_type, match=warn_msg): sma_mir.write_uvfits(filename, spoof_nonessential=spoof) sma_uvfits = UVData.from_file(filename, use_future_array_shapes=True) diff --git a/pyuvdata/uvdata/uvdata.py b/pyuvdata/uvdata/uvdata.py index e81225077c..b2ad278dba 100644 --- a/pyuvdata/uvdata/uvdata.py +++ b/pyuvdata/uvdata/uvdata.py @@ -2777,7 +2777,7 @@ def 
remove_flex_pol(self, combine_spws=True): this_spw = np.array(spw_set["spws"])[spw_set["pols"] == pol] spw_order[this_ind] = np.nonzero( self.spw_array == this_spw - )[0] + )[0][0] else: spw_order = None @@ -5220,7 +5220,7 @@ def reorder_freqs( if self.flex_spw_polarization_array is not None: spw_sort_inds = np.zeros_like(self.spw_array) for idx, spw in enumerate(new_spw_array): - spw_sort_inds[idx] = np.nonzero(self.spw_array == spw)[0] + spw_sort_inds[idx] = np.nonzero(self.spw_array == spw)[0][0] self.flex_spw_polarization_array = self.flex_spw_polarization_array[ spw_sort_inds ] diff --git a/pyuvdata/uvdata/uvfits.py b/pyuvdata/uvdata/uvfits.py index 5e0d625df8..de74d66b30 100644 --- a/pyuvdata/uvdata/uvfits.py +++ b/pyuvdata/uvdata/uvfits.py @@ -1112,10 +1112,10 @@ def write_uvfits( # a common practice is to set the PZERO to the JD at midnight of the first time jd_midnight = np.floor(self.time_array[0] - 0.5) + 0.5 if write_precision == 32: - time_array1 = np.float32(self.time_array - jd_midnight) - time_array2 = np.float32( - self.time_array - jd_midnight - np.float64(time_array1) - ) + time_array1 = (self.time_array - jd_midnight).astype(np.float32) + time_array2 = ( + self.time_array - jd_midnight - time_array1.astype(np.float64) + ).astype(np.float32) else: time_array1 = self.time_array - jd_midnight @@ -1177,8 +1177,10 @@ def write_uvfits( # angles in uvfits files are stored in degrees, so first convert to degrees lst_array_deg = np.rad2deg(self.lst_array) if write_precision == 32: - lst_array_1 = np.float32(lst_array_deg) - lst_array_2 = np.float32(lst_array_deg - np.float64(lst_array_1)) + lst_array_1 = lst_array_deg.astype(np.float32) + lst_array_2 = (lst_array_deg - lst_array_1.astype(np.float64)).astype( + np.float32 + ) else: lst_array_1 = lst_array_deg group_parameter_dict["LST "] = lst_array_1 @@ -1549,7 +1551,7 @@ def write_uvfits( # coordinate frame, although nothing in phase_center_catalog forces # objects to share the same frame. 
So we want to make sure that # everything lines up with the coordinate frame listed. - ra_arr[idx], dec_arr[idx] = uvutils.transform_sidereal_coords( + new_ra, new_dec = uvutils.transform_sidereal_coords( phase_dict["cat_lon"], phase_dict["cat_lat"], phase_dict["cat_frame"], @@ -1558,6 +1560,8 @@ def write_uvfits( out_coord_epoch=phase_dict.get("cat_epoch"), time_array=np.mean(self.time_array), ) + ra_arr[idx] = new_ra[0] + dec_arr[idx] = new_dec[0] epo_arr[idx] = ( phase_dict["cat_epoch"]