diff --git a/README.md b/README.md index 7f067c4..5e4a5d7 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,18 @@ Nemo is a map filtering and source detection and characterization pipeline, designed to find galaxy clusters using the Sunyaev-Zel'dovich effect. It can also be used to find sources. -* Documentation: +* Documentation: * License: Will be BSD 2-clause when released (this is a currently a private repository: for now Nemo cannot be used for non-ACT, non-SO approved projects; however, the aim is to make the -code fully public by the time the first AdvACT cluster catalog is published) +code fully public by the time the first AdvACT cluster catalog is published). +* Authors: Matt Hilton, with contributions from Simone Aiola, David Alonso, Matthew Hasselfield, +Toby Marriage, Sigurd Naess, and Cristóbal Sifón. Nemo is *not* the pipeline used for [Hasselfield et al. (2013)](http://adsabs.harvard.edu/abs/2013JCAP...07..008H), but implements many of the ideas presented there, and should give similar results, given the same map (or at least it did in the past). *It is* the pipeline, that has been used for the -[two-season ACTPol cluster catalog paper](http://adsabs.harvard.edu/abs/2017arXiv170905600H). +[two-season ACTPol cluster catalog paper](http://adsabs.harvard.edu/abs/2017arXiv170905600H), +and the AdvACT S18 cluster catalog paper. A slightly modified version of Matthew Hasselfield's `gnfw.py` code (used for modeling cluster pressure profiles) is included in Nemo. 
@@ -30,24 +33,25 @@ removed, except for the version used for the # Software needed Nemo itself is written in Python (3.6+), and requires the following additional modules to be installed -(currently used versions are given in brackets, earlier and later versions also probably work): +(currently used versions are given in brackets, later versions also probably work): * numpy (1.13.3) * scipy (1.3.0) * matplotlib (2.1.1) * astLib (0.11.3) -* [pixell](https://github.com/simonsobs/pixell/) (0.5.3 or git version) +* [pixell](https://github.com/simonsobs/pixell/) (0.6.3 or git version) * Pillow (5.1.0) * astropy (3.2.1) -* IPython (7.2.0) * Cython (0.24.1) * PyYAML (3.12) * Colossus (1.2.9) +* [CCL](https://github.com/LSSTDESC/CCL) (2.1 or later) * mpi4py (3.0.0) * colorcet (1.0.0; https://github.com/bokeh/colorcet/releases) All of the dependencies can be installed using `pip`, and should be installed automatically as needed -by the `setup.py` script. +by the `setup.py` script (your mileage may vary in terms of how successful `pip` is at building +some of the external dependencies, depending on your set up). # Installation @@ -82,7 +86,8 @@ export PYTHONPATH=$HOME/local/lib/python3.6/site-packages:$PYTHONPATH # Running Nemo -Documentation is available at . +Documentation is available at , including a number of +tutorials. See [examples/equD56/README.md](examples/equD56/README.md) for a tutorial on how to re-create the ACTPol two-season cluster catalog (including mass estimates). diff --git a/bin/nemo b/bin/nemo index fba107f..6d899f4 100644 --- a/bin/nemo +++ b/bin/nemo @@ -163,8 +163,9 @@ if __name__ == '__main__': H0=70. 
Om0=0.30 Ob0=0.05 - sigma_8=0.8 - mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma_8, enableDrawSample = True) + sigma8=0.8 + ns=0.95 + mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns, enableDrawSample = True) selFnCollection=pipelines.makeSelFnCollection(config, mockSurvey) if config.MPIEnabled == True: config.comm.barrier() diff --git a/bin/nemoCatalogCheck b/bin/nemoCatalogCheck index fcd8722..288257d 100644 --- a/bin/nemoCatalogCheck +++ b/bin/nemoCatalogCheck @@ -39,7 +39,7 @@ if __name__ == '__main__': configFileName=args.configFileName catFileName=args.catalogFileName config=startUp.NemoConfig(configFileName, setUpMaps = False, MPIEnabled = False, verbose = False) - outputLabel=os.path.split(parDictFileName)[-1].replace(".yml", "") + outputLabel=os.path.split(configFileName)[-1].replace(".yml", "") print(">>> Checking catalog %s against nemo output:" % (catFileName)) optimalCatalogFileName=config.rootOutDir+os.path.sep+"%s_optimalCatalog.fits" % (os.path.split(config.rootOutDir)[-1]) diff --git a/bin/nemoCosmo b/bin/nemoCosmo index 6fa2d67..b8df0f3 100644 --- a/bin/nemoCosmo +++ b/bin/nemoCosmo @@ -38,15 +38,15 @@ import warnings #warnings.filterwarnings("error") #------------------------------------------------------------------------------------------------------------- -def lnprob(H0, Om0, sigma8, Ob0, tenToA0, B0, Mpivot, sigma_int): +def lnprob(H0, Om0, sigma8, Ob0, ns, tenToA0, B0, Mpivot, sigma_int): """Log likelihood function for use with Cobaya. 
""" # This can fail if wander outside parameter space where mass function is defined try: - selFn.update(H0, Om0, Ob0, sigma8, scalingRelationDict = {'tenToA0': tenToA0, 'B0': B0, - 'Mpivot': Mpivot, 'sigma_int': sigma_int}) + selFn.update(H0, Om0, Ob0, sigma8, ns, scalingRelationDict = {'tenToA0': tenToA0, 'B0': B0, + 'Mpivot': Mpivot, 'sigma_int': sigma_int}) except: return -np.inf @@ -152,8 +152,8 @@ if __name__ == '__main__': ['sigma8', {'prior': {'min': 0.6, 'max': 0.9}, 'ref': {'dist': 'norm', 'loc': 0.8, 'scale': 0.1}, 'proposal': 0.02, - 'latex': '\sigma_8'}] - ]) + 'latex': '\sigma_8'}], + ['ns', 0.95]]) if args.dumpConfig == True: cobaya.yaml.yaml_dump_file("default.yml", info, error_if_exists = False) print("Dumped default Cobaya configuration to `default.yml`") @@ -183,17 +183,3 @@ if __name__ == '__main__': # Run Cobaya... updated_info, products=run(info) - # We'll generally be running under MPI... so just run GetDist later instead... - #gd_sample=MCSamplesFromCobaya(updated_info, products["sample"]) - - #g=gdplt.getSubplotPlotter() - #params=['H0', 'Om0', 'sigma8'] - #param_3d = None - #g.triangle_plot(gd_sample, params, plot_3d_with_param=param_3d, filled=True, shaded=False) - #g.export(fname=cosmoOutDir+os.path.sep+"cornerplot.pdf") - #g.export(fname=cosmoOutDir+os.path.sep+"cornerplot.png") - - #mean=gd_sample.getMeans()[:3] # H0, Om0, sigma8 - #print("Mean:") - #print(mean) - diff --git a/bin/nemoMass b/bin/nemoMass index e0a2a4d..b147365 100644 --- a/bin/nemoMass +++ b/bin/nemoMass @@ -277,7 +277,8 @@ if __name__ == '__main__': if key not in tab.keys(): forcedPhotometry=True config=startUp.NemoConfig(parDictFileName, MPIEnabled = args.MPIEnabled, divideTilesByProcesses = False, - setUpMaps = forcedPhotometry, verbose = False, strictMPIExceptions = strictMPIExceptions) + setUpMaps = forcedPhotometry, writeTileDir = False, verbose = False, + strictMPIExceptions = strictMPIExceptions) # Remaining set-up massOptions=config.parDict['massOptions'] @@ 
-314,11 +315,15 @@ if __name__ == '__main__': Ob0=massOptions['Ob0'] else: Ob0=0.05 - if 'sigma_8' in list(massOptions.keys()): - sigma_8=massOptions['sigma_8'] + if 'sigma8' in list(massOptions.keys()): + sigma8=massOptions['sigma8'] else: - sigma_8=0.8 - mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma_8) + sigma8=0.8 + if 'ns' in list(massOptions.keys()): + ns=massOptions['ns'] + else: + ns=0.95 + mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns) colsToAdd=['M500', 'M500Uncorr', 'M200m', 'M200mUncorr', 'M500NoRel', 'M500UncorrNoRel', 'M200mNoRel', 'M200mUncorrNoRel'] diff --git a/bin/nemoModel b/bin/nemoModel new file mode 100644 index 0000000..c533cdc --- /dev/null +++ b/bin/nemoModel @@ -0,0 +1,100 @@ +#!/usr/bin/env python + +""" + +Make a map containing model objects using the info in a nemo output catalog + +""" + +import os +import sys +import numpy as np +import astropy.table as atpy +from astLib import * +from nemo import startUp +from nemo import maps +import argparse +import astropy.io.fits as pyfits + +#------------------------------------------------------------------------------------------------------------ +if __name__ == '__main__': + + parser=argparse.ArgumentParser("nemoModel") + parser.add_argument("catalogFileName", help = """A catalog (FITS table format) as produced by nemo.""") + parser.add_argument("templateFileName", help = """A FITS image file. 
The output sky model image will + have the same pixelization.""") + parser.add_argument("beamFileName", help = """A file containing the beam profile, in the standard format + used by ACT.""") + parser.add_argument("outputFileName", help = """The name of the output file that will contain the sky + model image.""") + parser.add_argument("-f", "--frequency-GHz", dest = "obsFreqGHz", type = float, default = 150.0, + help = """If the nemo catalog contains SZ-selected clusters, the SZ signal will be + evaluted at the given frequency, ignoring relativistic effects + (default: 150.0).""") + parser.add_argument("-M", "--mpi", dest="MPIEnabled", action="store_true", help="""Enable MPI. If used, + the image will be broken into a number of tiles, with one tile per process. If you + want to use this, run with e.g., mpiexec -np 4 nemoMass configFile.yml -M""", + default = False) + parser.add_argument("-n", "--no-strict-errors", dest="noStrictMPIExceptions", action="store_true", + help="""Disable strict exception handling (applies under MPI only, i.e., must be + used with the -M switch). If you use this option, you will get the full traceback + when a Python Exception is triggered, but the code may not terminate. 
This is due + to the Exception handling in mpi4py.""", + default = False) + args = parser.parse_args() + + if args.noStrictMPIExceptions == True: + strictMPIExceptions=False + else: + strictMPIExceptions=True + + # Create a stub config (then we can use existing start-up stuff to dish out the tiles) + parDict={} + mapDict={} + mapDict['mapFileName']=args.templateFileName + mapDict['obsFreqGHz']=args.obsFreqGHz + mapDict['beamFileName']=args.beamFileName + parDict['unfilteredMaps']=[mapDict] + parDict['mapFilters']=[] + if args.MPIEnabled == True: + parDict['makeTileDir']=True + parDict['tileOverlapDeg']=1.0 + parDict['tileDefLabel']='auto' + parDict['tileDefinitions']={'targetTileWidthDeg': 10.0, 'targetTileHeightDeg': 5.0} + + config=startUp.NemoConfig(parDict, MPIEnabled = args.MPIEnabled, divideTilesByProcesses = True, + makeOutputDirs = False, setUpMaps = True, writeTileDir = False, + verbose = False, strictMPIExceptions = strictMPIExceptions) + + tab=atpy.Table().read(args.catalogFileName) + + # Build a dictionary containing the model images which we'll stich together at the end + modelsDict={} + for tileName in config.tileNames: + shape=(config.tileCoordsDict[tileName]['clippedSection'][3], + config.tileCoordsDict[tileName]['clippedSection'][1]) + wcs=astWCS.WCS(config.tileCoordsDict[tileName]['header'], mode = 'pyfits') + try: + modelsDict[tileName]=maps.makeModelImage(shape, wcs, tab, args.beamFileName, + obsFreqGHz = args.obsFreqGHz) + except: + raise Exception("makeModelImage failed on tile '%s'" % (tileName)) + + # Gathering + if config.MPIEnabled == True: + config.comm.barrier() + gathered_modelsDicts=config.comm.gather(modelsDict, root = 0) + if config.rank == 0: + print("... 
gathered sky model tiles ...") + for tileModelDict in gathered_modelsDicts: + for tileName in tileModelDict.keys(): + modelsDict[tileName]=tileModelDict[tileName] + + # Stitching + if config.rank == 0: + d=np.zeros([config.origWCS.header['NAXIS2'], config.origWCS.header['NAXIS1']]) + wcs=config.origWCS + for tileName in modelsDict.keys(): + minX, maxX, minY, maxY=config.tileCoordsDict[tileName]['clippedSection'] + d[minY:maxY, minX:maxX]=modelsDict[tileName] + astImages.saveFITS(args.outputFileName, d, wcs) diff --git a/bin/nemoSelFn b/bin/nemoSelFn index 415b8bf..9e47af1 100644 --- a/bin/nemoSelFn +++ b/bin/nemoSelFn @@ -59,8 +59,9 @@ if __name__ == '__main__': H0=70. Om0=0.30 Ob0=0.05 - sigma_8=0.8 - mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma_8, enableDrawSample = True) + sigma8=0.8 + ns=0.95 + mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns, enableDrawSample = True) selFnCollection=pipelines.makeSelFnCollection(config, mockSurvey) diff --git a/docs/usage.rst b/docs/usage.rst index dc6ba94..c6a3a3a 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -1,8 +1,8 @@ .. _UsagePage: -====================== -Installation and Usage -====================== +============================= +Introduction and Installation +============================= .. mdinclude:: ../README.md diff --git a/examples/AdvACT/S18d_202006_APP.yml b/examples/AdvACT/S18d_202006_APP.yml new file mode 100644 index 0000000..33a7f71 --- /dev/null +++ b/examples/AdvACT/S18d_202006_APP.yml @@ -0,0 +1,153 @@ +# Nemo config file +# +# This config has GNFW + scaling relation parameters for Yizhou, Mansfield, Trac + Battaglia 2020 +# +# YAML format +# - use null to return None in Python +# - YAML is fussy about exp numbers: use e.g. 
1.0e+14 for M500MSun (not 1e14), 1.0e-07 for tol + +# Valid units are uK or Jy/sr +# this should be a list of maps at different frequencies +# NOTE: surveyMask is optional +unfilteredMaps: + - {mapFileName: "maps/Jun2020/act_s08_s18_cmb_f150_daynight_map.fits", + weightsFileName: "maps/Jun2020/act_s08_s18_cmb_f150_daynight_ivar.fits", + obsFreqGHz: 149.6, units: 'uK', + beamFileName: "maps/Jun2020/beams/s16_pa2_f150_nohwp_night_beam_profile_jitter.txt"} + - {mapFileName: "maps/Jun2020/act_s08_s18_cmb_f090_daynight_map.fits", + weightsFileName: "maps/Jun2020/act_s08_s18_cmb_f090_daynight_ivar.fits", + obsFreqGHz: 97.8, units: 'uK', + beamFileName: "maps/Jun2020/beams/s16_pa3_f090_nohwp_night_beam_profile_jitter.txt"} + +# Masks +surveyMask: "maps/Jun2020/AdvACTSurveyMask_v7_galLatCut_S18-dust-artifacts-extended-post10mJy.fits" + +# Instead of giving point source mask, can give catalog instead +maskPointSourcesFromCatalog: + - "PS_S18_f150_auto/PS_S18_f150_auto_optimalCatalog.fits" + - "customPSMask_S18/customPSCatalog_S18.fits" + +# Detection/catalog options +# Set useInterpolator; True for sub-pixel flux and SNR measurements +thresholdSigma: 4.0 +minObjPix: 1 +findCenterOfMass: True +useInterpolator: True +rejectBorder: 0 +objIdent: 'ACT-CL' +longNames: False + +# Photometry options +photFilter: 'Arnaud_M2e14_z0p4' + +# Optionally override the GNFW parameters - if not present, Arnaud et al. 
(2010) parameters are used +# The example below is for the Averaged Pressure Profile (name TBC; from Yizhou) +GNFWParams: {P0: 4.48, c500: 1.34, gamma: 0.45, alpha: 1.24, beta: 5.29, tol: 1.0e-7, npts: 100} + +# Mass measurement options - used by nemoMass and nemoSelFn scripts +# Writes out .fits file to nemoOutDir/nemoOutDir_M500.fits +# redshiftCatalog: A .fits table containing name, RADeg, decDeg, redshift, redshiftErr columns +# tenToA0, B0, Mpivot, sigma_int: Fixed scaling relation options (see H13 or ACTPol paper) +# rescaleFactor, rescaleFactorErr: For MCal masses, as in the ACTPol paper (i.e., just rescales M500 results by 1/rescaleFactor) +massOptions: {tenToA0: 4.34e-05, # From P0, APP profile - see UPPTesting_Jun2020.py + B0: 0.08, + Mpivot: 3.0e+14, + sigma_int: 0.2, + relativisticCorrection: True, + rescaleFactor: 0.69, + rescaleFactorErr: 0.07, + redshiftCatalog: "S18Clusters/AdvACT_confirmed.fits"} + +# Selection function options - this calculation can also be enabled with the nemo -S switch +# NOTE: could eventually add 'completenessFraction' to 'massLimitMaps', which is why that's a dictionary list +# Use selFnFootprints to calculate average completeness in given sky areas - e.g., overlap with optical surveys +calcSelFn: True +selFnOptions: {fixedSNRCut: 5.0, + massLimitMaps: [{z: 0.5}]} + +selFnFootprints: + - {label: "HSC", + maskList: ["HSCCheckAndSelFn/s19a_fdfc_CAR_contarea_ziy-gt-5.fits"]} + - {label: "KiDS", + maskList: ["KiDSSelFn/mask_KiDSN.fits", "KiDSSelFn/mask_KiDSS.fits"]} + - {label: "DES", + maskList: ["DESY3/AdvACT_y3a2_footprint_griz_1exp_v2.0.fits"]} + +# Filter definitions: +# allFilters is a dictionary of parameters that will be copied into all mapFilters +# (these can be overridden by keys with the same name in mapFilters) +allFilters: {class: "ArnaudModelMatchedFilter", + params: {noiseParams: {method: "dataMap", + noiseGridArcmin: 40.}, + saveFilteredMaps: False, + saveRMSMap: False, + savePlots: False, + saveDS9Regions: 
False, + outputUnits: 'yc', + edgeTrimArcmin: 0.0} + } + +# mapFilters is a list of all the different filters to apply +# (keys in mapFilters with the same name as those in allFilters take priority) +mapFilters: + - {label: "Arnaud_M1e14_z0p2", + params: {M500MSun: 1.0e+14, z: 0.2}} + - {label: "Arnaud_M2e14_z0p2", + params: {M500MSun: 2.0e+14, z: 0.2}} + - {label: "Arnaud_M4e14_z0p2", + params: {M500MSun: 4.0e+14, z: 0.2}} + - {label: "Arnaud_M8e14_z0p2", + params: {M500MSun: 8.0e+14, z: 0.2}} + - {label: "Arnaud_M1e14_z0p4", + params: {M500MSun: 1.0e+14, z: 0.4}} + - {label: "Arnaud_M2e14_z0p4", + params: {M500MSun: 2.0e+14, z: 0.4, + saveFilteredMaps: True, + savePlots: True}} + - {label: "Arnaud_M4e14_z0p4", + params: {M500MSun: 4.0e+14, z: 0.4}} + - {label: "Arnaud_M8e14_z0p4", + params: {M500MSun: 8.0e+14, z: 0.4}} + - {label: "Arnaud_M1e14_z0p8", + params: {M500MSun: 1.0e+14, z: 0.8}} + - {label: "Arnaud_M2e14_z0p8", + params: {M500MSun: 2.0e+14, z: 0.8}} + - {label: "Arnaud_M4e14_z0p8", + params: {M500MSun: 4.0e+14, z: 0.8}} + - {label: "Arnaud_M8e14_z0p8", + params: {M500MSun: 8.0e+14, z: 0.8}} + - {label: "Arnaud_M1e14_z1p2", + params: {M500MSun: 1.0e+14, z: 1.2}} + - {label: "Arnaud_M2e14_z1p2", + params: {M500MSun: 2.0e+14, z: 1.2}} + - {label: "Arnaud_M4e14_z1p2", + params: {M500MSun: 4.0e+14, z: 1.2}} + - {label: "Arnaud_M8e14_z1p2", + params: {M500MSun: 8.0e+14, z: 1.2}} + +# Source injection test - this can also be enabled with the nemo -I switch +#sourceInjectionTest: True +sourceInjectionIterations: 50 +sourcesPerTile: 50 +sourceInjectionModels: + - {redshift: 0.8, M500: 2.0e+14} + - {redshift: 0.4, M500: 2.0e+14} + - {redshift: 0.2, M500: 2.0e+14} + - {redshift: 0.1, M500: 2.0e+14} + +# tileDir options - cut-up each map into smaller sections +makeTileDir: True +makeQuickLookMaps: True +tileOverlapDeg: 1.0 +tileDefLabel: 'auto' +tileDefinitions: {mask: 'maps/Jun2020/AdvACTSurveyMask_v7_S18.fits', + targetTileWidthDeg: 10.0, + targetTileHeightDeg: 
5.0} + +# If this is given, only the named tiles will be processed (useful for testing) +#tileNameList: + #- '1_10_7' # powerful f150 source; do as set - sensitive to point-source mask threshold + #- '1_10_8' # J2327 (next to a source); do as set - sensitive to point-source mask threshold + #- '2_0_7' # powerful f150 source + #- '2_2_8' # powerful f150 source + #- '3_0_1' # powerful f150 source diff --git a/examples/SOSims/MFMF_SOSim_3freq_small.yml b/examples/SOSims/MFMF_SOSim_3freq_small.yml index a44631c..5191013 100644 --- a/examples/SOSims/MFMF_SOSim_3freq_small.yml +++ b/examples/SOSims/MFMF_SOSim_3freq_small.yml @@ -53,16 +53,20 @@ photFilter: 'Arnaud_M2e14_z0p4' # If 'fit', use fit to (theta, Q) done by nemo for 'photFilter' kernel # tenToA0, B0, Mpivot, sigma_int: Fixed scaling relation options (see H13 or ACTPol paper) # rescaleFactor, rescaleFactorErr: For MCal masses, as in the ACTPol paper (i.e., just rescales M500 results by 1/rescaleFactor) -massOptions: {tenToA0: 4.95e-5, - B0: 0.08, - Mpivot: 3.0e+14, +# NOTE: These parameters reproduce the websky sim scaling relation (well, no idea on scatter) +massOptions: {tenToA0: 2.65e-05, + B0: 0.0, + Mpivot: 3.0e+14, sigma_int: 0.2, + H0: 68.0, + Om0: 0.31, + Ob0: 0.049, + sigma8: 0.81, + ns: 0.965, relativisticCorrection: True, - rescaleFactor: 0.68, - rescaleFactorErr: 0.11, - redshiftCatalog: "redshifts.fits", - forcedPhotometry: False, - Q: 'fit'} + rescaleFactor: 1.00, + rescaleFactorErr: 0.01, + redshiftCatalog: "redshifts.fits"} # Selection function options - only used by the nemoSelFn script # NOTE: could eventually add 'completenessFraction' to 'massLimitMaps', which is why that's a dictionary list diff --git a/examples/SOSims/MFMF_SOSim_3freq_tiles.yml b/examples/SOSims/MFMF_SOSim_3freq_tiles.yml index d13ffcc..ce99a45 100644 --- a/examples/SOSims/MFMF_SOSim_3freq_tiles.yml +++ b/examples/SOSims/MFMF_SOSim_3freq_tiles.yml @@ -53,16 +53,20 @@ photFilter: 'Arnaud_M2e14_z0p4' # If 'fit', use fit to 
(theta, Q) done by nemo for 'photFilter' kernel # tenToA0, B0, Mpivot, sigma_int: Fixed scaling relation options (see H13 or ACTPol paper) # rescaleFactor, rescaleFactorErr: For MCal masses, as in the ACTPol paper (i.e., just rescales M500 results by 1/rescaleFactor) -massOptions: {tenToA0: 4.95e-5, - B0: 0.08, +# NOTE: These parameters reproduce the websky sim scaling relation (well, no idea on scatter) +massOptions: {tenToA0: 2.65e-05, + B0: 0.0, Mpivot: 3.0e+14, sigma_int: 0.2, + H0: 68.0, + Om0: 0.31, + Ob0: 0.049, + sigma8: 0.81, + ns: 0.965, relativisticCorrection: True, - rescaleFactor: 0.63, # Approx. matches WebSky scaling (slope seems different though) + rescaleFactor: 1.00, rescaleFactorErr: 0.01, - redshiftCatalog: "redshifts.fits", - forcedPhotometry: False, - Q: 'fit'} + redshiftCatalog: "redshifts.fits"} # Selection function options # NOTE: could eventually add 'completenessFraction' to 'massLimitMaps', which is why that's a dictionary list diff --git a/examples/SOSims/NzCheck.py b/examples/SOSims/NzCheck.py index 9a2a299..98cc002 100644 --- a/examples/SOSims/NzCheck.py +++ b/examples/SOSims/NzCheck.py @@ -2,6 +2,8 @@ N(z) check - for comparing across codes using different mass function implementations etc.. 
+NOTE: We messed up when setting this task, so the mass limit is M500c > 5e13 MSun/h + """ import os @@ -45,21 +47,22 @@ footprintLabel = footprintLabel, zStep = zStep) scalingRelationDict=selFn.scalingRelationDict - H0, Om0, Ob0, sigma_8 = 70.0, 0.30, 0.05, 0.80 - selFn.update(H0, Om0, Ob0, sigma_8, scalingRelationDict = scalingRelationDict) + H0, Om0, Ob0, sigma8, ns = 70.0, 0.30, 0.05, 0.80, 0.95 + h=H0/100.0 + selFn.update(H0, Om0, Ob0, sigma8, ns, scalingRelationDict = scalingRelationDict) print("Total area = %.3f square degrees" % (selFn.totalAreaDeg2)) # N(z) with M500c > 5e13 MSun - no selection function applied - countsByRedshift=selFn.mockSurvey.clusterCount[:, np.greater(selFn.mockSurvey.log10M, np.log10(5e13))].sum(axis = 1) + countsByRedshift=selFn.mockSurvey.clusterCount[:, np.greater(selFn.mockSurvey.log10M, np.log10(5e13/h))].sum(axis = 1) with open("NzCheck_noSelFn.csv", "w") as outFile: for i in range(len(selFn.mockSurvey.z)): - outFile.write("%.1f <= z < %.1f\t%d\n" % (selFn.mockSurvey.zBinEdges[i], selFn.mockSurvey.zBinEdges[i+1], countsByRedshift[i])) + outFile.write("%.1f <= z < %.1f\t%.3f\n" % (selFn.mockSurvey.zBinEdges[i], selFn.mockSurvey.zBinEdges[i+1], countsByRedshift[i])) # N(z) with M500c > 5e13 MSun - with S/N > 5 selection function applied predMz=selFn.compMz*selFn.mockSurvey.clusterCount - countsByRedshift=predMz[:, np.greater(selFn.mockSurvey.log10M, np.log10(5e13))].sum(axis = 1) + countsByRedshift=predMz[:, np.greater(selFn.mockSurvey.log10M, np.log10(5e13/h))].sum(axis = 1) with open("NzCheck_withSelFn.csv", "w") as outFile: for i in range(len(selFn.mockSurvey.z)): - outFile.write("%.1f <= z < %.1f\t%d\n" % (selFn.mockSurvey.zBinEdges[i], selFn.mockSurvey.zBinEdges[i+1], countsByRedshift[i])) + outFile.write("%.1f <= z < %.1f\t%.3f\n" % (selFn.mockSurvey.zBinEdges[i], selFn.mockSurvey.zBinEdges[i+1], countsByRedshift[i])) diff --git a/examples/SOSims/README.md b/examples/SOSims/README.md index 06fae69..701954e 100644 --- 
a/examples/SOSims/README.md +++ b/examples/SOSims/README.md @@ -88,8 +88,9 @@ SO survey footprint, by breaking the map into tiles (see below). # Extracting the survey mass limit -If the `calcSelFn` parameter set to True in the Nemo configuration file, the -main `nemo` script will calculate and output estimates of the 90% mass +If the `calcSelFn` parameter is set to `True` in the Nemo configuration file, +or if `nemo` is run using the `-S` switch, the main `nemo` script will +calculate and output estimates of the 90% mass completeness threshold at some user-set signal-to-noise level (e.g., S/N > 5). You will find various plots related to the survey completeness as a function of redshift under `MFMF_SOSim_3freq_small/diagnostics/`, including a @@ -171,7 +172,7 @@ again, replacing `$NUM_PROCESSES` with the number of cores you want to run on. T `slurm_mass.sh` scripts shows to run this using [Slurm](https://slurm.schedmd.com/overview.html) (this takes less than 3 minutes for a catalog of ~30,000 clusters with the settings given). -Note `rescaleFactor` in `massOptions` in the `MFMF_SOSim_3freq_tiles.yml` configuration file -has been set such that the "calibrated" masses produced by `nemoMass` (`M500Cal`, `M200mCal`) are on -approximately the same scale as the WebSky halo catalog (the slope of the mass scaling -relation in the simulations is slightly different though). +Note that the cosmological and scaling relation parameters set in the `massOptions` section of +both of the example configuration files given here (`MFMF_SOSim_3freq_tiles.yml` and +`MFMF_SOSim_3freq_small.yml`) have been set to approximately reproduce those +used in the WebSky simulations. 
diff --git a/examples/SOSims/selFnExample.py b/examples/SOSims/selFnExample.py index 94586b0..f2ef7f4 100644 --- a/examples/SOSims/selFnExample.py +++ b/examples/SOSims/selFnExample.py @@ -18,18 +18,18 @@ import IPython #------------------------------------------------------------------------------------------------------------ -def printNumClusters(H0, Om0, Ob0, sigma_8, scalingRelationDict = None): +def printNumClusters(H0, Om0, Ob0, sigma8, ns, scalingRelationDict = None): """Example of how to update selection function. """ # This function has to be called every time a parameter value is changed - selFn.update(H0, Om0, Ob0, sigma_8, scalingRelationDict = scalingRelationDict) + selFn.update(H0, Om0, Ob0, sigma8, ns, scalingRelationDict = scalingRelationDict) - print(">>> H0 = %.2f km/s/Mpc Om0 = %.2f Ob0 = %.2f sigma_8 = %.2f tenToA0 = %.3e sigma_int = %.2f" \ - % (selFn.mockSurvey.H0, selFn.mockSurvey.Om0, selFn.mockSurvey.Ob0, selFn.mockSurvey.sigma_8, - selFn.scalingRelationDict['tenToA0'], selFn.scalingRelationDict['sigma_int'])) - numClusters=selFn.mockSurvey.calcNumClustersExpected(selFn = selFn.compMz) + print(">>> H0 = %.2f km/s/Mpc Om0 = %.2f Ob0 = %.2f sigma8 = %.2f ns = %.3f tenToA0 = %.3e sigma_int = %.2f" \ + % (selFn.mockSurvey.H0, selFn.mockSurvey.Om0, selFn.mockSurvey.Ob0, selFn.mockSurvey.sigma8, + selFn.mockSurvey.ns, selFn.scalingRelationDict['tenToA0'], selFn.scalingRelationDict['sigma_int'])) + numClusters=selFn.mockSurvey.calcNumClustersExpected(compMz = selFn.compMz) print("... number of clusters expected = %.2f (in %.2f square degrees) ..." 
% (numClusters, selFn.totalAreaDeg2)) #------------------------------------------------------------------------------------------------------------ @@ -64,16 +64,16 @@ def printNumClusters(H0, Om0, Ob0, sigma_8, scalingRelationDict = None): # Default parameters t0=time.time() - H0, Om0, Ob0, sigma_8 = 70.0, 0.30, 0.05, 0.80 - printNumClusters(H0, Om0, Ob0, sigma_8, scalingRelationDict = scalingRelationDict) + H0, Om0, Ob0, sigma8, ns = 70.0, 0.30, 0.05, 0.80, 0.95 + printNumClusters(H0, Om0, Ob0, sigma8, ns, scalingRelationDict = scalingRelationDict) t1=time.time() print("... iteration took %.3f sec ..." % (t1-t0)) completeness.makeMzCompletenessPlot(selFn.compMz, selFn.mockSurvey.log10M, selFn.mockSurvey.z, "test0", "test0_Mz.pdf") # A complete iteration of changing cosmological parameters t0=time.time() - H0, Om0, Ob0, sigma_8 = 72.0, 0.27, 0.05, 0.74 - printNumClusters(H0, Om0, Ob0, sigma_8, scalingRelationDict = scalingRelationDict) + H0, Om0, Ob0, sigma8, ns = 72.0, 0.27, 0.05, 0.74, 0.95 + printNumClusters(H0, Om0, Ob0, sigma8, ns, scalingRelationDict = scalingRelationDict) t1=time.time() print("... iteration took %.3f sec ..." % (t1-t0)) completeness.makeMzCompletenessPlot(selFn.compMz, selFn.mockSurvey.log10M, selFn.mockSurvey.z, "test1", "test1_Mz.pdf") @@ -81,7 +81,7 @@ def printNumClusters(H0, Om0, Ob0, sigma_8, scalingRelationDict = None): # Changing the scaling relation normalisation t0=time.time() scalingRelationDict['tenToA0']=3e-5 - printNumClusters(H0, Om0, Ob0, sigma_8, scalingRelationDict = scalingRelationDict) + printNumClusters(H0, Om0, Ob0, sigma8, ns, scalingRelationDict = scalingRelationDict) t1=time.time() print("... iteration took %.3f sec ..." 
% (t1-t0)) completeness.makeMzCompletenessPlot(selFn.compMz, selFn.mockSurvey.log10M, selFn.mockSurvey.z, "test2", "test2_Mz.pdf") @@ -90,7 +90,7 @@ def printNumClusters(H0, Om0, Ob0, sigma_8, scalingRelationDict = None): t0=time.time() scalingRelationDict['tenToA0']=4.95e-5 scalingRelationDict['sigma_int']=0.001 - printNumClusters(H0, Om0, Ob0, sigma_8, scalingRelationDict = scalingRelationDict) + printNumClusters(H0, Om0, Ob0, sigma8, ns, scalingRelationDict = scalingRelationDict) t1=time.time() print("... iteration took %.3f sec ..." % (t1-t0)) completeness.makeMzCompletenessPlot(selFn.compMz, selFn.mockSurvey.log10M, selFn.mockSurvey.z, "test3", "test3_Mz.pdf") diff --git a/examples/SOSims/slurm_mass.sh b/examples/SOSims/slurm_mass.sh index 3486f08..f635f05 100644 --- a/examples/SOSims/slurm_mass.sh +++ b/examples/SOSims/slurm_mass.sh @@ -1,8 +1,9 @@ #!/bin/sh -#SBATCH --nodes=5 +#SBATCH --nodes=10 #SBATCH --ntasks-per-node=20 #SBATCH --mem=64000 #SBATCH --time=03:30:00 -source ~/.bashrc +#source ~/.bashrc +source /home/mjh/SETUP_CONDA.sh time mpiexec nemoMass MFMF_SOSim_3freq_tiles.yml -M diff --git a/examples/SOSims/slurm_nemo.sh b/examples/SOSims/slurm_nemo.sh index 4f10432..436e8fa 100644 --- a/examples/SOSims/slurm_nemo.sh +++ b/examples/SOSims/slurm_nemo.sh @@ -4,5 +4,6 @@ #SBATCH --mem=64000 #SBATCH --time=10:00:00 -source ~/.bashrc +#source ~/.bashrc +source /home/mjh/SETUP_CONDA.sh time mpiexec nemo MFMF_SOSim_3freq_tiles.yml -M -n diff --git a/examples/SOSims/validationScripts/README.md b/examples/SOSims/validationScripts/README.md new file mode 100644 index 0000000..28aca7a --- /dev/null +++ b/examples/SOSims/validationScripts/README.md @@ -0,0 +1,12 @@ +# Validation scripts + +This directory contains some scripts that can be used to sanity check +the WebSky sims data and Nemo. These can be run on the output produced +by running Nemo using the config files in the parent directory. 
+ +The `massRecovery*.py` scripts plot the masses recovered by Nemo, in +comparison with the true halo masses from the WebSky catalog. + +The `makeMassFunctionPlotsCCL*.py` scripts can be used to check that +the cluster counts routines in Nemo (implemented using CCL) match +what is expected from the WebSky simulations. diff --git a/examples/SOSims/validationScripts/checkMassRecovery.py b/examples/SOSims/validationScripts/checkMassRecovery.py new file mode 100644 index 0000000..e254ed3 --- /dev/null +++ b/examples/SOSims/validationScripts/checkMassRecovery.py @@ -0,0 +1,145 @@ +""" + +Fit the scaling relation in the sims + +""" + +import os +import sys +import numpy as np +import astropy.table as atpy +from nemo import catalogs, signals, plotSettings, MockSurvey +from astropy.cosmology import FlatLambdaCDM +from scipy import stats +import pyccl as ccl +import pylab as plt +import IPython + +#------------------------------------------------------------------------------------------------------------ +def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey): + """Calculates masses for cluster data in table. Because why not re-do on the fly when hippo busy? + + """ + + count=0 + for row in tab: + count=count+1 + #print("... %d/%d; %s (%.3f +/- %.3f) ..." 
% (count, len(tab), row['name'], + #row['redshift'], row['redshiftErr'])) + + tileName=row['tileName'] + + # Cuts on z, fixed_y_c for forced photometry mode (invalid objects will be listed but without a mass) + if row['fixed_y_c'] > 0 and np.isnan(row['redshift']) == False: + # Corrected for mass function steepness + massDict=signals.calcM500Fromy0(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, + row['redshift'], row['redshiftErr'], + tenToA0 = massOptions['tenToA0'], + B0 = massOptions['B0'], + Mpivot = massOptions['Mpivot'], + sigma_int = massOptions['sigma_int'], + tckQFit = tckQFitDict[tileName], mockSurvey = mockSurvey, + applyMFDebiasCorrection = True, + applyRelativisticCorrection = True, + fRelWeightsDict = fRelWeightsDict[tileName]) + row['M500']=massDict['M500'] + row['M500_errPlus']=massDict['M500_errPlus'] + row['M500_errMinus']=massDict['M500_errMinus'] + + return tab + +#------------------------------------------------------------------------------------------------------------ +# Main + +# Options - masses are as output by Nemo routines; we compare these to halo catalog (converting as necessary) +massCol="M500" +massCol="M200m" + +# Websky cosmo - for on-the-fly redone masses +minMass=1e13 +areaDeg2=700. 
# doesn't matter +zMin=0.0 +zMax=2.0 +H0, Om0, Ob0, sigma8, ns = 68.0, 0.049+0.261, 0.049, 0.81, 0.965 +TCMB=2.72548 +cosmoModel=FlatLambdaCDM(H0 = H0, Om0 = Om0, Ob0 = Ob0, Tcmb0 = TCMB) +mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns) +massOptions={'tenToA0': 2.65e-05, + 'B0': 0.0, + 'Mpivot': 3.0e+14, + 'sigma_int': 0.2} +tckQFitDict=signals.loadQ("../MFMF_SOSim_3freq_tiles/selFn/QFit.fits") +fRelWeightsDict=signals.loadFRelWeights("../MFMF_SOSim_3freq_tiles/selFn/fRelWeights.fits") + +# Make combined table +mergedTabFileName="trueMasses_MFMF_SOSim_3freq_tiles_M500.fits" +if os.path.exists(mergedTabFileName) == False: + halos=atpy.Table().read("../halos.fits") + tab=atpy.Table().read("../MFMF_SOSim_3freq_tiles/MFMF_SOSim_3freq_tiles_M500.fits") + tab=tab[tab['fixed_SNR'] > 6] + + tab, halos, rDeg=catalogs.crossMatch(tab, halos, radiusArcmin = 1.0) + + zs=halos['z'] + yc=tab['fixed_y_c'] + M200m=halos['M200m'] + + M200mDef=ccl.halos.MassDef200m(c_m='Bhattacharya13') + M500cDef=ccl.halos.MassDef(500, "critical") + M500c=[] + count=0 + for m, z in zip(M200m, zs): + M500c.append(M200mDef.translate_mass(mockSurvey.cosmoModel, m, 1/(1+z), M500cDef)) + M500c=np.array(M500c) + + tab['true_M500']=M500c/1e14 + tab['true_M200']=M200m/1e14 + tab['redshift']=zs + tab.write(mergedTabFileName, overwrite = True) + +# Re-do masses on the fly +tab=atpy.Table().read(mergedTabFileName) + +# Cut on mass and z to do the fit +MMin=3.0 +zBinEdges=[0.2, 0.4, 0.6, 0.8, 1.0] +for i in range(len(zBinEdges)-1): + zMin=zBinEdges[i] + zMax=zBinEdges[i+1] + + fitTab=tab[tab['M500'] > MMin] + fitTab=fitTab[fitTab['redshift'] > zMin] + fitTab=fitTab[fitTab['redshift'] < zMax] + + # NOTE: This is done in place anyway + fitTab=calcMass(fitTab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey) + + y=fitTab['M500'] + x=fitTab['true_M500'] + result=stats.linregress(x, y) + sumSqRes=np.sum((x-y)**2) + 
calibFactor=np.mean(fitTab['true_M500'])/np.mean(fitTab['M500']) + + # Scaling relation plot + plotSettings.update_rcParams() + plt.figure(figsize=(9.5,9)) + ax=plt.axes([0.1, 0.1, 0.89, 0.89]) + ax.set_aspect('equal') + plotRange=np.linspace(1.0, 50.0, 100) + plt.plot(x, y, '.') + plt.plot(plotRange, plotRange, 'k-') + plt.xlabel("$M^{\\rm true}_{\\rm 500c}$ (10$^{14}$ $M_{\odot}$)") + plt.ylabel("$M_{\\rm 500c}$ (10$^{14}$ $M_{\odot}$)") + plt.xlim(2, 50) + plt.ylim(2, 50) + plt.loglog() + plt.title("%.1f < z < %.1f" % (zMin, zMax)) + plt.savefig("massRecovery_%.1f_%.1f.png" % (zMin, zMax)) + plt.close() + + print("%.1f < z < %.1f:" % (zMin, zMax)) + print(" calibFactor = ", calibFactor) + print(" sumSqRes = ", sumSqRes) + + #IPython.embed() + #sys.exit() diff --git a/examples/SOSims/validationScripts/checkMassRecovery_M200m.py b/examples/SOSims/validationScripts/checkMassRecovery_M200m.py new file mode 100644 index 0000000..103a7dd --- /dev/null +++ b/examples/SOSims/validationScripts/checkMassRecovery_M200m.py @@ -0,0 +1,136 @@ +""" + +Fit the scaling relation in the sims + +""" + +import os +import sys +import numpy as np +import astropy.table as atpy +from nemo import catalogs, signals, plotSettings, MockSurvey +from astropy.cosmology import FlatLambdaCDM +from scipy import stats +import pylab as plt +import IPython + +#------------------------------------------------------------------------------------------------------------ +def calcMass(tab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey): + """Calculates masses for cluster data in table. Because why not re-do on the fly when hippo busy? + + """ + + count=0 + for row in tab: + count=count+1 + #print("... %d/%d; %s (%.3f +/- %.3f) ..." 
% (count, len(tab), row['name'], + #row['redshift'], row['redshiftErr'])) + + tileName=row['tileName'] + + # Cuts on z, fixed_y_c for forced photometry mode (invalid objects will be listed but without a mass) + if row['fixed_y_c'] > 0 and np.isnan(row['redshift']) == False: + # Corrected for mass function steepness + massDict=signals.calcM500Fromy0(row['fixed_y_c']*1e-4, row['fixed_err_y_c']*1e-4, + row['redshift'], row['redshiftErr'], + tenToA0 = massOptions['tenToA0'], + B0 = massOptions['B0'], + Mpivot = massOptions['Mpivot'], + sigma_int = massOptions['sigma_int'], + tckQFit = tckQFitDict[tileName], mockSurvey = mockSurvey, + applyMFDebiasCorrection = True, + applyRelativisticCorrection = True, + fRelWeightsDict = fRelWeightsDict[tileName]) + row['M500']=massDict['M500'] + row['M500_errPlus']=massDict['M500_errPlus'] + row['M500_errMinus']=massDict['M500_errMinus'] + + return tab + +#------------------------------------------------------------------------------------------------------------ +# Main + +# Websky cosmo - for on-the-fly redone masses +minMass=1e13 +areaDeg2=700. 
# doesn't matter +zMin=0.0 +zMax=2.0 +H0, Om0, Ob0, sigma8, ns = 68.0, 0.049+0.261, 0.049, 0.81, 0.965 +TCMB=2.72548 +cosmoModel=FlatLambdaCDM(H0 = H0, Om0 = Om0, Ob0 = Ob0, Tcmb0 = TCMB) +mockSurvey=MockSurvey.MockSurvey(minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns) +massOptions={'tenToA0': 2.65e-05, + 'B0': 0.0, + 'Mpivot': 3.0e+14, + 'sigma_int': 0.2} +tckQFitDict=signals.loadQ("../MFMF_SOSim_3freq_tiles/selFn/QFit.fits") +fRelWeightsDict=signals.loadFRelWeights("../MFMF_SOSim_3freq_tiles/selFn/fRelWeights.fits") + +# Make combined table +mergedTabFileName="trueMasses_MFMF_SOSim_3freq_tiles_M500.fits" +if os.path.exists(mergedTabFileName) == False: + halos=atpy.Table().read("../halos.fits") + tab=atpy.Table().read("../MFMF_SOSim_3freq_tiles/MFMF_SOSim_3freq_tiles_M500.fits") + tab=tab[tab['fixed_SNR'] > 6] + + tab, halos, rDeg=catalogs.crossMatch(tab, halos, radiusArcmin = 1.0) + + zs=halos['z'] + yc=tab['fixed_y_c'] + M200m=halos['M200m'] + M500c=[] + count=0 + for m, z in zip(M200m, zs): + count=count+1 + print(count, len(M200m)) + M500c.append(signals.convertM200mToM500c(m, z)) + M500c=np.array(M500c) + M500c=M500c[:, 0] + tab['true_M500']=M500c/1e14 + tab['true_M200']=M200m/1e14 + tab['redshift']=zs + tab.write(mergedTabFileName, overwrite = True) + +# Re-do masses on the fly +tab=atpy.Table().read(mergedTabFileName) + +# Cut on mass and z to do the fit +MMin=3.0 +zBinEdges=[0.2, 0.4, 0.6, 0.8, 1.0] +for i in range(len(zBinEdges)-1): + zMin=zBinEdges[i] + zMax=zBinEdges[i+1] + + fitTab=tab[tab['M200m'] > MMin] + fitTab=fitTab[fitTab['redshift'] > zMin] + fitTab=fitTab[fitTab['redshift'] < zMax] + + # NOTE: This is done in place anyway + fitTab=calcMass(fitTab, massOptions, tckQFitDict, fRelWeightsDict, mockSurvey) + + y=fitTab['M200m'] + x=fitTab['true_M200'] + result=stats.linregress(x, y) + sumSqRes=np.sum((x-y)**2) + calibFactor=np.mean(fitTab['true_M500'])/np.mean(fitTab['M500']) + + # Scaling relation plot + plotSettings.update_rcParams() + 
plt.figure(figsize=(9.5,9)) + ax=plt.axes([0.1, 0.1, 0.89, 0.89]) + ax.set_aspect('equal') + plotRange=np.linspace(1.0, 50.0, 100) + plt.plot(x, y, '.') + plt.plot(plotRange, plotRange, 'k-') + plt.xlabel("$M^{\\rm true}_{\\rm 200m}$ (10$^{14}$ $M_{\odot}$)") + plt.ylabel("$M_{\\rm 200m}$ (10$^{14}$ $M_{\odot}$)") + plt.xlim(2, 50) + plt.ylim(2, 50) + plt.loglog() + plt.title("%.1f < z < %.1f" % (zMin, zMax)) + plt.savefig("massRecoveryM200m_%.1f_%.1f.png" % (zMin, zMax)) + plt.close() + + print("%.1f < z < %.1f:" % (zMin, zMax)) + print(" calibFactor = ", calibFactor) + print(" sumSqRes = ", sumSqRes) diff --git a/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL.py b/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL.py new file mode 100644 index 0000000..600964f --- /dev/null +++ b/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL.py @@ -0,0 +1,169 @@ +""" + +Plot the mass function in z bins. + +Range adjusted to drop the last bin, which is more incomplete in the sense that it may not cover that +full mass bin (whereas all other bins are guaranteed to by definition). 
+ +""" + +import os +import sys +import astropy.table as atpy +import astropy.io.fits as pyfits +import IPython +import numpy as np +from nemo import plotSettings, completeness, signals +import pylab as plt +from scipy import stats +from astLib import * +import pyccl as ccl +from colossus.lss import mass_function + +#------------------------------------------------------------------------------------------------------------ +# Options +SNRCut=4.0 +selFnDir="../MFMF_SOSim_3freq_tiles/selFn" +footprintLabel=None +massCol='M200m' +zBinEdges=[0.2, 0.5, 0.9, 1.2] +zMin=min(zBinEdges) +zMax=max(zBinEdges) +log10MBinEdges=np.linspace(13.8, 15.5, 18) + +# Handling different mass definitions +if massCol == 'M500c': + delta=500 + rhoType="critical" +elif massCol == 'M200m': + delta=200 + rhoType="matter" +else: + raise Exception("Unsupported massCol - should be M500c or M200m") +deltaLabel="%d%s" % (delta, rhoType[0]) +log10MBinCentres=(log10MBinEdges[1:]+log10MBinEdges[:-1])/2 + +# Set up Websky cosmology +H0, Om0, Ob0, sigma_8, ns = 68.0, 0.31, 0.049, 0.81, 0.965 +selFn=completeness.SelFn(selFnDir, SNRCut, footprintLabel = footprintLabel, zStep = 0.02, + delta = delta, rhoType = rhoType) +scalingRelationDict=selFn.scalingRelationDict +selFn.update(H0, Om0, Ob0, sigma_8, ns, scalingRelationDict = scalingRelationDict) +print("Total area = %.3f square degrees" % (selFn.totalAreaDeg2)) + +# Cut to just the halos in the survey mask +cutTabFileName="halosInMask.fits" +if os.path.exists(cutTabFileName) == False: + print("Cutting halos catalog to the survey mask") + tab=atpy.Table().read('../halos.fits') + checkMask=selFn.checkCoordsInAreaMask(tab['RADeg'], tab['decDeg']) + tab=tab[checkMask] + tab.write(cutTabFileName, overwrite = True) +print("Reading %s" % (cutTabFileName)) +tab=atpy.Table().read(cutTabFileName) + +# On-the-fly mass conversion as quick with CCL +if massCol == "M500c": + print("Converting M200m to M500c") + M500c=[] + count=0 + 
M200mDef=ccl.halos.MassDef200m(c_m='Bhattacharya13') + M500cDef=ccl.halos.MassDef(500, "critical") + M500c=[] + count=0 + for row in tab: + M500c.append(M200mDef.translate_mass(selFn.mockSurvey.cosmoModel, row['M200m'], 1/(1+row['z']), M500cDef)) + tab['M500c']=M500c + +# Bit of preprocessing to make life easier +tab['fixed_SNR']=100.0 +tab.rename_column('z', 'redshift') +tab[massCol]=tab[massCol]/1e14 + +## Example (not used here) - N(z) with M500c > 5e13 MSun - with selection function applied +#predMz=selFn.compMz*selFn.mockSurvey.clusterCount +#countsByRedshift=predMz[:, np.greater(selFn.mockSurvey.log10M, np.log10(5e13))].sum(axis = 1) + +# All the analysis first ------------------------------------------------------------------------------------ +# WARNING: We're using halo catalogs, so disabled completeness correction +results={} +predMz=selFn.mockSurvey.clusterCount +for i in range(len(zBinEdges)-1): + zMin=zBinEdges[i] + zMax=zBinEdges[i+1] + label='%.1f < z < %.1f' % (zMin, zMax) + shellVolumeMpc3=selFn.mockSurvey._comovingVolume(zMax)-selFn.mockSurvey._comovingVolume(zMin) + zMask=np.logical_and(selFn.mockSurvey.z >= zMin, selFn.mockSurvey.z < zMax) + countsByMass=predMz[zMask, :].sum(axis = 0) + + predCounts=np.zeros(len(log10MBinEdges)-1) + predNumDensity=np.zeros(len(log10MBinEdges)-1) + obsCounts=np.zeros(len(log10MBinEdges)-1) + obsCountsErr=np.zeros(len(log10MBinEdges)-1) + obsNumDensity=np.zeros(len(log10MBinEdges)-1) + obsNumDensityErr=np.zeros(len(log10MBinEdges)-1) + + h=H0/100. 
+ binTab=tab[np.logical_and(tab['redshift'] >= zMin, tab['redshift'] < zMax)] + obsLog10Ms=np.log10(binTab[massCol]*1e14) + for j in range(len(log10MBinEdges)-1): + mMin=log10MBinEdges[j] + mMax=log10MBinEdges[j+1] + mMask=np.logical_and(selFn.mockSurvey.log10M >= mMin, selFn.mockSurvey.log10M < mMax) + predCounts[j]=countsByMass[mMask].sum() + obsMask=np.logical_and(obsLog10Ms >= mMin, obsLog10Ms < mMax) + obsCounts[j]=obsMask.sum() + obsCountsErr[j]=np.sqrt(obsCounts[j]) + predNumDensity[j]=predCounts[j]/shellVolumeMpc3 + obsNumDensity[j]=obsCounts[j]/shellVolumeMpc3 + #complCorr[j]=selFn.compMz[zMask, :].mean(axis = 0)[mMask].mean() + validMask=(obsCounts > 0) + results[label]={'log10MBinCentres': log10MBinCentres[validMask], + 'predCounts': predCounts[validMask], + 'obsCounts': obsCounts[validMask], + 'obsCountsErr': obsCountsErr[validMask], + 'predNumDensity': predNumDensity[validMask], + 'obsNumDensity': obsNumDensity[validMask], + 'obsNumDensityErr': (obsCountsErr[validMask]/obsCounts[validMask])*obsNumDensity[validMask]} + +# Counts comparison plot (just N as a function of mass) ----------------------------------------------------- +plotSettings.update_rcParams() +plt.figure(figsize=(9,6.5)) +ax=plt.axes([0.15, 0.12, 0.84, 0.85]) +for key in results.keys(): + plotLog10MBinCentres=results[key]['log10MBinCentres'] + pred=results[key]['predCounts'] + obs=results[key]['obsCounts'] + obsErr=results[key]['obsCountsErr'] + plt.errorbar(plotLog10MBinCentres, obs, yerr = obsErr, + elinewidth = 3, fmt = 'D', ms = 6, zorder = 900, label = key) + plt.plot(plotLog10MBinCentres, pred, 'k-') +plt.semilogy() +plt.ylim(0.1, 5e5) +plt.xlim(14.0, log10MBinEdges.max()) +plt.xlabel("log$_{10}$($M^{\\rm true}_{\\rm %s}$ / $M_{\odot}$)" % (deltaLabel)) +plt.ylabel("$N$") +plt.legend() +plt.savefig("%s_counts.png" % (massCol)) +plt.close() + +# Counts per unit volume (N per Mpc^3) ---------------------------------------------------------------------- 
+plotSettings.update_rcParams() +plt.figure(figsize=(9,6.5)) +ax=plt.axes([0.15, 0.12, 0.84, 0.85]) +for key in results.keys(): + plotLog10MBinCentres=results[key]['log10MBinCentres'] + pred=results[key]['predNumDensity'] + obs=results[key]['obsNumDensity'] + obsErr=results[key]['obsNumDensityErr'] + plt.errorbar(plotLog10MBinCentres, obs, yerr = obsErr, + elinewidth = 3, fmt = 'D', ms = 6, zorder = 900, label = key) + plt.plot(plotLog10MBinCentres, pred, 'k-') +plt.semilogy() +#plt.ylim(0.1, 5e5) +plt.xlim(14.0, log10MBinEdges.max()) +plt.xlabel("log$_{10}$($M^{\\rm true}_{\\rm %s}$ / $M_{\odot}$)" % (deltaLabel)) +plt.ylabel("$N$ (Mpc$^{3}$)") +plt.legend() +plt.savefig("%s_numDensity.png" % (massCol)) +plt.close() diff --git a/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL_recovered.py b/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL_recovered.py new file mode 100644 index 0000000..5cae054 --- /dev/null +++ b/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL_recovered.py @@ -0,0 +1,159 @@ +""" + +Plot the mass function in z bins. + +Range adjusted to drop the last bin, which is more incomplete in the sense that it may not cover that +full mass bin (whereas all other bins are guaranteed to by definition). 
+ +""" + +import os +import sys +import astropy.table as atpy +import astropy.io.fits as pyfits +import IPython +import numpy as np +from nemo import plotSettings, completeness, signals +import pylab as plt +from scipy import stats +from astLib import * +import pyccl as ccl +from colossus.lss import mass_function + +#------------------------------------------------------------------------------------------------------------ +# Options +SNRCut=4.0 +selFnDir="../MFMF_SOSim_3freq_tiles/selFn" +footprintLabel=None +massCol='M200m' +zBinEdges=[0.2, 0.5, 0.9, 1.2] +zMin=min(zBinEdges) +zMax=max(zBinEdges) +log10MBinEdges=np.linspace(13.8, 15.5, 18) +symbs=['D', 's', 'o'] +# Handling different mass definitions +if massCol == 'M500c': + delta=500 + rhoType="critical" +elif massCol == 'M200m': + delta=200 + rhoType="matter" +else: + raise Exception("Unsupported massCol - should be M500c or M200m") +deltaLabel="%d%s" % (delta, rhoType[0]) +log10MBinCentres=(log10MBinEdges[1:]+log10MBinEdges[:-1])/2 + +# Set up Websky cosmology +H0, Om0, Ob0, sigma_8, ns = 68.0, 0.31, 0.049, 0.81, 0.965 +selFn=completeness.SelFn(selFnDir, SNRCut, footprintLabel = footprintLabel, zStep = 0.02, + delta = delta, rhoType = rhoType) +scalingRelationDict=selFn.scalingRelationDict +selFn.update(H0, Om0, Ob0, sigma_8, ns, scalingRelationDict = scalingRelationDict) +print("Total area = %.3f square degrees" % (selFn.totalAreaDeg2)) + +# Load Nemo catalog +tab=atpy.Table().read('../MFMF_SOSim_3freq_tiles/MFMF_SOSim_3freq_tiles_M500.fits') +tab.rename_column("M500", "M500c") + +# All the analysis first ------------------------------------------------------------------------------------ +# WARNING: We're using halo catalogs, so disabled completeness correction +results={} +predMz=selFn.mockSurvey.clusterCount +for i in range(len(zBinEdges)-1): + zMin=zBinEdges[i] + zMax=zBinEdges[i+1] + label='%.1f < z < %.1f' % (zMin, zMax) + 
shellVolumeMpc3=selFn.mockSurvey._comovingVolume(zMax)-selFn.mockSurvey._comovingVolume(zMin) + zMask=np.logical_and(selFn.mockSurvey.z >= zMin, selFn.mockSurvey.z < zMax) + countsByMass=predMz[zMask, :].sum(axis = 0) + + predCounts=np.zeros(len(log10MBinEdges)-1) + predNumDensity=np.zeros(len(log10MBinEdges)-1) + obsCounts=np.zeros(len(log10MBinEdges)-1) + obsCountsErr=np.zeros(len(log10MBinEdges)-1) + obsNumDensity=np.zeros(len(log10MBinEdges)-1) + obsNumDensityErr=np.zeros(len(log10MBinEdges)-1) + complCorr=np.zeros(len(log10MBinEdges)-1) # Holds average completeness in each mass bin + + h=H0/100. + binTab=tab[np.logical_and(tab['redshift'] >= zMin, tab['redshift'] < zMax)] + obsLog10Ms=np.log10(binTab[massCol]*1e14) + for j in range(len(log10MBinEdges)-1): + mMin=log10MBinEdges[j] + mMax=log10MBinEdges[j+1] + mMask=np.logical_and(selFn.mockSurvey.log10M >= mMin, selFn.mockSurvey.log10M < mMax) + predCounts[j]=countsByMass[mMask].sum() + obsMask=np.logical_and(obsLog10Ms >= mMin, obsLog10Ms < mMax) + obsCounts[j]=obsMask.sum() + obsCountsErr[j]=np.sqrt(obsCounts[j]) + predNumDensity[j]=predCounts[j]/shellVolumeMpc3 + obsNumDensity[j]=obsCounts[j]/shellVolumeMpc3 + complCorr[j]=selFn.compMz[zMask, :].mean(axis = 0)[mMask].mean() + validMask=(obsCounts > 0) + fracErr=obsCountsErr[validMask]/obsCounts[validMask] + results[label]={'log10MBinCentres': log10MBinCentres[validMask], + 'predCounts': predCounts[validMask], + 'obsCounts': obsCounts[validMask], + 'obsCountsErr': obsCountsErr[validMask], + 'predNumDensity': predNumDensity[validMask], + 'obsNumDensity': obsNumDensity[validMask], + 'obsNumDensityErr': fracErr*obsNumDensity[validMask], + # Completeness corrected + 'corr_obsCounts': obsCounts[validMask]/complCorr[validMask], + 'corr_obsCountsErr': fracErr*(obsCounts[validMask]/complCorr[validMask]), + 'corr_obsNumDensity': obsNumDensity[validMask]/complCorr[validMask], + 'corr_obsNumDensityErr': fracErr*(obsNumDensity[validMask]/complCorr[validMask]), + } + +# 
Counts comparison plot (just N as a function of mass) ----------------------------------------------------- +plotSettings.update_rcParams() +plt.figure(figsize=(9,6.5)) +ax=plt.axes([0.15, 0.12, 0.84, 0.85]) +for key, symb in zip(results.keys(), symbs): + plotLog10MBinCentres=results[key]['log10MBinCentres'] + pred=results[key]['predCounts'] + obs=results[key]['obsCounts'] + obsErr=results[key]['obsCountsErr'] + corr_obs=results[key]['corr_obsCounts'] + corr_obsErr=results[key]['corr_obsCountsErr'] + plt.errorbar(plotLog10MBinCentres, obs, yerr = obsErr, color = 'none', markeredgecolor = 'k', + elinewidth = 3, fmt = symb, ms = 6, zorder = 900) + plt.errorbar(plotLog10MBinCentres, corr_obs, yerr = corr_obsErr, + elinewidth = 3, fmt = symb, ms = 6, zorder = 900, label = key) + plt.plot(plotLog10MBinCentres, pred, 'k-') +plt.semilogy() +plt.ylim(0.1, 5e5) +plt.xlim(14.0, log10MBinEdges.max()) +plt.xlabel("log$_{10}$($M_{\\rm %s}$ / $M_{\odot}$)" % (deltaLabel)) +plt.ylabel("$N$") +plt.legend() +plt.savefig("Recovered_%s_counts.png" % (massCol)) +plt.close() + +# Counts per unit volume (N per Mpc^3) ---------------------------------------------------------------------- +plotSettings.update_rcParams() +plt.figure(figsize=(9,6.5)) +ax=plt.axes([0.15, 0.12, 0.84, 0.85]) +for key, symb in zip(results.keys(), symbs): + plotLog10MBinCentres=results[key]['log10MBinCentres'] + pred=results[key]['predNumDensity'] + obs=results[key]['obsNumDensity'] + obsErr=results[key]['obsNumDensityErr'] + corr_obs=results[key]['corr_obsNumDensity'] + corr_obsErr=results[key]['corr_obsNumDensityErr'] + plt.errorbar(plotLog10MBinCentres, obs, yerr = obsErr, color = 'none', markeredgecolor = 'k', + elinewidth = 3, fmt = symb, ms = 6, zorder = 900) + plt.errorbar(plotLog10MBinCentres, corr_obs, yerr = corr_obsErr, + elinewidth = 3, fmt = symb, ms = 6, zorder = 900, label = key) + plt.plot(plotLog10MBinCentres, pred, 'k-') +plt.semilogy() +#plt.ylim(0.1, 5e5) +plt.xlim(14.0, log10MBinEdges.max()) 
+plt.xlabel("log$_{10}$($M_{\\rm %s}$ / $M_{\odot}$)" % (deltaLabel)) +plt.ylabel("$N$ (Mpc$^{3}$)") +plt.legend() +plt.savefig("Recovered_%s_numDensity.png" % (massCol)) +plt.close() + +IPython.embed() +sys.exit() diff --git a/nemo/MockSurvey.py b/nemo/MockSurvey.py index 801f4eb..aa8441c 100644 --- a/nemo/MockSurvey.py +++ b/nemo/MockSurvey.py @@ -1,7 +1,7 @@ """ -This module defines the MockSurvey class, used for obtaining de-biased cluster mass estimates and -selection function calculations. +This module defines the MockSurvey class, used for mass function calculations, obtaining de-biased cluster +mass estimates, selection function calculations, and generating mock catalogs. """ @@ -13,8 +13,9 @@ import pylab as plt import subprocess from astropy.cosmology import FlatLambdaCDM -from colossus.cosmology import cosmology -from colossus.lss import mass_function +import pyccl as ccl +#from colossus.cosmology import cosmology +#from colossus.lss import mass_function from . import signals from . import catalogs import pickle @@ -24,18 +25,68 @@ from scipy import stats from astLib import * import time - + #------------------------------------------------------------------------------------------------------------ class MockSurvey(object): + """An object that provides routines calculating cluster counts (using `CCL `_) + and generating mock catalogs for a given set of cosmological and mass scaling relation parameters. + The Tinker et al. (2008) halo mass function is used (hardcoded at present, but in principle this can + easily be swapped for any halo mass function supported by CCL). - def __init__(self, minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma_8, zStep = 0.01, enableDrawSample = False): - """Initialise a MockSurvey object. This first calculates the probability of drawing a cluster of - given M500, z, assuming the Tinker mass function, and the given (generous) selection limits. - An additional selection function can be dialled in later when using drawSample. 
- - NOTE: We've hard coded everything to use M500 wrt critical density at this point. - - NOTE: MockSurvey.mf.m has factor of h^-1 in it. + Attributes: + areaDeg2 (:obj:`float`): Survey area in square degrees. + zBinEdges (:obj:`np.ndarray`): Defines the redshift bins for the cluster counts. + z (:obj:`np.ndarray`): Centers of the redshift bins. + log10M (:obj:`np.ndarray`): Centers of the log10 mass bins for the cluster counts + (in MSun, with mass defined according to `delta` and `rhoType`). + a (:obj:`np.ndarray`): Scale factor (1/(1+z)). + delta (:obj:``float`): Overdensity parameter, used for mass definition (e.g., 200, 500). + rhoType (:obj:`str`): Density definition, either 'matter' or 'critical', used for mass definition. + mdef (:obj:`pyccl.halos.massdef.MassDef`): CCL mass definition object, defined by `delta` and `rhoType`. + transferFunction (:obj:`str`): Transfer function to use, as understood by CCL (e.g., 'eisenstein_hu', + 'boltzmann_camb'). + H0 (:obj:`float`): The Hubble constant at redshift 0, in km/s/Mpc. + Om0 (:obj:`float`): Dimensionless total (dark + baryonic) matter density parameter at redshift 0. + Ob0 (:obj:`float`): Dimensionless baryon matter density parameter at redshift 0. + sigma8 (:obj:`float`): Defines the amplitude of the matter power spectrum. + ns (:obj:`float`): Scalar spectral index of matter power spectrum. + volumeMpc3 (:obj:`float`): Co-moving volume in Mpc3 for the given survey area and cosmological + parameters. + numberDensity (:obj:`np.ndarray`): Number density of clusters (per cubic Mpc) on the + (z, log10M) grid. + clusterCount (:obj:`np.ndarray`): Cluster counts on the (z, log10M) grid. + numClusters (:obj:`float`): Total number of clusters in the survey area above the minimum mass + limit. + numClustersByRedshift (:obj:`np.ndarray`): Number of clusters in the survey area above the + minimum mass limit, as a function of redshift. 
+ + """ + def __init__(self, minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns, zStep = 0.01, + enableDrawSample = False, delta = 500, rhoType = 'critical', + transferFunction = 'boltzmann_camb'): + """Create a MockSurvey object, for performing calculations of cluster counts or generating mock + catalogs. The Tinker et al. (2008) halo mass function is used (hardcoded at present, but in + principle this can easily be swapped for any halo mass function supported by CCL). + + Args: + minMass (:obj:`float`): The minimum mass, in MSun. This should be set considerably lower than + the actual survey completeness limit, otherwise completeness calculations will be wrong. + areaDeg2 (:obj:`float`): Specifies the survey area in square degrees, which scales the + resulting cluster counts accordingly. + zMin (:obj:`float`): Minimum redshift for the (z, log10M) grid. + zMax (:obj:`float`): Maximum redshift for the (z, log10M) grid. + H0 (:obj:`float`): The Hubble constant at redshift 0, in km/s/Mpc. + Om0 (:obj:`float`): Dimensionless total (dark + baryonic) matter density parameter at redshift 0. + Ob0 (:obj:`float`): Dimensionless baryon matter density parameter at redshift 0. + sigma8 (:obj:`float`): Defines the amplitude of the matter power spectrum. + ns (:obj:`float`): Scalar spectral index of matter power spectrum. + zStep (:obj:`float`, optional): Sets the linear spacing between redshift bins. + enableDrawSample (:obj:`bool`, optional): This needs to be set to True to enable use of the + :func:`self.drawSample` function. Setting this to False avoids some overhead. + delta (:obj:``float`): Overdensity parameter, used for mass definition (e.g., 200, 500). + rhoType (:obj:`str`): Density definition, either 'matter' or 'critical', used for mass definition. + transferFunction (:obj:`str`): Transfer function to use, as understood by CCL (e.g., 'eisenstein_hu', + 'boltzmann_camb'). 
""" @@ -45,44 +96,68 @@ def __init__(self, minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma_8, zStep = self.areaDeg2=areaDeg2 self.zBinEdges=zRange self.z=(zRange[:-1]+zRange[1:])/2. + self.a=1./(1+self.z) - params={'flat': True, 'H0': H0, 'Om0': Om0, 'Ob0': Ob0, 'sigma8': sigma_8, 'ns': 0.95, - 'persistence': ''} - self.cosmoModel=cosmology.setCosmology('nemo', params) - self.cosmoModel.checkForChangedCosmology() - + self.delta=delta + self.rhoType=rhoType + self.mdef=ccl.halos.MassDef(self.delta, self.rhoType) + self.transferFunction=transferFunction + + self.H0=-1 + self.Om0=-1 + self.Ob0=-1 + self.sigma8=-1 + self.ns=-1 + self._get_new_cosmo(H0, Om0, Ob0, sigma8, ns) + + # NOTE: This is just MSun now (NOT MSun/h) self.log10M=np.arange(13, 16, 0.01) - self.M=np.power(10, self.log10M)*self.cosmoModel.h - self.mdef='500c' - self.model='tinker08' - + self.M=np.power(10, self.log10M) + self.enableDrawSample=enableDrawSample - self.update(H0, Om0, Ob0, sigma_8) - + self.update(H0, Om0, Ob0, sigma8, ns) + + + def _get_new_cosmo(self, H0, Om0, Ob0, sigma8, ns): + if ((self.H0 != H0) or (self.Om0 != Om0) or + (self.Ob0 != Ob0) or (self.sigma8 != sigma8)): + self.H0=H0 + self.Om0=Om0 + self.Ob0=Ob0 + self.sigma8=sigma8 + self.ns=ns + self.cosmoModel = ccl.Cosmology(Omega_c=Om0-Ob0, + Omega_b=Ob0, + h=0.01*H0, + sigma8=sigma8, + n_s=ns, + transfer_function=self.transferFunction) + self.mfunc = ccl.halos.MassFuncTinker08(self.cosmoModel, + self.mdef) + - def update(self, H0, Om0, Ob0, sigma_8): - """Recalculate cluster counts if cosmological parameters updated. + def update(self, H0, Om0, Ob0, sigma8, ns): + """Recalculate cluster counts for the updated cosmological parameters given. + + Args: + H0 (:obj:`float`): The Hubble constant at redshift 0, in km/s/Mpc. + Om0 (:obj:`float`): Dimensionless total (dark + baryonic) matter density parameter at redshift 0. + Ob0 (:obj:`float`): Dimensionless baryon matter density parameter at redshift 0. 
+ sigma8 (:obj:`float`): Defines the amplitude of the matter power spectrum. + ns (:obj:`float`): Scalar spectral index of matter power spectrum. """ - self.H0=H0 - self.Om0=Om0 - self.Ob0=Ob0 - self.sigma_8=sigma_8 + self._get_new_cosmo(H0, Om0, Ob0, sigma8, ns) - params={'flat': True, 'H0': H0, 'Om0': Om0, 'Ob0': Ob0, 'sigma8': sigma_8, 'ns': 0.95, - 'persistence': ''} - self.cosmoModel=cosmology.setCosmology('nemo', params) - self.cosmoModel.checkForChangedCosmology() - self._doClusterCount() # For quick Q, fRel calc (these are in MockSurvey rather than SelFn as used by drawSample) self.theta500Splines=[] self.fRelSplines=[] - self.Ez=self.cosmoModel.Ez(self.z) - self.DAz=self.cosmoModel.angularDiameterDistance(self.z)/self.cosmoModel.h - self.criticalDensity=(self.cosmoModel.rho_c(self.z)*np.power(1000, 3))*np.power(self.cosmoModel.h, 2) + self.Ez=ccl.h_over_h0(self.cosmoModel,self.a) + self.DAz=ccl.angular_diameter_distance(self.cosmoModel,self.a) + self.criticalDensity=ccl.physical_constants.RHO_CRITICAL*(self.Ez*self.cosmoModel['h'])**2 for k in range(len(self.z)): zk=self.z[k] interpLim_minLog10M=self.log10M.min() @@ -96,12 +171,12 @@ def update(self, H0, Om0, Ob0, sigma_8): Ez=self.Ez[k] R500Mpc=np.power((3*fitM500s)/(4*np.pi*500*criticalDensity), 1.0/3.0) fitTheta500s=np.degrees(np.arctan(R500Mpc/DA))*60.0 - fitFRels=signals.calcFRel(zk, fitM500s, self.cosmoModel) + fitFRels=signals.calcFRel(zk, fitM500s, Ez) tckLog10MToTheta500=interpolate.splrep(np.log10(fitM500s), fitTheta500s) tckLog10MToFRel=interpolate.splrep(np.log10(fitM500s), fitFRels) self.theta500Splines.append(tckLog10MToTheta500) self.fRelSplines.append(tckLog10MToFRel) - + # Stuff to enable us to draw mock samples (see drawSample) # Interpolators here need to be updated each time we change cosmology if self.enableDrawSample == True: @@ -118,24 +193,24 @@ def update(self, H0, Om0, Ob0, sigma_8): for i in range(len(self.z)): ngtm=self._cumulativeNumberDensity(self.z[i]) mask=ngtm > 0 - 
self.log10MRollers.append(_spline((ngtm[mask] / ngtm[0])[::-1], np.log10(self.M[mask][::-1]/self.cosmoModel.h), k=3)) - + self.log10MRollers.append(_spline((ngtm[mask] / ngtm[0])[::-1], np.log10(self.M[mask][::-1]), k=3)) def _cumulativeNumberDensity(self, z): """Returns N > M (per cubic Mpc), using Colossus routines. """ - - dndlnM=mass_function.massFunction(self.M/self.cosmoModel.h, z, mdef = self.mdef, - model = self.model, q_out = 'dndlnM') + + h=self.cosmoModel['h'] + dndlnM=self.mfunc.get_mass_function(self.cosmoModel, + self.M, 1/(1+z)) / np.log(10) #/ h**3 dndM=dndlnM/self.M - ngtm=integrate.cumtrapz(dndlnM[::-1], np.log(self.M/self.cosmoModel.h), initial = 0)[::-1] + ngtm=integrate.cumtrapz(dndlnM[::-1], np.log(self.M), initial = 0)[::-1] MUpper=np.arange(np.log(self.M[-1]), np.log(10**18), np.log(self.M[1])-np.log(self.M[0])) extrapolator=_spline(np.log(self.M), np.log(dndlnM), k=1) MF_extr=extrapolator(MUpper) intUpper=integrate.simps(np.exp(MF_extr), dx=MUpper[2] - MUpper[1], even='first') - ngtm=ngtm+intUpper*self.cosmoModel.h + ngtm=ngtm+intUpper return ngtm @@ -147,7 +222,7 @@ def _comovingVolume(self, z): NOTE: Assumes flat cosmology """ - return (4/3)*np.pi*np.power(self.cosmoModel.comovingDistance(0, z)/self.cosmoModel.h, 3) + return 4.18879020479 * ccl.comoving_radial_distance(self.cosmoModel, 1./(1+z))**3 def _doClusterCount(self): @@ -156,8 +231,10 @@ def _doClusterCount(self): """ zRange=self.zBinEdges - self.M=np.power(10, self.log10M)*self.cosmoModel.h - + h = self.cosmoModel['h'] + self.M=np.power(10, self.log10M) # in M_sun + norm_mfunc=1. / np.log(10) + # Number density by z and total cluster count (in redshift shells) # Can use to make P(m, z) plane numberDensity=[] @@ -166,19 +243,19 @@ def _doClusterCount(self): for i in range(len(zRange)-1): zShellMin=zRange[i] zShellMax=zRange[i+1] - zShellMid=(zShellMax+zShellMin)/2. 
- dndlnM=mass_function.massFunction(self.M/self.cosmoModel.h, zShellMid, mdef = self.mdef, - model = self.model, q_out = 'dndlnM') - dndM=dndlnM/self.M + zShellMid=(zShellMax+zShellMin)/2. + dndlnM=self.mfunc.get_mass_function(self.cosmoModel, self.M, + 1./(1+zShellMid)) * norm_mfunc + dndM = dndlnM / self.M # NOTE: this differs from hmf by several % at the high-mass end (binning or interpolation?) - n=(dndM*self.cosmoModel.h**4)*np.gradient(self.M/self.cosmoModel.h) + n=dndM * np.gradient(self.M) numberDensity.append(n) shellVolumeMpc3=self._comovingVolume(zShellMax)-self._comovingVolume(zShellMin) shellVolumeMpc3=shellVolumeMpc3*(self.areaSr/(4*np.pi)) - totalVolumeMpc3=totalVolumeMpc3+shellVolumeMpc3 + totalVolumeMpc3+=shellVolumeMpc3 clusterCount.append(n*shellVolumeMpc3) numberDensity=np.array(numberDensity) - clusterCount=np.array(clusterCount) + clusterCount=np.array(clusterCount) self.volumeMpc3=totalVolumeMpc3 self.numberDensity=numberDensity self.clusterCount=clusterCount @@ -186,29 +263,45 @@ def _doClusterCount(self): self.numClustersByRedshift=np.sum(clusterCount, axis = 1) - def calcNumClustersExpected(self, M500Limit = 0.1, zMin = 0.0, zMax = 2.0, selFn = None): - """Calculate the number of clusters expected above a given mass limit. If selFn is not None, apply - the selection function (in which case M500Limit isn't important, so long as it is low). - - selFn should be an (M, z) grid that corresponds with self.log10M, self.z - - NOTE: units of M500Limit are 1e14 MSun. + def calcNumClustersExpected(self, MLimit = 1e13, zMin = 0.0, zMax = 2.0, compMz = None): + """Calculate the number of clusters expected above a given mass limit, for the + mass definition set when the MockSurvey object was constructed. + + Args: + MLimit (:obj:`float`, optional): Mass limit above which to count clusters, in MSun. + zMin (:obj:`float`, optional): Count clusters above this minimum redshift. + zMax (:obj:`float`, optional): Count clusters below this maximum redshift. 
+            compMz (:obj:`np.ndarray`, optional): If given, a 2d array with the same dimensions
+                and binning as the (z, log10M) grid, as calculated by the
+                :class:`nemo.completeness.SelFn` class, that describes the completeness as
+                a function of mass and redshift.
+
+        Returns:
+            The number of clusters in the survey area, according to the chosen sample selection
+            cuts.
        
        """
        
-        if type(selFn) == np.ndarray:
-            numClusters=selFn*self.clusterCount
+        if type(compMz) == np.ndarray:
+            numClusters=compMz*self.clusterCount
        else:
            numClusters=self.clusterCount
        
        zMask=np.logical_and(np.greater(self.z, zMin), np.less(self.z, zMax))
-        mMask=np.greater(self.M/self.cosmoModel.h, M500Limit*1e14)
+        mMask=np.greater(self.M, MLimit)
        
        return numClusters[:, mMask][zMask].sum()
    
    
    def getPLog10M(self, z):
-        """Returns P(log10M) at given z, which corresponds to self.log10M.
+        """Returns the log10(mass) probability distribution at the given z, for the logarithmic mass
+        binning and mass definition set when the MockSurvey object was constructed.
+
+        Args:
+            z (:obj:`float`): Redshift at which to calculate P(log10(M)).
+
+        Returns:
+            Array corresponding to the log10(mass) probability distribution.
        
        """
        numberDensity=self._cumulativeNumberDensity(z)
@@ -220,48 +313,69 @@ def getPLog10M(self, z):
    def drawSample(self, y0Noise, scalingRelationDict, tckQFitDict, wcs = None, photFilterLabel = None,
                   tileName = None, SNRLimit = None, makeNames = False, z = None, numDraws = None,
                   areaDeg2 = None, applySNRCut = False, applyPoissonScatter = True,
-                   applyIntrinsicScatter = True, applyNoiseScatter = True):
-        """Draw a cluster sample from the mass function, generate mock y0~ values by applying the given
-        scaling relation parameters, and then (optionally, if both SNRLimit is given and applySNRCut
-        is True) apply the survey selection function.
-
-        Here, y0Noise can be either a single number (if using e.g. survey average) or an RMSMap
-        (2d array). Coords will be generated if an RMSMap and a wcs are supplied.
- - scalingRelationDict should contain keys 'tenToA0', 'B0', 'Mpivot', 'sigma_int' (this is the - contents of massOptions in nemo .yml config files). - - Most likely you should set SNRLimit to thresholdSigma, as given in the nemo .yml config file - (the resulting catalog could be cut in z, fixed_SNR afterwards anyway). - - photFilterLabel and tileName are only used for adding a 'template' key to each object: useful - for quickly checking which tile (tileName) an object is in. - - If z is given, the sample is drawn from only that redshift. The default (z = None) is to use - the range given by self.z - - If numDraws = None, the number of draws is set by self.numClustersByRedshift. If numDraws is - given, the number of draws will be divided equally between each z. - - If areaDeg2 is given, the cluster counts will be scaled accordingly (otherwise, they - correspond to self.areaDeg2). This will be ignored if numDraws is also set. - - Use applyPoissonScatter, applyIntrinsicScatter, applyNoiseScatter to control whether Poisson - noise (in the expected counts / number of draws from the mass function), intrinsic scatter, - and/or measurement noise scatter will be applied (i.e., if all three options are set to False, - fixed_y_c values will be the same as true_y_c, although each object will still have an error - bar in the output catalog, corresponding to where it is found in the RMS map). - - This routine is used in the nemoMock script. - - Returns catalog as an astropy Table (and an array of length self.z corresponding to low mass limit - if numDraws is used). Returns None if the catalog would contain zero clusters. + applyIntrinsicScatter = True, applyNoiseScatter = True, seed = None): + """Draw a cluster sample from the mass function, generating mock y0~ values (called `fixed_y_c` in + Nemo catalogs) by applying the given scaling relation parameters, and then (optionally) applying + a survey selection function. 
+
+        Args:
+            y0Noise (:obj:`float` or :obj:`np.ndarray`): Either a single number (if using e.g., a survey
+                average) or a noise map (2d array). A noise map must be provided here if you want the
+                output catalog to contain RA, dec coordinates (in addition, a WCS object must also be
+                provided - see below).
+            scalingRelationDict (:obj:`dict`): A dictionary containing keys 'tenToA0', 'B0', 'Mpivot',
+                'sigma_int' that describes the scaling relation between y0~ and mass (this is the
+                format of `massOptions` in Nemo .yml config files).
+            tckQFitDict (:obj:`dict`): A dictionary of interpolation spline knots indexed by tileName,
+                that can be used to estimate Q, the filter mismatch function (see
+                :func:`nemo.signals.loadQ`).
+            wcs (:obj:`astWCS.WCS`, optional): WCS object corresponding to `y0Noise`, if `y0Noise` is
+                a noise map (2d image array). Needed if you want the output catalog to contain RA, dec
+                coordinates.
+            photFilterLabel (:obj:`str`, optional): Name of the reference filter (as defined in the
+                Nemo .yml config file) that is used to define y0~ (`fixed_y_c`) and the filter mismatch
+                function, Q.
+            tileName (:obj:`str`, optional): Name of the tile for which the sample will be generated.
+            SNRLimit (:obj:`float`, optional): Signal-to-noise detection threshold used for the
+                output catalog (corresponding to a cut on `fixed_SNR` in Nemo catalogs). Only applied
+                if `applySNRCut` is also True (yes, this can be cleaned up).
+            makeNames (:obj:`bool`, optional): If True, add names of the form MOCK CL JHHMM.m+/-DDMM
+                to the output catalog.
+            z (:obj:`float`, optional): If given, produce a sample at the nearest z in the MockSurvey
+                z grid. The default behaviour is to use the full redshift grid specified by `self.z`.
+            numDraws (:obj:`int`, optional): If given, the number of draws to perform from the mass
+                function, divided equally among the redshift bins. The default is to use the values
+                contained in `self.numClustersByRedshift`.
+            areaDeg2 (:obj:`float`, optional): If given, the cluster counts will be scaled to this
+                area. Otherwise, they correspond to `self.areaDeg2`. This parameter will be ignored
+                if `numDraws` is also given.
+            applySNRCut (:obj:`bool`, optional): If True, cut the output catalog according to the
+                `fixed_SNR` threshold set by `SNRLimit`.
+            applyPoissonScatter (:obj:`bool`, optional): If True, add Poisson noise to the cluster
+                counts (implemented by modifying the number of draws from the mass function).
+            applyIntrinsicScatter (:obj:`bool`, optional): If True, apply intrinsic scatter to the
+                SZ measurements (`fixed_y_c`), as set by the `sigma_int` parameter in
+                `scalingRelationDict`.
+            applyNoiseScatter (:obj:`bool`, optional): If True, apply measurement noise, generated
+                from the given noise level or noise map (`y0Noise`), to the output SZ measurements
+                (`fixed_y_c`).
+            seed (:obj:`int`): If given, use this value for the random seed (may be useful for
+                testing).
+
+        Returns:
+            A catalog as an :obj:`astropy.table.Table` object, in the same format as produced by
+            the main `nemo` script.
+
+        Notes:
+            If both `applyIntrinsicScatter`, `applyNoiseScatter` are set to False, then the output
+            catalog `fixed_y_c` values will be exactly the same as `true_y_c`, although each object
+            will still have an error bar listed in the output catalog, corresponding to its location
+            in the noise map (if given).
        """
        
-        # WARNING: testing only!
- #print("WARNING: np.random.seed set to fixed value in drawSample - you don't want this if not testing!") - #np.random.seed(100) + if seed is not None: + np.random.seed(seed) if z is None: zRange=self.z @@ -280,15 +394,11 @@ def drawSample(self, y0Noise, scalingRelationDict, tckQFitDict, wcs = None, phot else: numClustersByRedshift[k]=np.random.poisson(int(round(self.numClustersByRedshift[zIndex]))) - if areaDeg2 is not None: - numClustersByRedshift=np.array(np.round(numClustersByRedshift*(areaDeg2/self.areaDeg2)), dtype = int) - - numClusters=numClustersByRedshift.sum() - - # This happens in the case of negligible but not quite zero area - if numClusters == 0: - return None + if areaDeg2 != None: + numClustersByRedshift=int(round(numClustersByRedshift*(areaDeg2/self.areaDeg2))) + numClusters=numClustersByRedshift.sum() + if numDraws != None: numClusters=numDraws @@ -304,7 +414,7 @@ def drawSample(self, y0Noise, scalingRelationDict, tckQFitDict, wcs = None, phot coordIndices=np.random.randint(0, len(xsInMask), numClusters) ys=ysInMask[coordIndices] xs=xsInMask[coordIndices] - if wcs is not None: + if wcs != None: RADecCoords=wcs.pix2wcs(xs, ys) RADecCoords=np.array(RADecCoords) RAs=RADecCoords[:, 0] @@ -360,8 +470,6 @@ def drawSample(self, y0Noise, scalingRelationDict, tckQFitDict, wcs = None, phot log10Ms_zk=self.log10MRollers[k](np.random.random_sample(numClusters_zk)) log10Ms_zk[log10Ms_zk < self.log10M.min()]=self.log10M.min() log10Ms_zk[log10Ms_zk > self.log10M.max()]=self.log10M.max() - if len(log10Ms_zk) == 0: - continue theta500s_zk=interpolate.splev(log10Ms_zk, self.theta500Splines[k], ext = 3) Qs_zk=interpolate.splev(theta500s_zk, tckQFitDict[tileName], ext = 3) @@ -383,7 +491,7 @@ def drawSample(self, y0Noise, scalingRelationDict, tckQFitDict, wcs = None, phot else: measured_y0s_zk=scattered_y0s_zk except: - raise Exception("Negative y0 values (probably spline related) for H0 = %.6f Om0 = %.6f sigma_8 = %.6f at z = %.3f" % (self.H0, self.Om0, 
self.sigma_8, zk)) + raise Exception("Negative y0 values (probably spline related) for H0 = %.6f Om0 = %.6f sigma8 = %.6f at z = %.3f" % (self.H0, self.Om0, self.sigma8, zk)) true_y0s[mask]=true_y0s_zk measured_y0s[mask]=measured_y0s_zk diff --git a/nemo/MockSurveyColossus.py b/nemo/MockSurveyColossus.py new file mode 100644 index 0000000..a904546 --- /dev/null +++ b/nemo/MockSurveyColossus.py @@ -0,0 +1,420 @@ +""" + +This module defines the MockSurvey class, used for obtaining de-biased cluster mass estimates and +selection function calculations. + +""" + +import os +import sys +import numpy as np +#import IPython +import astropy.table as atpy +import pylab as plt +import subprocess +from astropy.cosmology import FlatLambdaCDM +from colossus.cosmology import cosmology +from colossus.lss import mass_function +from . import signals +from . import catalogs +import pickle +from scipy import interpolate +from scipy import integrate +from scipy.interpolate import InterpolatedUnivariateSpline as _spline +from scipy import stats +from astLib import * +import time + +#------------------------------------------------------------------------------------------------------------ +class MockSurvey(object): + + def __init__(self, minMass, areaDeg2, zMin, zMax, H0, Om0, Ob0, sigma_8, ns, zStep = 0.01, enableDrawSample = False): + """Initialise a MockSurvey object. This first calculates the probability of drawing a cluster of + given M500, z, assuming the Tinker mass function, and the given (generous) selection limits. + An additional selection function can be dialled in later when using drawSample. + + NOTE: We've hard coded everything to use M500 wrt critical density at this point. + + NOTE: MockSurvey.mf.m has factor of h^-1 in it. + + """ + + zRange=np.arange(zMin, zMax+zStep, zStep) + areaSr=np.radians(np.sqrt(areaDeg2))**2 + self.areaSr=areaSr + self.areaDeg2=areaDeg2 + self.zBinEdges=zRange + self.z=(zRange[:-1]+zRange[1:])/2. 
+ + params={'flat': True, 'H0': H0, 'Om0': Om0, 'Ob0': Ob0, 'sigma8': sigma_8, 'ns': ns, + 'persistence': ''} + self.cosmoModel=cosmology.setCosmology('nemo', params) + self.cosmoModel.checkForChangedCosmology() + + # NOTE: + # self.log10M has a factor of h in it + # self.M does not + # We feed self.M/h into Colossus + # This is confusing - fix! + self.log10M=np.arange(13, 16, 0.01) + self.M=np.power(10, self.log10M)*self.cosmoModel.h + self.mdef='500c' + self.model='tinker08' + + self.enableDrawSample=enableDrawSample + self.update(H0, Om0, Ob0, sigma_8, ns) + + + def update(self, H0, Om0, Ob0, sigma_8, ns): + """Recalculate cluster counts if cosmological parameters updated. + + """ + + self.H0=H0 + self.Om0=Om0 + self.Ob0=Ob0 + self.sigma_8=sigma_8 + self.ns=ns + + params={'flat': True, 'H0': H0, 'Om0': Om0, 'Ob0': Ob0, 'sigma8': sigma_8, 'ns': ns, + 'persistence': ''} + self.cosmoModel=cosmology.setCosmology('nemo', params) + self.cosmoModel.checkForChangedCosmology() + + self._doClusterCount() + + # For quick Q, fRel calc (these are in MockSurvey rather than SelFn as used by drawSample) + self.theta500Splines=[] + self.fRelSplines=[] + self.Ez=self.cosmoModel.Ez(self.z) + self.DAz=self.cosmoModel.angularDiameterDistance(self.z)/self.cosmoModel.h + self.criticalDensity=(self.cosmoModel.rho_c(self.z)*np.power(1000, 3))*np.power(self.cosmoModel.h, 2) + for k in range(len(self.z)): + zk=self.z[k] + interpLim_minLog10M=self.log10M.min() + interpLim_maxLog10M=self.log10M.max() + interpPoints=100 + fitM500s=np.power(10, np.linspace(interpLim_minLog10M, interpLim_maxLog10M, interpPoints)) + fitTheta500s=np.zeros(len(fitM500s)) + fitFRels=np.zeros(len(fitM500s)) + criticalDensity=self.criticalDensity[k] + DA=self.DAz[k] + Ez=self.Ez[k] + R500Mpc=np.power((3*fitM500s)/(4*np.pi*500*criticalDensity), 1.0/3.0) + fitTheta500s=np.degrees(np.arctan(R500Mpc/DA))*60.0 + fitFRels=signals.calcFRel(zk, fitM500s, self.Ez) + tckLog10MToTheta500=interpolate.splrep(np.log10(fitM500s), 
fitTheta500s) + tckLog10MToFRel=interpolate.splrep(np.log10(fitM500s), fitFRels) + self.theta500Splines.append(tckLog10MToTheta500) + self.fRelSplines.append(tckLog10MToFRel) + + # Stuff to enable us to draw mock samples (see drawSample) + # Interpolators here need to be updated each time we change cosmology + if self.enableDrawSample == True: + + # For drawing from overall z distribution + zSum=self.clusterCount.sum(axis = 1) + pz=np.cumsum(zSum)/self.numClusters + self.zRoller=_spline(pz, self.z, k = 3) + + # For drawing from each log10M distribution at each point on z grid + # And quick fRel, Q calc using interpolation + # And we may as well have E(z), DA on the z grid also + self.log10MRollers=[] + for i in range(len(self.z)): + ngtm=self._cumulativeNumberDensity(self.z[i]) + mask=ngtm > 0 + self.log10MRollers.append(_spline((ngtm[mask] / ngtm[0])[::-1], np.log10(self.M[mask][::-1]/self.cosmoModel.h), k=3)) + + + def _cumulativeNumberDensity(self, z): + """Returns N > M (per cubic Mpc), using Colossus routines. + + """ + + dndlnM=mass_function.massFunction(self.M/self.cosmoModel.h, z, mdef = self.mdef, + model = self.model, q_out = 'dndlnM') + dndM=dndlnM/self.M + ngtm=integrate.cumtrapz(dndlnM[::-1], np.log(self.M/self.cosmoModel.h), initial = 0)[::-1] + + MUpper=np.arange(np.log(self.M[-1]), np.log(10**18), np.log(self.M[1])-np.log(self.M[0])) + extrapolator=_spline(np.log(self.M), np.log(dndlnM), k=1) + MF_extr=extrapolator(MUpper) + intUpper=integrate.simps(np.exp(MF_extr), dx=MUpper[2] - MUpper[1], even='first') + ngtm=ngtm+intUpper*self.cosmoModel.h + + return ngtm + + + def _comovingVolume(self, z): + """Returns co-moving volume in Mpc^3 (all sky) to some redshift z, using Colossus routines (taking + care of the fact that Colossus returns all distances in Mpc/h). 
+ + NOTE: Assumes flat cosmology + + """ + return (4/3)*np.pi*np.power(self.cosmoModel.comovingDistance(0, z)/self.cosmoModel.h, 3) + + + def _doClusterCount(self): + """Updates cluster count etc. after mass function object is updated. + + """ + + zRange=self.zBinEdges + self.M=np.power(10, self.log10M)*self.cosmoModel.h + + # Number density by z and total cluster count (in redshift shells) + # Can use to make P(m, z) plane + numberDensity=[] + clusterCount=[] + totalVolumeMpc3=0. + for i in range(len(zRange)-1): + zShellMin=zRange[i] + zShellMax=zRange[i+1] + zShellMid=(zShellMax+zShellMin)/2. + dndlnM=mass_function.massFunction(self.M/self.cosmoModel.h, zShellMid, mdef = self.mdef, + model = self.model, q_out = 'dndlnM') + dndM=dndlnM/self.M + # NOTE: this differs from hmf by several % at the high-mass end (binning or interpolation?) + n=(dndM*self.cosmoModel.h**4)*np.gradient(self.M/self.cosmoModel.h) + numberDensity.append(n) + shellVolumeMpc3=self._comovingVolume(zShellMax)-self._comovingVolume(zShellMin) + shellVolumeMpc3=shellVolumeMpc3*(self.areaSr/(4*np.pi)) + totalVolumeMpc3=totalVolumeMpc3+shellVolumeMpc3 + clusterCount.append(n*shellVolumeMpc3) + numberDensity=np.array(numberDensity) + clusterCount=np.array(clusterCount) + self.volumeMpc3=totalVolumeMpc3 + self.numberDensity=numberDensity + self.clusterCount=clusterCount + self.numClusters=np.sum(clusterCount) + self.numClustersByRedshift=np.sum(clusterCount, axis = 1) + + + def calcNumClustersExpected(self, M500Limit = 0.1, zMin = 0.0, zMax = 2.0, selFn = None): + """Calculate the number of clusters expected above a given mass limit. If selFn is not None, apply + the selection function (in which case M500Limit isn't important, so long as it is low). + + selFn should be an (M, z) grid that corresponds with self.log10M, self.z + + NOTE: units of M500Limit are 1e14 MSun. 
+ + """ + + if type(selFn) == np.ndarray: + numClusters=selFn*self.clusterCount + else: + numClusters=self.clusterCount + + zMask=np.logical_and(np.greater(self.z, zMin), np.less(self.z, zMax)) + mMask=np.greater(self.M/self.cosmoModel.h, M500Limit*1e14) + + return numClusters[:, mMask][zMask].sum() + + + def getPLog10M(self, z): + """Returns P(log10M) at given z, which corresponds to self.log10M. + + """ + numberDensity=self._cumulativeNumberDensity(z) + PLog10M=numberDensity/np.trapz(numberDensity, self.M) + + return PLog10M + + + def drawSample(self, y0Noise, scalingRelationDict, tckQFitDict, wcs = None, photFilterLabel = None, + tileName = None, SNRLimit = None, makeNames = False, z = None, numDraws = None, + areaDeg2 = None, applySNRCut = False, applyPoissonScatter = True, + applyIntrinsicScatter = True, applyNoiseScatter = True): + """Draw a cluster sample from the mass function, generate mock y0~ values by applying the given + scaling relation parameters, and then (optionally, if both SNRLimit is given and applySNRCut + is True) apply the survey selection function. + + Here, y0Noise can be either a single number (if using e.g. survey average) or an RMSMap + (2d array). Coords will be generated if an RMSMap and a wcs are supplied. + + scalingRelationDict should contain keys 'tenToA0', 'B0', 'Mpivot', 'sigma_int' (this is the + contents of massOptions in nemo .yml config files). + + Most likely you should set SNRLimit to thresholdSigma, as given in the nemo .yml config file + (the resulting catalog could be cut in z, fixed_SNR afterwards anyway). + + photFilterLabel and tileName are only used for adding a 'template' key to each object: useful + for quickly checking which tile (tileName) an object is in. + + If z is given, the sample is drawn from only that redshift. The default (z = None) is to use + the range given by self.z + + If numDraws = None, the number of draws is set by self.numClustersByRedshift. 
If numDraws is + given, the number of draws will be divided equally between each z. + + If areaDeg2 is given, the cluster counts will be scaled accordingly (otherwise, they + correspond to self.areaDeg2). This will be ignored if numDraws is also set. + + Use applyPoissonScatter, applyIntrinsicScatter, applyNoiseScatter to control whether Poisson + noise (in the expected counts / number of draws from the mass function), intrinsic scatter, + and/or measurement noise scatter will be applied (i.e., if all three options are set to False, + fixed_y_c values will be the same as true_y_c, although each object will still have an error + bar in the output catalog, corresponding to where it is found in the RMS map). + + This routine is used in the nemoMock script. + + Returns catalog as an astropy Table (and an array of length self.z corresponding to low mass limit + if numDraws is used). Returns None if the catalog would contain zero clusters. + + """ + + # WARNING: testing only! + #print("WARNING: np.random.seed set to fixed value in drawSample - you don't want this if not testing!") + #np.random.seed(100) + + if z is None: + zRange=self.z + else: + # Pick the nearest z on the grid + zIndex=np.argmin(abs(z-self.z)) + zRange=[self.z[zIndex]] + + # Add Poisson noise (we do by z to keep things simple on the z grid later) + numClustersByRedshift=np.zeros(len(zRange), dtype = int) + for k in range(len(zRange)): + zk=zRange[k] + zIndex=np.argmin(abs(zk-self.z)) + if applyPoissonScatter == False: + numClustersByRedshift[k]=int(round(self.numClustersByRedshift[zIndex])) + else: + numClustersByRedshift[k]=np.random.poisson(int(round(self.numClustersByRedshift[zIndex]))) + + if areaDeg2 is not None: + numClustersByRedshift=np.array(np.round(numClustersByRedshift*(areaDeg2/self.areaDeg2)), dtype = int) + + numClusters=numClustersByRedshift.sum() + + # This happens in the case of negligible but not quite zero area + if numClusters == 0: + return None + + if numDraws != None: + 
numClusters=numDraws + + tenToA0, B0, Mpivot, sigma_int=[scalingRelationDict['tenToA0'], scalingRelationDict['B0'], + scalingRelationDict['Mpivot'], scalingRelationDict['sigma_int']] + + # If given y0Noise as RMSMap, draw coords (assuming clusters aren't clustered - which they are...) + # NOTE: switched to using valid part of RMSMap here rather than areaMask - we need to fix the latter to same area + # It isn't a significant issue though + if type(y0Noise) == np.ndarray and y0Noise.ndim == 2: + RMSMap=y0Noise + ysInMask, xsInMask=np.where(RMSMap != 0) + coordIndices=np.random.randint(0, len(xsInMask), numClusters) + ys=ysInMask[coordIndices] + xs=xsInMask[coordIndices] + if wcs is not None: + RADecCoords=wcs.pix2wcs(xs, ys) + RADecCoords=np.array(RADecCoords) + RAs=RADecCoords[:, 0] + decs=RADecCoords[:, 1] + y0Noise=RMSMap[ys, xs] + else: + y0Noise=np.ones(numClusters)*y0Noise + RAs=np.zeros(numClusters) + decs=np.zeros(numClusters) + + ## WARNING: For testing only! + #y0Noise[:]=1e-6 + + # Fancy names or not? 
+ if makeNames == True: + names=[] + for RADeg, decDeg in zip(RAs, decs): + names.append(catalogs.makeName(RADeg, decDeg, prefix = 'MOCK-CL')) + else: + names=np.arange(numClusters)+1 + + # New way - on the redshift grid + t0=time.time() + currentIndex=0 + measured_y0s=np.zeros(y0Noise.shape) + true_y0s=np.zeros(y0Noise.shape) + log10Ms=np.zeros(y0Noise.shape) + zs=np.zeros(y0Noise.shape) + zErrs=np.zeros(y0Noise.shape) + minTrueMass=np.zeros(len(zRange)) # if using numDraws + for k in range(len(zRange)): + + zk=zRange[k] + zIndex=np.argmin(abs(zk-self.z)) + if numDraws != None: + numClusters_zk=int(round(numDraws/len(zRange))) + else: + numClusters_zk=numClustersByRedshift[k] + if numClusters_zk == 0: + continue + + # Some fiddling here to avoid rounding issues with array sizes (+/-1 draw here shouldn't make a difference) + nextIndex=currentIndex+numClusters_zk + if nextIndex >= len(y0Noise): + nextIndex=len(y0Noise)-1 + mask=np.arange(currentIndex, nextIndex) + numClusters_zk=len(mask) + y0Noise_zk=y0Noise[mask] + currentIndex=nextIndex + + # For some cosmo parameters, splined masses can end up outside of valid range, so catch this + #log10Ms_zk=self.log10MRollers[k](np.random.uniform(0, maxDraw, numClusters_zk)) + log10Ms_zk=self.log10MRollers[k](np.random.random_sample(numClusters_zk)) + log10Ms_zk[log10Ms_zk < self.log10M.min()]=self.log10M.min() + log10Ms_zk[log10Ms_zk > self.log10M.max()]=self.log10M.max() + if len(log10Ms_zk) == 0: + continue + + theta500s_zk=interpolate.splev(log10Ms_zk, self.theta500Splines[k], ext = 3) + Qs_zk=interpolate.splev(theta500s_zk, tckQFitDict[tileName], ext = 3) + + # For some cosmo parameters, fRel can wander outside its range for crazy masses + # So we just cap it at 0.1 here just to avoid -ve in log + fRels_zk=interpolate.splev(log10Ms_zk, self.fRelSplines[k], ext = 3) + fRels_zk[fRels_zk <= 0]=0.1 + fRels_zk[fRels_zk > 1]=1.0 + + try: + true_y0s_zk=tenToA0*np.power(self.Ez[k], 2)*np.power(np.power(10, log10Ms_zk)/Mpivot, 
1+B0)*Qs_zk*fRels_zk + if applyIntrinsicScatter == True: + scattered_y0s_zk=np.exp(np.random.normal(np.log(true_y0s_zk), sigma_int, len(true_y0s_zk))) + else: + scattered_y0s_zk=true_y0s_zk + if applyNoiseScatter == True: + measured_y0s_zk=np.random.normal(scattered_y0s_zk, y0Noise_zk) + else: + measured_y0s_zk=scattered_y0s_zk + except: + raise Exception("Negative y0 values (probably spline related) for H0 = %.6f Om0 = %.6f sigma_8 = %.6f at z = %.3f" % (self.H0, self.Om0, self.sigma_8, zk)) + + true_y0s[mask]=true_y0s_zk + measured_y0s[mask]=measured_y0s_zk + log10Ms[mask]=log10Ms_zk + zs[mask]=zk + + tab=atpy.Table() + tab.add_column(atpy.Column(names, 'name')) + tab.add_column(atpy.Column(RAs, 'RADeg')) + tab.add_column(atpy.Column(decs, 'decDeg')) + tab.add_column(atpy.Column(np.power(10, log10Ms)/1e14, 'true_M500')) + tab.add_column(atpy.Column(true_y0s/1e-4, 'true_fixed_y_c')) + tab.add_column(atpy.Column(measured_y0s/1e-4, 'fixed_y_c')) + tab.add_column(atpy.Column(y0Noise/1e-4, 'fixed_err_y_c')) + tab.add_column(atpy.Column(measured_y0s/y0Noise, 'fixed_SNR')) + tab.add_column(atpy.Column(zs, 'redshift')) + tab.add_column(atpy.Column(zErrs, 'redshiftErr')) + if photFilterLabel is not None and tileName is not None: + tab.add_column(atpy.Column([photFilterLabel]*len(tab), 'template')) + tab.add_column(atpy.Column([tileName]*len(tab), 'tileName')) + + # Apply selection? 
+ if applySNRCut == True: + selMask=measured_y0s > y0Noise*SNRLimit + tab=tab[selMask] + + return tab + diff --git a/nemo/completeness.py b/nemo/completeness.py index f117702..f1c4584 100644 --- a/nemo/completeness.py +++ b/nemo/completeness.py @@ -76,7 +76,8 @@ class SelFn(object): def __init__(self, selFnDir, SNRCut, configFileName = None, footprintLabel = None, zStep = 0.01, tileNames = None, enableDrawSample = False, downsampleRMS = True, - applyMFDebiasCorrection = True, setUpAreaMask = False, enableCompletenessCalc = True): + applyMFDebiasCorrection = True, setUpAreaMask = False, enableCompletenessCalc = True, + delta = 500, rhoType = 'critical'): """Initialise an object that contains a survey selection function. This class uses the output in the selFn/ directory (produced by the nemo and nemoSelFn commands) to @@ -196,10 +197,12 @@ def __init__(self, selFnDir, SNRCut, configFileName = None, footprintLabel = Non H0=70. Om0=0.30 Ob0=0.05 - sigma_8=0.8 - self.mockSurvey=MockSurvey.MockSurvey(minMass, self.totalAreaDeg2, zMin, zMax, H0, Om0, Ob0, sigma_8, - zStep = self.zStep, enableDrawSample = enableDrawSample) - self.update(H0, Om0, Ob0, sigma_8) + sigma8=0.8 + ns=0.95 + self.mockSurvey=MockSurvey.MockSurvey(minMass, self.totalAreaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns, + zStep = self.zStep, enableDrawSample = enableDrawSample, + delta = delta, rhoType = rhoType) + self.update(H0, Om0, Ob0, sigma8, ns) def _setUpAreaMask(self): @@ -295,7 +298,7 @@ def overrideRMS(self, RMS, obsFreqGHz = 148.0): sys.exit() - def update(self, H0, Om0, Ob0, sigma_8, scalingRelationDict = None): + def update(self, H0, Om0, Ob0, sigma8, ns, scalingRelationDict = None): """Re-calculates survey-average selection function given new set of cosmological / scaling relation parameters. This updates self.mockSurvey at the same time - i.e., this is the only thing that needs to be updated. 
@@ -313,7 +316,7 @@ def update(self, H0, Om0, Ob0, sigma_8, scalingRelationDict = None): if scalingRelationDict is not None: self.scalingRelationDict=scalingRelationDict - self.mockSurvey.update(H0, Om0, Ob0, sigma_8) + self.mockSurvey.update(H0, Om0, Ob0, sigma8, ns) compMzCube=[] for tileName in self.RMSDict.keys(): diff --git a/nemo/maps.py b/nemo/maps.py index 94b834f..d81920b 100644 --- a/nemo/maps.py +++ b/nemo/maps.py @@ -215,7 +215,8 @@ def addAutoTileDefinitions(parDict, DS9RegionFileName = None, cacheFileName = No parDict['tileDefinitions']['targetTileWidthDeg'], parDict['tileDefinitions']['targetTileHeightDeg']) print("... breaking map into %d tiles ..." % (len(parDict['tileDefinitions']))) - saveTilesDS9RegionsFile(parDict, DS9RegionFileName) + if DS9RegionFileName is not None: + saveTilesDS9RegionsFile(parDict, DS9RegionFileName) if cacheFileName is not None: stream=yaml.dump(parDict['tileDefinitions']) @@ -254,7 +255,7 @@ def loadTile(pathToTileImages, tileName, returnWCS = False): return data #------------------------------------------------------------------------------------------------------------- -def makeTileDir(parDict): +def makeTileDir(parDict, writeToDisk = True): """Update this later. Revised version. Instead of making a MEF, makes a directory for each map and puts individual tile images there. We'll only need to edit preprocessMapDict to handle the difference. Why are we doing this? Just in case reading from the same file is gumming up MPI runs on hippo when using lots @@ -270,7 +271,7 @@ def makeTileDir(parDict): only the I (temperature) part and save that in the tileDir file. This will need changing if hunting for polarized sources... 
- Returns unfilteredMapsDictList [input for filterMaps], list of extension names + Returns unfilteredMapsDictList [input for filterMaps], list of extension names, dictionary of clip coords NOTE: Under MPI, this should only be called by the rank = 0 process @@ -281,6 +282,7 @@ def makeTileDir(parDict): # Some of this is rather clunky... unfilteredMapsDictList=[] + clipCoordsDict={} if parDict['makeTileDir'] == False: tileNames=[] for mapDict in parDict['unfilteredMaps']: @@ -289,6 +291,8 @@ def makeTileDir(parDict): if tileNames == []: for ext in img: tileNames.append(ext.name) + clipCoordsDict[ext.name]={'clippedSection': [0, ext.header['NAXIS1'], 0, ext.header['NAXIS2']], + 'header': ext.header} else: for ext in img: if ext.name not in tileNames: @@ -348,10 +352,11 @@ def makeTileDir(parDict): tileOverlapDeg=parDict['tileOverlapDeg'] for mapType, inMapFileName, outMapFileName in zip(mapTypeList, inFileNames, outFileNames): mapData=None # only load the map if we have to - os.makedirs(outMapFileName, exist_ok = True) + if writeToDisk == True: + os.makedirs(outMapFileName, exist_ok = True) for c, name in zip(coordsList, tileNames): tileFileName=outMapFileName+os.path.sep+name+".fits" - if os.path.exists(tileFileName) == True: + if os.path.exists(tileFileName) == True and writeToDisk == True: continue if mapData is None: #deckImg=pyfits.HDUList() @@ -395,6 +400,10 @@ def makeTileDir(parDict): count=count+1 if count > 100: raise Exception("Triggered stupid bug in makeTileDir... this should be fixed properly") + if name not in clipCoordsDict: + clipCoordsDict[name]={'clippedSection': clip['clippedSection'], 'header': clip['wcs'].header} + else: + assert(clipCoordsDict[name]['clippedSection'] == clip['clippedSection']) # Old #clip=astImages.clipUsingRADecCoords(mapData, wcs, ra1, ra0, dec0, dec1) print("... adding %s [%d, %d, %d, %d ; %d, %d] ..." 
% (name, ra1, ra0, dec0, dec1, ra0-ra1, dec1-dec0)) @@ -434,7 +443,8 @@ def makeTileDir(parDict): zapMask[clip_y0:clip_y1, clip_x0:clip_x1]=1. clip['data']=clip['data']*zapMask #astImages.saveFITS("test.fits", zapMask, clip['wcs']) - astImages.saveFITS(tileFileName, clip['data'], clip['wcs']) + if writeToDisk == True: + astImages.saveFITS(tileFileName, clip['data'], clip['wcs']) #hdu=pyfits.ImageHDU(data = clip['data'].copy(), header = header, name = name) #deckImg.append(hdu) #deckImg.writeto(outMapFileName) @@ -444,8 +454,8 @@ def makeTileDir(parDict): for key, outFileName in zip(mapTypeList, outFileNames): mapDict[key]=outFileName unfilteredMapsDictList.append(mapDict.copy()) - - return unfilteredMapsDictList, tileNames + + return unfilteredMapsDictList, tileNames, clipCoordsDict #------------------------------------------------------------------------------------------------------------- def shrinkWCS(origShape, origWCS, scaleFactor): @@ -1394,7 +1404,7 @@ def makeModelImage(shape, wcs, catalog, beamFileName, obsFreqGHz = None, GNFWPar parameters can be given here (see gnfw.py). override (dict, optional): Used only by cluster catalogs. If a dictionary containing keys {'M500', 'redshift'} is given, all objects in the model image are forced to have the - corresponding angular size. Used by :meth:`positionRecoveryTest`. + corresponding angular size. Used by :meth:`sourceInjectionTest`. applyPixelWindow (bool, optional): If True, apply the pixel window function to the map. Returns: @@ -1850,9 +1860,9 @@ def saveFITS(outputFileName, mapData, wcs, compressed = False): Args: outputFileName (str): Filename of output FITS image. - mapData (:obj:`np.ndarray`): Map data array - wcs (:obj:`astWCS.WCS`): Map WCS object - compressed (bool): If True, writes a compressed image + mapData (:obj:`np.ndarray`): Map data array. + wcs (:obj:`astWCS.WCS`): Map WCS object. + compressed (bool): If True, writes a compressed image. 
""" diff --git a/nemo/pipelines.py b/nemo/pipelines.py index 2e4f38d..e6c138e 100644 --- a/nemo/pipelines.py +++ b/nemo/pipelines.py @@ -350,8 +350,9 @@ def makeMockClusterCatalog(config, numMocksToMake = 1, combineMocks = False, wri H0=70. Om0=0.30 Ob0=0.05 - sigma_8=0.8 - mockSurvey=MockSurvey.MockSurvey(minMass, totalAreaDeg2, zMin, zMax, H0, Om0, Ob0, sigma_8, enableDrawSample = True) + sigma8=0.8 + ns=0.95 + mockSurvey=MockSurvey.MockSurvey(minMass, totalAreaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns, enableDrawSample = True) if verbose: print(">>> Making mock catalogs ...") catList=[] diff --git a/nemo/signals.py b/nemo/signals.py index 4cbf420..5d12254 100644 --- a/nemo/signals.py +++ b/nemo/signals.py @@ -587,7 +587,7 @@ def calcWeightedFRel(z, M500, fRelWeightsDict): return fRel #------------------------------------------------------------------------------------------------------------ -def calcFRel(z, M500, cosmoModel, obsFreqGHz = 148.0): +def calcFRel(z, M500, Ez, obsFreqGHz = 148.0): """Calculates relativistic correction to SZ effect at specified frequency, given z, M500 in MSun. This assumes the Arnaud et al. (2005) M-T relation, and applies formulae of Itoh et al. (1998) @@ -608,7 +608,8 @@ def calcFRel(z, M500, cosmoModel, obsFreqGHz = 148.0): A=3.84e14 B=1.71 #TkeV=5.*np.power(((cosmoModel.efunc(z)*M500)/A), 1/B) # HMF/Astropy - TkeV=5.*np.power(((cosmoModel.Ez(z)*M500)/A), 1/B) # Colossus + #TkeV=5.*np.power(((cosmoModel.Ez(z)*M500)/A), 1/B) # Colossus + TkeV=5.*np.power(((Ez*M500)/A), 1/B) TKelvin=TkeV*((1000*e)/kB) # Itoh et al. (1998) eqns. 2.25 - 2.30 diff --git a/nemo/startUp.py b/nemo/startUp.py index f3c832e..df59ca1 100644 --- a/nemo/startUp.py +++ b/nemo/startUp.py @@ -11,6 +11,8 @@ import copy import astropy.io.fits as pyfits from astLib import astWCS +from nemo import signals +import pickle import time #import IPython from . 
import maps @@ -139,17 +141,20 @@ class NemoConfig(object): """ - def __init__(self, configFileName, makeOutputDirs = True, setUpMaps = True, selFnDir = None, - calcSelFn = True, sourceInjectionTest = False, MPIEnabled = False, + def __init__(self, config, makeOutputDirs = True, setUpMaps = True, writeTileDir = True, + selFnDir = None, calcSelFn = True, sourceInjectionTest = False, MPIEnabled = False, divideTilesByProcesses = True, verbose = True, strictMPIExceptions = True): """Creates an object that keeps track of nemo's configuration, maps, output directories etc.. Args: - configFileName (:obj:`str`): Path to a nemo .yml configuration file. + config (:obj:`str` or :obj:`dict`): Either the path to a nemo .yml configuration + file, or a dictionary containing nemo configuration parameters. makeOutputDirs (:obj:`bool`, optional): If True, create output directories (where maps, catalogs are stored). setUpMaps (:obj:`bool`, optional): If True, set-up data structures for handling maps (inc. breaking into tiles if wanted). + writeTileDir (:obj:`bool`, optional): If True and set-up to break maps into tiles, write + the tiles to disk with a subdirectory for each tile. selFnDir (:obj:`str`, optional): Path to the selFn directory (use to override the default location). calcSelFn (:obj:`bool`, optional): Overrides the value given in the config file if True. @@ -192,8 +197,14 @@ def mpi_excepthook(v, t, tb): self.comm=None self.size=1 - self.parDict=parseConfigFile(configFileName) - self.configFileName=configFileName + if type(config) == str: + self.parDict=parseConfigFile(config) + self.configFileName=config + elif type(config) == dict: + self.parDict=config + self.configFileName='' + else: + raise Exception("'config' must be either a path to a .yml file, or a dictionary of parameters.") # Handle a couple of optional command-line args. 
These only override if set to True, otherwise ignored if calcSelFn == True: @@ -228,9 +239,9 @@ def mpi_excepthook(v, t, tb): if 'outputDir' in list(self.parDict.keys()): self.rootOutDir=os.path.abspath(self.parDict['outputDir']) else: - if configFileName.find(".yml") == -1: + if self.configFileName.find(".yml") == -1 and makeOutputDirs == True: raise Exception("File must have .yml extension") - self.rootOutDir=os.path.abspath(configFileName.replace(".yml", "")) + self.rootOutDir=os.path.abspath(self.configFileName.replace(".yml", "")) self.filteredMapsDir=self.rootOutDir+os.path.sep+"filteredMaps" self.diagnosticsDir=self.rootOutDir+os.path.sep+"diagnostics" self.selFnDir=self.rootOutDir+os.path.sep+"selFn" @@ -254,23 +265,38 @@ def mpi_excepthook(v, t, tb): if setUpMaps == True: if self.rank == 0: - maps.addAutoTileDefinitions(self.parDict, DS9RegionFileName = self.selFnDir+os.path.sep+"tiles.reg", - cacheFileName = self.selFnDir+os.path.sep+"tileDefinitions.yml") - bcastUnfilteredMapsDictList, bcastTileNames=maps.makeTileDir(self.parDict) + if writeTileDir == True: + DS9RegionFileName=self.selFnDir+os.path.sep+"tiles.reg" + cacheFileName=self.selFnDir+os.path.sep+"tileDefinitions.yml" + else: + DS9RegionFileName=None + cacheFileName=None + maps.addAutoTileDefinitions(self.parDict, DS9RegionFileName = DS9RegionFileName, + cacheFileName = cacheFileName) + bcastUnfilteredMapsDictList, bcastTileNames, tileCoordsDict=maps.makeTileDir(self.parDict, + writeToDisk = writeTileDir) bcastParDict=self.parDict + bcastTileCoordsDict=tileCoordsDict + if writeTileDir == True: + with open(self.selFnDir+os.path.sep+"tileCoordsDict.pkl", "wb") as pickleFile: + pickler=pickle.Pickler(pickleFile) + pickler.dump(tileCoordsDict) else: bcastUnfilteredMapsDictList=None bcastTileNames=None bcastParDict=None + bcastTileCoordsDict=None if self.MPIEnabled == True: #self.comm.barrier() bcastParDict=self.comm.bcast(bcastParDict, root = 0) 
bcastUnfilteredMapsDictList=self.comm.bcast(bcastUnfilteredMapsDictList, root = 0) bcastTileNames=self.comm.bcast(bcastTileNames, root = 0) + bcastTileCoordsDict=self.comm.bcast(bcastTileCoordsDict, root = 0) self.comm.barrier() self.unfilteredMapsDictList=bcastUnfilteredMapsDictList self.tileNames=bcastTileNames self.parDict=bcastParDict + self.tileCoordsDict=bcastTileCoordsDict # For when we want to test on only a subset of tiles if 'tileNameList' in list(self.parDict.keys()): newList=[] @@ -282,13 +308,18 @@ def mpi_excepthook(v, t, tb): self.tileNames=newList else: # If we don't have maps, we would still want the list of tile names - from . import signals - QFitFileName=self.selFnDir+os.path.sep+"QFit.fits" - if os.path.exists(QFitFileName) == True: - tckQFitDict=signals.loadQ(QFitFileName) + if os.path.exists(self.selFnDir+os.path.sep+"tileCoordsDict.pkl") == True: + with open(self.selFnDir+os.path.sep+"tileCoordsDict.pkl", "rb") as pickleFile: + unpickler=pickle.Unpickler(pickleFile) + tileCoordsDict=unpickler.load() + self.tileCoordsDict=tileCoordsDict + self.tileNames=list(tileCoordsDict.keys()) + # Loading via Q might be able to be retired? + elif os.path.exists(self.selFnDir+os.path.sep+"QFit.fits") == True: + tckQFitDict=signals.loadQ(self.selFnDir+os.path.sep+"QFit.fits") self.tileNames=list(tckQFitDict.keys()) else: - raise Exception("Need to get tile names from %s if setUpMaps is False - but file not found." % (QFitFileName)) + raise Exception("Need to get tile names from %s if setUpMaps is False - but file not found." 
% (self.selFnDir+os.path.sep+"QFit.fits")) # For convenience, keep the full list of tile names # (for when we don't need to be running in parallel - see, e.g., signals.getFRelWeights) diff --git a/setup.py b/setup.py index 04b4654..f7bcf2f 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ long_description="""Map filtering and SZ cluster detection and characterization pipeline.""", packages=['nemo'], package_data={'nemo': ['data/*']}, - scripts=['bin/nemo', 'bin/nemoMass', 'bin/nemoSelFn', 'bin/nemoMock', 'bin/nemoCatalogCheck', 'bin/nemoCosmo'], + scripts=['bin/nemo', 'bin/nemoMass', 'bin/nemoSelFn', 'bin/nemoMock', 'bin/nemoCatalogCheck', 'bin/nemoCosmo', 'bin/nemoModel'], ext_modules=[Extension("nemoCython", ["nemo/nemoCython.pyx"], include_dirs=[numpy.get_include()])], install_requires=["astropy >= 3.2", "numpy >= 1.10", @@ -36,6 +36,7 @@ "cython", "PyYAML", "colossus", + "pyccl >= 2.1", "mpi4py", "colorcet >= 1.0", "mahotas >= 1.4"]