From 6aaa03774147fca5f787310a48ac79efbbc24b9a Mon Sep 17 00:00:00 2001 From: jalasker Date: Fri, 6 Oct 2023 08:44:55 -0700 Subject: [PATCH 001/297] First attempt at refactoring code based on `today` rather than a preordered list of tiles --- bin/InitializeAltMTLsParallel.py | 11 +- bin/MakeBitweights.py | 12 - bin/dateLoopAltMTLBugFix.sh | 56 ++ py/LSS/SV3/altmtltools.py | 859 +++++++++++++++++++++++++------ py/LSS/SV3/fatools.py | 5 +- 5 files changed, 773 insertions(+), 170 deletions(-) create mode 100755 bin/dateLoopAltMTLBugFix.sh diff --git a/bin/InitializeAltMTLsParallel.py b/bin/InitializeAltMTLsParallel.py index 189f82e20..2c704b88b 100755 --- a/bin/InitializeAltMTLsParallel.py +++ b/bin/InitializeAltMTLsParallel.py @@ -148,11 +148,20 @@ def procFunc(nproc): log.info('non sv survey') mtlprestr = '' - if os.path.exists(outputMTLDir + '/{0}/{2}/{3}mtl-{2}-hp-{1}.ecsv'.format(args.survey.lower(),HPList[-1], args.obscon.lower(), mtlprestr)): + if args.usetmp: + finalDir = args.finalDir.format(nproc) + else: + #TEST THIS + finalDir = outputMTLDir.format(nproc) + + if os.path.exists(finalDir + '/{0}/{2}/{3}mtl-{2}-hp-{1}.ecsv'.format(args.survey.lower(),HPList[-1], args.obscon.lower(), mtlprestr)): log.info('Alt MTL for last HP in list exists. Exiting script') log.info(outputMTLDir + '/{0}/{2}/{3}mtl-{2}-hp-{1}.ecsv'.format(args.survey.lower(),HPList[-1], args.obscon.lower(), mtlprestr)) return 42 for hpnum in HPList: + if os.path.exists(finalDir + '/{0}/{2}/{3}mtl-{2}-hp-{1}.ecsv'.format(args.survey.lower(),hpnum, args.obscon.lower(), mtlprestr)) and (not args.overwrite): + log.info('Alt MTL for HP {0:d} already exists. Set -ow or --overwrite to force regeneration. '.format(hpnum)) + continue log.info('hpnum = {0}'.format(hpnum)) exampleLedger = args.exampleLedgerBase + '/{0}/{2}/{3}mtl-{2}-hp-{1}.ecsv'.format(args.survey.lower(),hpnum, args.obscon.lower(), mtlprestr) log.info('exampleLedger = {0}'.format(exampleLedger)) diff --git a/bin/MakeBitweights.py b/bin/MakeBitweights.py index 62ab3eace..f85c795eb 100755 --- a/bin/MakeBitweights.py +++ b/bin/MakeBitweights.py @@ -42,25 +42,13 @@ HPList = np.array(open(args.HPListFile,'r').readlines()[0].split(',')).astype(int) print(HPList) -#mtlBaseDir = '/global/cscratch1/sd/jlasker/TestGeneralizedAltMTLScripts/alt_mtls_64dirs/Univ{0:03d}/' -#outdir = '/global/cscratch1/sd/jlasker/TestGeneralizedAltMTLScripts/alt_mtls_64dirs/' -#bw = makeBitweights(mtlBaseDir, ndirs = 64, hplist = hplist, debug = False) -#writeBitweights(mtlBaseDir, ndirs = 128, hplist = sv3dark, debug = False, outdir = outdir, survey = 'sv3', obscon = 'dark', allFiles = True) -#writeBitweights(mtlBaseDir, ndirs = 128, hplist = sv3dark, debug = False, outdir = outdir, survey = 'sv3', obscon = 'bright', allFiles = True) -#writeBitweights(mtlBaseDir, ndirs = None, hplist = None, debug = False, outdir = None, obscon = "dark", survey = 'sv3', overwrite = False, allFiles = False, splitByReal = False, splitNChunks = None) def procFunc(nproc): thisHPList = np.array_split(HPList, args.ProcPerNode)[nproc] for hp in thisHPList: writeBitweights(mtlBaseDir, ndirs = args.ndir, hplist = [hp], debug = args.debug, verbose = args.verbose, outdir = args.outdir, survey = args.survey, obscon = args.obscon.lower(), allFiles = False, overwrite = args.overwrite) -#if survey.lower() == 'main': -# for hp in HPList: - -#else: -# writeBitweights(mtlBaseDir, ndirs = args.ndir, hplist = HPList, debug = args.debug, verbose = args.verbose, outdir = outdir, survey = survey, obscon = obscon.lower(), allFiles 
= True, overwrite = overwrite) -#writeBitweights(mtlBaseDir, ndirs = ndir, hplist = HPList, debug = False, outdir = outdir, survey = survey, obscon = obscon.lower(), allFiles = True, overwrite = overwrite, splitByReal = splitByReal, splitNChunks = splitNChunks) try: NNodes = int(os.getenv('SLURM_JOB_NUM_NODES')) except: diff --git a/bin/dateLoopAltMTLBugFix.sh b/bin/dateLoopAltMTLBugFix.sh new file mode 100755 index 000000000..17b1ce0ec --- /dev/null +++ b/bin/dateLoopAltMTLBugFix.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +echo "All Arguments" +echo $@ + +NObsDates=$1 + +NNodes=$2 + +path2LSS=$3 + +CVal=$4 + +QVal=$5 + +argstring=${@:6} + +echo 'argstring' +echo "$argstring" + + +#for i in $(seq 0 1 $NObsDates) +#do +# echo " NextDate" +# echo "" +# echo "" +# echo "" +# echo $i +# echo "" +# echo "" +# echo "" +if [ $QVal = 'interactive' ]; +then + + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 03:00:00 --dependency=afterany:15958547 $path2LSS/runAltMTLParallel.py $argstring +fi +if [ $QVal = 'regular' ]; +then + + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:15958547 $path2LSS/runAltMTLParallel.py $argstring +fi + +if [ $QVal = 'debug' ]; +then + + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 00:15:00 --dependency=afterany:15958547 $path2LSS/runAltMTLParallel.py $argstring +fi +#retcode=$? +#qR=0 #DO NOT CHANGE. This prevents further restarts after the first if qR is set to 1 at top. +#if [ $retcode -ne 0 ]; then +# echo 'something went wrong' +# echo $retcode +# exit 1234 +#fi + +#done \ No newline at end of file diff --git a/py/LSS/SV3/altmtltools.py b/py/LSS/SV3/altmtltools.py index b7832ddb5..4ad650a6b 100644 --- a/py/LSS/SV3/altmtltools.py +++ b/py/LSS/SV3/altmtltools.py @@ -1,17 +1,23 @@ from desiutil.iers import freeze_iers freeze_iers() - +from time import time +import healpy as hp +import pickle from astropy.table import Table,join +import astropy +import astropy.io import astropy.io.fits as pf import desitarget from desitarget import io, mtl from desitarget.cuts import random_fraction_of_trues +import memory_profiler +from memory_profiler import profile from desitarget.mtl import get_mtl_dir, get_mtl_tile_file_name,get_mtl_ledger_format from desitarget.mtl import get_zcat_dir, get_ztile_file_name, tiles_to_be_processed from desitarget.mtl import make_zcat,survey_data_model,update_ledger, get_utc_date from desitarget.targets import initial_priority_numobs, decode_targetid from desitarget.targetmask import obsconditions, obsmask -from desitarget.targetmask import desi_mask, bgs_mask, mws_mask +from desitarget.targetmask import desi_mask, bgs_mask, mws_mask, zwarn_mask from desiutil.log import get_logger import fitsio from LSS.bitweights import pack_bitweights @@ -37,6 +43,9 @@ os.environ['DESIMODEL'] = '/global/common/software/desi/cori/desiconda/current/code/desimodel/master' +mtlformatdict = {"PARALLAX": '%16.8f', 'PMRA': '%16.8f', 'PMDEC': '%16.8f'} + + zcatdatamodel = np.array([], dtype=[ ('RA', '>f8'), ('DEC', '>f8'), ('TARGETID', '>i8'), ('NUMOBS', '>i4'), ('Z', '>f8'), ('ZWARN', '>i8'), ('ZTILEID', '>i4') @@ -122,11 +131,28 @@ def processTileFile(infile, outfile, startDate, endDate): origtf.write(outfile, overwrite = True, format = 'ascii.ecsv') return 0 +def uniqueTimestampFATimePairs(tileList, withFlag = False): + output = [] + for t in tileList: + if withFlag: + datepair = (t['ORIGTIMESTAMP'], t['FAMTLTIME'], t['REPROCFLAG']) + else: + datepair = (t['ORIGTIMESTAMP'], t['FAMTLTIME']) -def 
uniqueArchiveDateZDatePairs(tileList): + if datepair in output: + continue + else: + output.append(datepair) + + return output +def uniqueArchiveDateZDatePairs(tileList, withFlag = False): output = [] for t in tileList: - datepair = (t['ZDATE'], t['ARCHIVEDATE']) + if withFlag: + datepair = (t['ZDATE'], t['ARCHIVEDATE'], t['REPROCFLAG']) + else: + datepair = (t['ZDATE'], t['ARCHIVEDATE']) + if datepair in output: continue else: @@ -207,7 +233,8 @@ def findTwin(altFiber, origFiberList, survey = 'sv3', obscon = 'dark'): ''' -def createFAmap(FAReal, FAAlt, TargAlt = None, changeFiberOpt = None, debug = False, verbose = False): +def createFAmap(FAReal, FAAlt, TargAlt = None, changeFiberOpt = None, debug = False, + verbose = False, mock = False, mockTrueZKey = None): # Options for 'changeFiberOpt': # None: do nothing different to version 1 # AllTwins: Find a twin fiber with a target of the @@ -339,6 +366,108 @@ def checkMTLChanged(MTLFile1, MTLFile2): print('Number targets with different SUBPRIORITY') print(NDiff3) +def makeTileTrackerFN(dirName, survey, obscon): + return dirName + '/{0}survey-{1}obscon-TileTracker.txt'.format(survey, obscon.upper()) + +def makeTileTracker(altMTLDir, survey = 'main', obscon = 'dark', retroactive = False, + overwrite = False, startDate = None, endDate = None): + # JL altMTLDir includes the UnivNNN + log.info('generating tile tracker file') + outputFN = makeTileTrackerFN(altMTLDir,survey, obscon) + if os.path.isfile(outputFN) and (not overwrite): + log.warning('Output File {0} already exists'.format(outputFN)) + log.warning('returning to AMTL initialization') + return 0 + if (startDate is None) or (startDate == ''): + startDate = 19990101 + if (endDate is None) or (endDate == ''): + endDate = 21991231 + startDate = int(startDate) + endDate = int(endDate) + surveyOpsTrunkDir = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/' + origMTLDoneTiles = Table.read(surveyOpsTrunkDir + '/mtl/mtl-done-tiles.ecsv') + #amtlTileFN = origMTLDir + '/mtl-done-tiles.ecsv' + origMTLDoneOverrides = Table.read(surveyOpsTrunkDir + '/mtl/mtl-done-overrides.ecsv') + #amtlOverrideFN = origMTLDir + '/mtl-done-overrides.ecsv' + origMTLTilesSpecStatus = Table.read(surveyOpsTrunkDir + '/ops/tiles-specstatus.ecsv') + ''' + if os.path.isfile(amtlTileFN): + altMTLDoneTiles = Table.read(amtlTileFN) + else: + altMTLDoneTiles = Table() + if os.path.isfile(amtlOverrideFN): + altMTLDoneOverrides = Table.read(amtlOverrideFN) + else: + altMTLDoneOverrides = Table() + ''' + TrimmedTiles = origMTLTilesSpecStatus[np.char.lower(origMTLTilesSpecStatus['SURVEY']) == survey.lower()] + TrimmedTiles = TrimmedTiles[np.char.lower(TrimmedTiles['FAPRGRM']) == obscon.lower()] + + TrimmedTiles.sort(keys = ['ARCHIVEDATE', 'LASTNIGHT']) + #TrimmedTiles.sort(keys = ['TIMESTAMP', 'LASTNIGHT']) + TrimmedTileIDs = TrimmedTiles['TILEID'] + #origMTLDoneTiles.sort(keys = ['ARCHIVEDATE', 'ZDATE']) + origMTLDoneTiles.sort(keys = ['TIMESTAMP', 'ZDATE']) + TILEID, ARCHIVEDATE, ZDATE,FAMTLDATE, ALTARCHIVEDATE, ORIGTIMESTAMP, REPROCFLAG, OVERRIDEFLAG, OBSCONS, SURVEYS = [],[],[],[],[],[],[],[],[],[] + for omtlDoneTile in origMTLDoneTiles: + #TILEID TIMESTAMP VERSION PROGRAM ZDATE ARCHIVEDATE + thisTileID = omtlDoneTile['TILEID'] + if not (thisTileID in TrimmedTileIDs): + continue + thists = str(thisTileID).zfill(6) + FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+thists[:3]+'/fiberassign-'+thists+'.fits.gz' + fhtOrig = fitsio.read_header(FAOrigName) + thisFAMTLTime = fhtOrig['MTLTIME'] + thisVersion = 
omtlDoneTile['VERSION'] + thisProgram = omtlDoneTile['PROGRAM'] + thisZDate = omtlDoneTile['ZDATE'] + thisArchiveDate = omtlDoneTile['ARCHIVEDATE'] + thisOrigTimestamp = omtlDoneTile['TIMESTAMP'] + if thisArchiveDate < startDate: + thisAltArchiveDate = thisArchiveDate + elif thisArchiveDate > endDate: + continue + else: + thisAltArchiveDate = None + + + thisReprocFlag = thisTileID in TILEID + #thisOverrideFlag = thisTileID in OverrideTileID + TILEID.append(thisTileID) + ARCHIVEDATE.append(thisArchiveDate) + ZDATE.append(thisZDate) + FAMTLDATE.append(thisFAMTLTime) + ALTARCHIVEDATE.append(thisAltArchiveDate) + ORIGTIMESTAMP.append(thisOrigTimestamp) + REPROCFLAG.append(thisReprocFlag) + OVERRIDEFLAG.append(None) + OBSCONS.append(obscon) + SURVEYS.append(survey) + + + + TilesToProcessNearlyInOrder = [TILEID, ARCHIVEDATE, ZDATE, FAMTLDATE, ALTARCHIVEDATE,ORIGTIMESTAMP, REPROCFLAG, OVERRIDEFLAG, OBSCONS, SURVEYS] + + t = Table(TilesToProcessNearlyInOrder, + names=('TILEID', 'ARCHIVEDATE', 'ZDATE', 'FAMTLTIME', 'ALTARCHIVEDATE', 'ORIGTIMESTAMP', 'REPROCFLAG', 'OVERRIDEFLAG', 'OBSCON', 'SURVEY'), + meta={'Name': 'AltMTLTileTracker', 'StartDate': startDate, 'EndDate': endDate}) + t.sort(['ORIGTIMESTAMP','FAMTLTIME']) + t.write(outputFN, format='ascii.ecsv') + return 1 + + + +def tiles_to_be_processed_alt(altmtldir, obscon = 'dark', survey = 'main'): + TileTrackerFN = makeTileTrackerFN(altmtldir, survey, obscon) + TileTracker = Table.read(TileTrackerFN, format = 'ascii.ecsv') + returnTiles = TileTracker[(TileTracker['OBSCON'] == obscon.upper()) | (TileTracker['OBSCON'] == obscon.lower())] + returnTiles = returnTiles[(returnTiles['SURVEY'] == survey.upper()) | (returnTiles['SURVEY'] == survey.lower())] + + returnTiles = returnTiles[returnTiles['ALTARCHIVEDATE'] == None] + + + return returnTiles + def trimToMTL(notMTL, MTL, debug = False, verbose = False): # JL trims a target file, which possesses all of the information in an MTL, down # JL to the columns allowed in the MTL data model. 
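The TileTracker written above is an ordinary ECSV table, so the pending-tile cut made in tiles_to_be_processed_alt can be checked by hand. A minimal sketch, assuming a Univ000 realization directory and the main/dark tracker name that makeTileTrackerFN constructs (the path is illustrative):

    from astropy.table import Table

    tracker = Table.read('Univ000/mainsurvey-DARKobscon-TileTracker.txt',
                         format='ascii.ecsv')
    # Tiles not yet processed in the alternate universe still carry a null
    # ALTARCHIVEDATE, mirroring the selection in tiles_to_be_processed_alt.
    pending = tracker[tracker['ALTARCHIVEDATE'] == None]
    print('{0} of {1} tiles still to process'.format(len(pending), len(tracker)))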
@@ -358,7 +487,7 @@ def trimToMTL(notMTL, MTL, debug = False, verbose = False): return notMTL - +#@profile def initializeAlternateMTLs(initMTL, outputMTL, nAlt = 2, genSubset = None, seed = 314159, obscon = 'DARK', survey = 'sv3', saveBackup = False, overwrite = False, startDate = None, endDate = None, ztilefile = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-specstatus.ecsv', @@ -485,9 +614,14 @@ def initializeAlternateMTLs(initMTL, outputMTL, nAlt = 2, genSubset = None, seed log.info('pre creating output dir') if not os.path.exists(outputMTLDir): os.makedirs(outputMTLDir) - if not os.path.isfile(outputMTLDir + ztilefn): + if not os.path.isfile(finalDir.format(n) + '/' + ztilefn): processTileFile(ztilefile, outputMTLDir + ztilefn, startDate, endDate) #os.symlink(ztilefile, outputMTLDir + ztilefn) + thisTileTrackerFN = makeTileTrackerFN(finalDir.format(n), survey, obscon) + log.info('path to tiletracker = {0}'.format(thisTileTrackerFN)) + if not os.path.isfile(thisTileTrackerFN): + makeTileTracker(outputMTLDir, survey = survey, obscon = obscon, retroactive = False, + overwrite = False, startDate = startDate, endDate = endDate) subpriors = initialentries['SUBPRIORITY'] if (not reproducing) and shuffleSubpriorities: @@ -769,7 +903,7 @@ def quickRestartFxn(ndirs = 1, altmtlbasedir = None, survey = 'sv3', obscon = 'd restartMTLs = ls(altmtldirRestart +'/' + survey + '/' + obscon + '/' + '/orig/*') for fn in restartMTLs: copyfile(fn, altmtldirRestart +'/' + survey + '/' + obscon + '/' + fn.split('/')[-1]) - +#@profile def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, altmtlbasedir=None, ndirs = 3, numobs_from_ledger=True, secondary=False, singletile = None, singleDate = None, debugOrig = False, @@ -897,7 +1031,12 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, althpdirname = io.find_target_files(altmtldir, flavor="mtl", resolve=resolve, survey=survey, obscon=obscon, ender=form) # ADM grab an array of tiles that are yet to be processed. - tiles = tiles_to_be_processed(zcatdir, altmtltilefn, obscon, survey) + # JL use modified function to automatically switch between + # JL original processing and reprocessing using TileTracker + #tiles = tiles_to_be_processed(zcatdir, altmtltilefn, obscon, survey) + tiles = tiles_to_be_processed_alt(altmtldir, obscon, survey) + #names=('TILEID', 'ORIGARCHIVEDATE', 'ZDATE', 'ALTARCHIVEDATE', 'REPROCFLAG', 'OVERRIDEFLAG', 'OBSCON', 'SURVEY'), + # ADM stop if there are no tiles to process. 
if len(tiles) == 0: if (not multiproc) and (n != ndirs - 1): @@ -910,11 +1049,18 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, if not (singletile is None): tiles = tiles[tiles['TILEID'] == singletile] try: - sorttiles = np.sort(tiles, order = ['ARCHIVEDATE', 'ZDATE']) + #sorttiles = np.sort(tiles, order = ['ARCHIVEDATE', 'ZDATE']) + #tiles.sort(keys = ['ARCHIVEDATE', 'ZDATE']) + tiles.sort(keys = ['ORIGTIMESTAMP', 'FAMTLTIME']) + sorttiles = tiles except: - log.warn('sorting tiles on ARCHIVEDATE failed.') + log.info(len(tiles)) + log.info(tiles.dtype) + #log.warn('sorting tiles on ARCHIVEDATE, ZDATE failed.') + log.warn('sorting tiles on ARCHIVEDATE, ZDATE failed.') log.warn('currently we are aborting, but this may') - log.warn('change in the future to switching to order by ZDATE') + #log.warn('change in the future to switching to order by ZDATE') + log.warn('change in the future.')#' to switching to order by ZDATE') raise NotImplementedError('This pipeline does not currently handle tile lists with an unsortable ARCHIVEDATE or without any ARCHIVEDATE whatsoever. Exiting.') #sorttiles = np.sort(tiles, order = 'ZDATE') if testDoubleDate: @@ -924,29 +1070,41 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, log.info(tiles[tiles['TILEID' ] == 314]) log.info(tiles[tiles['TILEID' ] == 315]) tiles = tiles[cond1 | cond2 ] - datepairs = uniqueArchiveDateZDatePairs(sorttiles) - + datepairs = uniqueTimestampFATimePairs(sorttiles, withFlag = True) #if singleDate: # dates = np.sort(np.unique(sorttiles['ARCHIVEDATE'])) - if debug: - log.info('first and last 10 hopefully datepairs hopefully in order') - log.info(datepairs[0:10]) - log.info(datepairs[-10:-1]) - log.info('first and last 10 hopefully zdates hopefully not in order') - log.info(sorttiles['ZDATE'][0:10]) - log.info(sorttiles['ZDATE'][-10:-1]) + #if debug: + # log.info('first and last 10 hopefully datepairs hopefully in order') + # log.info(datepairs[0:10]) + # log.info(datepairs[-10:-1]) + # log.info('first and last 10 hopefully (orig)zdates hopefully not in order') + # + # #log.info(sorttiles['ZDATE'][0:10]) + # #log.info(sorttiles['ZDATE'][-10:-1]) #else: - for zd,ad in datepairs: - dateTiles = sorttiles[sorttiles['ARCHIVEDATE'] == ad] + #for zd,ad,reprocFlag in datepairs: + for ots,famtlt,reprocFlag in datepairs: #zdates = np.sort(np.unique(dateTiles['ZDATE'])) - dateTiles = dateTiles[dateTiles['ZDATE'] == zd] + log.info(len(sorttiles)) + log.info(sorttiles.dtype) + try: + dateTiles = sorttiles[sorttiles['ORIGTIMESTAMP'] == ots] + dateTiles = dateTiles[dateTiles['FAMTLTIME'] == famtlt] + dateTiles = dateTiles[dateTiles['REPROCFLAG'] == reprocFlag] + except: + dateTiles = sorttiles[sorttiles['ORIGTIMESTAMP'] == ots] + dateTiles = dateTiles[dateTiles['FAMTLTIME'] == famtlt] + if debug: - log.info('inside dateLoop. ZDate is {0}'.format(zd)) - log.info('inside dateLoop. archiveDate is {0}'.format(ad)) + log.info('inside dateLoop. ORIGTIMESTAMP is {0}'.format(ots)) + log.info('inside dateLoop. FAMTLTIME is {0}'.format(famtlt)) log.info('singleDate = {0}'.format(singleDate)) - assert(len(np.unique(dateTiles['ARCHIVEDATE'])) == 1) - assert(len(np.unique(dateTiles['ZDATE'])) == 1) + + + + assert(len(np.unique(dateTiles['ORIGTIMESTAMP'])) == 1) + assert(len(np.unique(dateTiles['FAMTLTIME'])) == 1) OrigFAs = [] AltFAs = [] AltFAs2 = [] @@ -968,136 +1126,218 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, #JL THIS SHOULD ONLY BE USED IN DIRECTORY NAMES. 
THE ACTUAL RUNDATE VALUE SHOULD INCLUDE A TIME fadate = ''.join(fadate.split('T')[0].split('-')) fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/' - if getosubp: - #JL When we are trying to reproduce a prior survey and/or debug, create a separate - #JL directory in fbadirbase + /orig/ to store the reproduced FA files. - FAAltName = fbadirbase + '/orig/fba-' + ts+ '.fits' - fbadir = fbadirbase + '/orig/' - else: - - #JL For normal "alternate" operations, store the fiber assignmens - #JL in the fbadirbase directory. - - FAAltName = fbadirbase + '/fba-' + ts+ '.fits' - fbadir = fbadirbase - - #JL Sometimes fiberassign leaves around temp files if a run is aborted. - #JL This command removes those temp files to prevent endless crashes. - if os.path.exists(FAAltName + '.tmp'): - os.remove(FAAltName + '.tmp') - #JL If the alternate fiberassignment was already performed, don't repeat it - #JL Unless the 'redoFA' flag is set to true - if redoFA or (not os.path.exists(FAAltName)): - if verbose and os.path.exists(FAAltName): - log.info('repeating fiberassignment') - elif verbose: - log.info('fiberassignment not found, running fiberassignment') - if verbose: - log.info(ts) - log.info(altmtldir + survey.lower()) - log.info(fbadir) - log.info(getosubp) - log.info(redoFA) - if getosubp and verbose: - log.info('checking contents of fiberassign directory before calling get_fba_from_newmtl') - log.info(glob.glob(fbadir + '/*' )) - get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock)#, targets = targets) - command_run = (['bash', fbadir + 'fa-' + ts + '.sh']) - if verbose: - log.info('fa command_run') - log.info(command_run) - result = subprocess.run(command_run, capture_output = True) - else: - log.info('not repeating fiberassignment') - OrigFAs.append(pf.open(FAOrigName)[1].data) - AltFAs.append(pf.open(FAAltName)[1].data) - AltFAs2.append(pf.open(FAAltName)[2].data) - TSs.append(ts) - fadates.append(fadate) - # ADM create the catalog of updated redshifts. - zcat = make_zcat(zcatdir, dateTiles, obscon, survey) - # ADM insist that for an MTL loop with real observations, the zcat - # ADM must conform to the data model. In particular, it must include - # ADM ZTILEID, and other columns addes for the Main Survey. These - # ADM columns may not be needed for non-ledger simulations. - # ADM Note that the data model differs with survey type. - zcatdm = survey_data_model(zcatdatamodel, survey=survey) - if zcat.dtype.descr != zcatdm.dtype.descr: - msg = "zcat data model must be {} not {}!".format( - zcatdm.dtype.descr, zcat.dtype.descr) - log.critical(msg) - raise ValueError(msg) - # ADM useful to know how many targets were updated. 
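The reprocessing branch above relies on fiber maps that were pickled out the first time each tile was processed; the round trip is plain pickle, with FAMapName built as in the patch (toy values here so the sketch runs standalone):

    import pickle

    A2RMap, R2AMap = {5001: 39627640566123}, {39627640566123: 5001}  # toy maps
    FAMapName = '/tmp/famap-000001.pickle'                # illustrative path

    # Write side: saved alongside the alternate fiberassignment.
    with open(FAMapName, 'wb') as handle:
        pickle.dump((A2RMap, R2AMap), handle, protocol=pickle.HIGHEST_PROTOCOL)
    # Read side: recovered when REPROCFLAG is set for the tile.
    with open(FAMapName, 'rb') as fl:
        A2RMap, R2AMap = pickle.load(fl, fix_imports=True)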
- _, _, _, _, sky, _ = decode_targetid(zcat["TARGETID"]) - ntargs, nsky = np.sum(sky == 0), np.sum(sky) - msg = "Update state for {} targets".format(ntargs) - msg += " (the zcats also contain {} skies with +ve TARGETIDs)".format(nsky) - log.info(msg) - - A2RMap = {} - R2AMap = {} - for ofa, afa, afa2 in zip (OrigFAs, AltFAs, AltFAs2): - if changeFiberOpt is None: - #if debug: - # tempsortofa = np.sort(ofa, order = 'FIBER') - # tempsortafa = np.sort(afa, order = 'FIBER') - # - # - # tempsortofa = np.sort(ofa, order = 'TARGETID') - # tempsortafa = np.sort(afa, order = 'TARGETID') - - A2RMapTemp, R2AMapTemp = createFAmap(ofa, afa, changeFiberOpt = changeFiberOpt) + log.info(t['REPROCFLAG']) + log.info(type(t['REPROCFLAG'])) + log.info(t["REPROCFLAG"] == True) + log.info(ts) + if t['REPROCFLAG']: + + log.info('reproc flag true') + if getosubp: + FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + else: + FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + with open(FAMapName,'rb') as fl: + (A2RMap, R2AMap) = pickle.load(fl,fix_imports = True) + + #zcat = make_zcat(zcatdir, dateTiles, obscon, survey) + zcat = make_zcat(zcatdir, [t], obscon, survey, allow_overlaps = True) + log.info('ts = {0}'.format(ts)) + altZCat = makeAlternateZCat(zcat, R2AMap, A2RMap) + reprocess_alt_ledger(altmtldir + '/{0}/{1}/'.format(survey.lower(), obscon.lower()), altZCat, fbadirbase, t, obscon=obscon) + if verbose or debug: + log.info('if main, should sleep 1 second') + thisUTCDate = get_utc_date(survey=survey) + if survey == "main": + sleep(1) + if verbose or debug: + log.info('has slept one second') + dateTiles["TIMESTAMP"] = thisUTCDate + if verbose or debug: + log.info('now writing to amtl_tile_tracker') + write_amtl_tile_tracker(altmtldir, [t], thisUTCDate, obscon = obscon, survey = survey) else: - raise NotImplementedError('changeFiberOpt has not yet been implemented') - - FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz' - - fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/' + log.info('ts = {0}'.format(ts)) + log.info('t[reprocflag] (should be false if here)= {0}'.format(t['REPROCFLAG'])) + + #if str(ts) == str(3414).zfill(6): + # raise ValueError('Not only do I create the backup here but I also need to fix the reproc flag') + if getosubp: - FAAltName = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/orig/fba-' + ts+ '.fits' - fbadir = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/orig/' + #JL When we are trying to reproduce a prior survey and/or debug, create a separate + #JL directory in fbadirbase + /orig/ to store the reproduced FA files. + FAAltName = fbadirbase + '/orig/fba-' + ts+ '.fits' + #FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + fbadir = fbadirbase + '/orig/' else: - FAAltName = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/fba-' + ts+ '.fits' - fbadir = fbadirbase - - A2RMapTemp, R2AMapTemp = createFAmap(ofa, afa, TargAlt = afa2, changeFiberOpt = changeFiberOpt) - A2RMap.update(A2RMapTemp) - R2AMap.update(R2AMapTemp) - - altZCat = makeAlternateZCat(zcat, R2AMap, A2RMap) - - + #JL For normal "alternate" operations, store the fiber assignments + #JL in the fbadirbase directory. - # ADM update the appropriate ledger. 
- if mock: - if targets is None: - raise ValueError('If processing mocks, you MUST specify a target file') - - update_ledger(althpdirname, altZCat, obscon=obscon.upper(), - numobs_from_ledger=numobs_from_ledger, targets = targets) - elif targets is None: - update_ledger(althpdirname, altZCat, obscon=obscon.upper(), - numobs_from_ledger=numobs_from_ledger) - else: - update_ledger(althpdirname, altZCat, obscon=obscon.upper(), - numobs_from_ledger=numobs_from_ledger, targets = targets) - if verbose or debug: - log.info('if main, should sleep 1 second') - if survey == "main": - sleep(1) - if verbose or debug: - log.info('has slept one second') - dateTiles["TIMESTAMP"] = get_utc_date(survey=survey) - if verbose or debug: - log.info('now writing to mtl_tile_file') - io.write_mtl_tile_file(altmtltilefn,dateTiles) - if verbose or debug: - log.info('has written to mtl_tile_file') - if singleDate: - #return 1 - return althpdirname, altmtltilefn, ztilefn, tiles - return althpdirname, altmtltilefn, ztilefn, tiles + FAAltName = fbadirbase + '/fba-' + ts+ '.fits' + #FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + fbadir = fbadirbase + log.info('FAOrigName = {0}'.format(FAOrigName)) + + log.info('FAAltName = {0}'.format(FAAltName)) + + #JL Sometimes fiberassign leaves around temp files if a run is aborted. + #JL This command removes those temp files to prevent endless crashes. + if os.path.exists(FAAltName + '.tmp'): + os.remove(FAAltName + '.tmp') + #JL If the alternate fiberassignment was already performed, don't repeat it + #JL Unless the 'redoFA' flag is set to true + log.info('redoFA = {0}'.format(redoFA)) + log.info('FAAltName = {0}'.format(FAAltName)) + + if redoFA or (not os.path.exists(FAAltName)): + if verbose and os.path.exists(FAAltName): + log.info('repeating fiberassignment') + elif verbose: + log.info('fiberassignment not found, running fiberassignment') + if verbose: + log.info(ts) + log.info(altmtldir + survey.lower()) + log.info(fbadir) + log.info(getosubp) + log.info(redoFA) + if getosubp and verbose: + log.info('checking contents of fiberassign directory before calling get_fba_from_newmtl') + log.info(glob.glob(fbadir + '/*' )) + get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock)#, targets = targets) + command_run = (['bash', fbadir + 'fa-' + ts + '.sh']) + if verbose: + log.info('fa command_run') + log.info(command_run) + result = subprocess.run(command_run, capture_output = True) + else: + log.info('not repeating fiberassignment') + log.info('adding fiberassignments to arrays') + OrigFAs.append(pf.open(FAOrigName)[1].data) + AltFAs.append(pf.open(FAAltName)[1].data) + AltFAs2.append(pf.open(FAAltName)[2].data) + TSs.append(ts) + fadates.append(fadate) + # ADM create the catalog of updated redshifts. + log.info('making zcats') + zcat = make_zcat(zcatdir, [t], obscon, survey) + # ADM insist that for an MTL loop with real observations, the zcat + # ADM must conform to the data model. In particular, it must include + # ADM ZTILEID, and other columns addes for the Main Survey. These + # ADM columns may not be needed for non-ledger simulations. + # ADM Note that the data model differs with survey type. 
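The guard that follows compares raw numpy dtype descriptors, so column order and byte order must both match, not merely the set of column names. A toy illustration:

    import numpy as np

    a = np.array([], dtype=[('RA', '>f8'), ('DEC', '>f8')])
    b = np.array([], dtype=[('DEC', '>f8'), ('RA', '>f8')])
    # Same columns, different order: the data-model check would reject this.
    print(a.dtype.descr != b.dtype.descr)   # True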
+ zcatdm = survey_data_model(zcatdatamodel, survey=survey) + if zcat.dtype.descr != zcatdm.dtype.descr: + msg = "zcat data model must be {} not {}!".format( + zcatdm.dtype.descr, zcat.dtype.descr) + log.critical(msg) + raise ValueError(msg) + # ADM useful to know how many targets were updated. + _, _, _, _, sky, _ = decode_targetid(zcat["TARGETID"]) + ntargs, nsky = np.sum(sky == 0), np.sum(sky) + msg = "Update state for {} targets".format(ntargs) + msg += " (the zcats also contain {} skies with +ve TARGETIDs)".format(nsky) + log.info(msg) + + A2RMap = {} + R2AMap = {} + log.info('beginning loop through FA files') + for ofa, afa, afa2, ts in zip (OrigFAs, AltFAs, AltFAs2, TSs): + log.info('ts = {0}'.format(ts)) + if changeFiberOpt is None: + #if debug: + # tempsortofa = np.sort(ofa, order = 'FIBER') + # tempsortafa = np.sort(afa, order = 'FIBER') + # + # + # tempsortofa = np.sort(ofa, order = 'TARGETID') + # tempsortafa = np.sort(afa, order = 'TARGETID') + + A2RMapTemp, R2AMapTemp = createFAmap(ofa, afa, changeFiberOpt = changeFiberOpt) + else: + raise NotImplementedError('changeFiberOpt has not yet been implemented') + + #FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz' + + A2RMapTemp, R2AMapTemp = createFAmap(ofa, afa, TargAlt = afa2, changeFiberOpt = changeFiberOpt) + + fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/' + if getosubp: + FAAltName = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/orig/fba-' + ts+ '.fits' + FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + fbadir = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/orig/' + else: + + FAAltName = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/fba-' + ts+ '.fits' + FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + fbadir = fbadirbase + + + if debug: + log.info('ts = {0}'.format(ts)) + log.info('FAMapName = {0}'.format(FAMapName)) + #JL This line (updating rather than checking for already existing keys) only works if + #JL there are only single passes of tiles per loop iteration. This is the case, but may not + #JL always be true. + log.info('updating map dicts') + A2RMap.update(A2RMapTemp) + R2AMap.update(R2AMapTemp) + + if redoFA or (not (os.path.isfile(FAMapName))): + log.info('dumping out fiber map to pickle file') + with open(FAMapName, 'wb') as handle: + pickle.dump((A2RMapTemp, R2AMapTemp), handle, protocol=pickle.HIGHEST_PROTOCOL) + log.info('---') + log.info('---') + log.info('---') + log.info('---') + log.info('---') + log.info('unique keys in R2AMap = {0:d}'.format(np.unique(R2AMap.keys()).shape[0])) + log.info('---') + log.info('---') + log.info('---') + log.info('---') + log.info('---') + log.info('---') + log.info('---') + + altZCat = makeAlternateZCat(zcat, R2AMap, A2RMap) + + + + # ADM update the appropriate ledger. 
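The three-way branch below reduces to a single update_ledger call whose targets argument is optional; a paraphrase (the kwargs form is a sketch, not the patch's code):

    if mock and (targets is None):
        raise ValueError('If processing mocks, you MUST specify a target file')
    kwargs = {} if targets is None else {'targets': targets}
    update_ledger(althpdirname, altZCat, obscon=obscon.upper(),
                  numobs_from_ledger=numobs_from_ledger, **kwargs)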
+ if mock: + if targets is None: + raise ValueError('If processing mocks, you MUST specify a target file') + + update_ledger(althpdirname, altZCat, obscon=obscon.upper(), + numobs_from_ledger=numobs_from_ledger, targets = targets) + elif targets is None: + update_ledger(althpdirname, altZCat, obscon=obscon.upper(), + numobs_from_ledger=numobs_from_ledger) + else: + update_ledger(althpdirname, altZCat, obscon=obscon.upper(), + numobs_from_ledger=numobs_from_ledger, targets = targets) + if verbose or debug: + log.info('if main, should sleep 1 second') + thisUTCDate = get_utc_date(survey=survey) + if survey == "main": + sleep(1) + if verbose or debug: + log.info('has slept one second') + dateTiles["TIMESTAMP"] = thisUTCDate + if verbose or debug: + log.info('now writing to amtl_tile_tracker') + #io.write_mtl_tile_file(altmtltilefn,dateTiles) + #write_amtl_tile_tracker(altmtldir, dateTiles, thisUTCDate, obscon = obscon, survey = survey) + log.info('changes are being registered') + write_amtl_tile_tracker(altmtldir, [t], thisUTCDate, obscon = obscon, survey = survey) + if verbose or debug: + log.info('has written to amtl_tile_tracker') + if singleDate: + #return 1 + return althpdirname, altmtltilefn, ztilefn, tiles + return althpdirname, altmtltilefn, ztilefn, tiles def plotMTLProb(mtlBaseDir, ndirs = 10, hplist = None, obscon = 'dark', survey = 'sv3', outFileName = None, outFileType = '.png', jupyter = False, debug = False, verbose = False): """Plots probability that targets were observed among {ndirs} alternate realizations @@ -1180,7 +1420,7 @@ def plotMTLProb(mtlBaseDir, ndirs = 10, hplist = None, obscon = 'dark', survey = if not jupyter: plt.close() - +#@profile def makeBitweights(mtlBaseDir, ndirs = 64, hplist = None, obscon = 'dark', survey = 'sv3', debug = False, obsprob = False, splitByReal = False, verbose = False): """Takes a set of {ndirs} realizations of DESI/SV3 and converts their MTLs into bitweights and an optional PROBOBS, the probability that the target was observed over the realizations @@ -1459,4 +1699,313 @@ def writeBitweights(mtlBaseDir, ndirs = None, hplist = None, debug = False, outd data.write(fn, overwrite = overwrite) - \ No newline at end of file +def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"): + """ + Reprocess HEALPixel-split ledgers for targets with new redshifts. + + Parameters + ---------- + hpdirname : :class:`str` + Full path to a directory containing an MTL ledger that has been + partitioned by HEALPixel (i.e. as made by `make_ledger`). + zcat : :class:`~astropy.table.Table`, optional + Redshift catalog table with columns ``TARGETID``, ``NUMOBS``, + ``Z``, ``ZWARN``, ``ZTILEID``, and ``msaddcols`` at the top of + the code for the Main Survey. + obscon : :class:`str`, optional, defaults to "DARK" + A string matching ONE obscondition in the desitarget bitmask yaml + file (i.e. in `desitarget.targetmask.obsconditions`), e.g. "DARK" + Governs how priorities are set using "obsconditions". Basically a + check on whether the files in `hpdirname` are as expected. + + Returns + ------- + :class:`dict` + A dictionary where the keys are the integer TILEIDs and the values + are the TIMESTAMP at which that tile was reprocessed. 
+ + """ + + #if getosubp: + # FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + #else: + # FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + #with open(FAMapName,'rb') as fl: + # (A2RMapTemp, R2AMapTemp) = pickle.load(fl,fix_imports = True) + t0 = time() + log.info("Reprocessing based on zcat with {} entries...t={:.1f}s" + .format(len(zcat), time()-t0)) + + # ADM the output dictionary. + timedict = {} + + # ADM bits that correspond to a "bad" observation in the zwarn_mask. + Mxbad = "BAD_SPECQA|BAD_PETALQA|NODATA" + + # ADM find the general format for the ledger files in `hpdirname`. + # ADM also returning the obsconditions. + fileform, oc = io.find_mtl_file_format_from_header(hpdirname, returnoc=True) + # ADM also find the format for any associated override ledgers. + overrideff = io.find_mtl_file_format_from_header(hpdirname, + forceoverride=True) + + # ADM check the obscondition is as expected. + if obscon != oc: + msg = "File is type {} but requested behavior is {}".format(oc, obscon) + log.critical(msg) + raise RuntimeError(msg) + + # ADM check the zcat has unique TARGETID/TILEID combinations. + tiletarg = [str(tt["ZTILEID"]) + "-" + str(tt["TARGETID"]) for tt in zcat] + if len(set(tiletarg)) != len(tiletarg): + msg = "Passed zcat does NOT have unique TARGETID/TILEID combinations!!!" + log.critical(msg) + raise RuntimeError(msg) + + # ADM record the set of tiles that are being reprocessed. + reproctiles = set(zcat["ZTILEID"]) + + # ADM read ALL targets from the relevant ledgers. + log.info("Reading (all instances of) targets for {} tiles...t={:.1f}s" + .format(len(reproctiles), time()-t0)) + nside = desitarget.mtl._get_mtl_nside() + theta, phi = np.radians(90-zcat["DEC"]), np.radians(zcat["RA"]) + pixnum = hp.ang2pix(nside, theta, phi, nest=True) + pixnum = list(set(pixnum)) + targets = io.read_mtl_in_hp(hpdirname, nside, pixnum, unique=False) + + # ADM remove OVERRIDE entries, which should never need reprocessing. + targets, _ = desitarget.mtl.remove_overrides(targets) + + # ADM sort by TIMESTAMP to ensure tiles are listed chronologically. + targets = targets[np.argsort(targets["TIMESTAMP"])] + + # ADM for speed, we only need to work with targets with a zcat entry. + ntargs = len(targets) + nuniq = len(set(targets["TARGETID"])) + log.info("Read {} targets with {} unique TARGETIDs...t={:.1f}s" + .format(ntargs, nuniq, time()-t0)) + log.info("Limiting targets to {} (unique) TARGETIDs in the zcat...t={:.1f}s" + .format(len(set(zcat["TARGETID"])), time()-t0)) + s = set(zcat["TARGETID"]) + ii = np.array([tid in s for tid in targets["TARGETID"]]) + targets = targets[ii] + nuniq = len(set(targets["TARGETID"])) + log.info("Retained {}/{} targets with {} unique TARGETIDs...t={:.1f}s" + .format(len(targets), ntargs, nuniq, time()-t0)) + + # ADM split off the updated target states from the unobserved states. + _, ii = np.unique(targets["TARGETID"], return_index=True) + unobs = targets[sorted(ii)] + # ADM this should remove both original UNOBS states and any resets + # ADM to UNOBS due to reprocessing data that turned out to be bad. + targets = targets[targets["ZTILEID"] != -1] + # ADM every target should have been unobserved at some point. + if len(set(targets["TARGETID"]) - set(unobs["TARGETID"])) != 0: + msg = "Some targets don't have a corresponding UNOBS state!!!" + log.critical(msg) + raise RuntimeError(msg) + # ADM each target should have only one UNOBS state. + if len(set(unobs["TARGETID"])) != len(unobs["TARGETID"]): + msg = "Passed ledgers have multiple UNOBS states!!!" 
+ log.critical(msg) + raise RuntimeError(msg) + + log.info("{} ({}) targets are in the unobserved (observed) state...t={:.1f}s" + .format(len(unobs), len(targets), time()-t0)) + + # ADM store first-time-through tile order to reproduce processing. + # ADM ONLY WORKS because we sorted by TIMESTAMP, above! + _, ii = np.unique(targets["ZTILEID"], return_index=True) + # ADM remember to sort ii so that the first tiles appear first. + orderedtiles = targets["ZTILEID"][sorted(ii)] + + # ADM assemble a zcat for all previous and reprocessed observations. + zcatfromtargs = np.zeros(len(targets), dtype=zcat.dtype) + for col in zcat.dtype.names: + zcatfromtargs[col] = targets[col] + # ADM note that we'll retain the TIMESTAMPed order of the old ledger + # ADM entries and new redshifts will (deliberately) be listed last. + allzcat = np.concatenate([zcatfromtargs, zcat]) + log.info("Assembled a zcat of {} total observations...t={:.1f}s" + .format(len(allzcat), time()-t0)) + + # ADM determine the FINAL observation for each TILED-TARGETID combo. + # ADM must flip first as np.unique finds the FIRST unique entries. + allzcat = np.flip(allzcat) + # ADM create a unique hash of TILEID and TARGETID. + tiletarg = [str(tt["ZTILEID"]) + "-" + str(tt["TARGETID"]) for tt in allzcat] + # ADM find the final unique combination of TILEID and TARGETID. + _, ii = np.unique(tiletarg, return_index=True) + # ADM make sure to retain exact reverse-ordering. + ii = sorted(ii) + # ADM condition on indexes-of-uniqueness and flip back. + allzcat = np.flip(allzcat[ii]) + log.info("Found {} final TARGETID/TILEID combinations...t={:.1f}s" + .format(len(allzcat), time()-t0)) + + # ADM mock up a dictionary of timestamps in advance. This is faster + # ADM as no delays need to be built into the code. + now = get_utc_date(survey="main") + timestamps = {t: desitarget.mtl.add_to_iso_date(now, s) for s, t in enumerate(orderedtiles)} + + # ADM make_mtl() expects zcats to be in Table form. + allzcat = Table(allzcat) + # ADM a merged target list to track and record the final states. + mtl = Table(unobs) + # ADM to hold the final list of updates per-tile. + donemtl = [] + + # ADM loop through the tiles in order and update the MTL state. + for tileid in orderedtiles: + # ADM the timestamp for this tile. + timestamp = timestamps[tileid] + + # ADM restrict to the observations on this tile. + zcatmini = allzcat[allzcat["ZTILEID"] == tileid] + # ADM check there are only unique TARGETIDs on each tile! + if len(set(zcatmini["TARGETID"])) != len(zcatmini): + msg = "There are duplicate TARGETIDs on tile {}".format(tileid) + log.critical(msg) + raise RuntimeError(msg) + + # ADM update NUMOBS in the zcat using previous MTL totals. + mii, zii = desitarget.mtl.match(mtl["TARGETID"], zcatmini["TARGETID"]) + zcatmini["NUMOBS"][zii] = mtl["NUMOBS"][mii] + 1 + + # ADM restrict to just objects in the zcat that match an UNOBS + # ADM target (i,e that match something in the MTL). + log.info("Processing {}/{} observations from zcat on tile {}...t={:.1f}s" + .format(len(zii), len(zcatmini), tileid, time()-t0)) + log.info("(i.e. removed secondaries-if-running-primaries or vice versa)") + zcatmini = zcatmini[zii] + + # ADM ------ + # ADM NOTE: We could use trimtozcat=False without matching, and + # ADM just continually update the overall mtl list. But, make_mtl + # ADM doesn't track NUMOBS just NUMOBS_MORE, so we need to add + # ADM complexity somewhere, hence trimtozcat=True/matching-back. + # ADM ------ + # ADM push the observations on this tile through MTL. 
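    # Because trimtozcat=True, make_mtl() returns only the rows that matched
    # zcatmini; the updated states are matched back into the running `mtl`
    # table by TARGETID below, and the bad (ZWARN-masked) observations that
    # make_mtl() dropped are re-added afterwards so their redshift info is
    # recorded without ever touching NUMOBS or NUMOBS_MORE.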
+ zmtl = desitarget.mtl.make_mtl(mtl, oc, zcat=zcatmini, trimtozcat=True, trimcols=True) + + # ADM match back to overall merged target list to update states. + mii, zii = desitarget.mtl.match(mtl["TARGETID"], zmtl["TARGETID"]) + # ADM update the overall merged target list. + for col in mtl.dtype.names: + mtl[col][mii] = zmtl[col][zii] + # ADM also update the TIMESTAMP for changes on this tile. + mtl["TIMESTAMP"][mii] = timestamp + + # ADM trimtozcat=True discards BAD observations. Retain these. + tidmiss = list(set(zcatmini["TARGETID"]) - set(zmtl["TARGETID"])) + tii = desitarget.mtl.match_to(zcatmini["TARGETID"], tidmiss) + zbadmiss = zcatmini[tii] + # ADM check all of the missing observations are, indeed, bad. + if np.any(zbadmiss["ZWARN"] & zwarn_mask.mask(Mxbad) == 0): + msg = "Some objects skipped by make_mtl() on tile {} are not BAD!!!" + msg = msg.format(tileid) + log.critical(msg) + raise RuntimeError(msg) + log.info("Adding back {} bad observations from zcat...t={:.1f}s" + .format(len(zbadmiss), time()-t0)) + + # ADM update redshift information in MTL for bad observations. + mii, zii = desitarget.mtl.match(mtl["TARGETID"], zbadmiss["TARGETID"]) + # ADM update the overall merged target list. + # ADM Never update NUMOBS or NUMOBS_MORE using bad observations. + for col in set(zbadmiss.dtype.names) - set(["NUMOBS", "NUMOBS_MORE"]): + mtl[col][mii] = zbadmiss[col][zii] + # ADM also update the TIMESTAMP for changes on this tile. + mtl["TIMESTAMP"][mii] = timestamp + + # ADM record the information to add to the output ledgers... + donemtl.append(mtl[mtl["ZTILEID"] == tileid]) + + # ADM if this tile was actually reprocessed (rather than being a + # ADM later overlapping tile) record the TIMESTAMP... + if tileid in reproctiles: + timedict[tileid] = timestamp + + # ADM collect the results. + mtl = Table(np.concatenate(donemtl)) + + # ADM re-collect everything on pixels for writing to ledgers. + nside = desitarget.mtl._get_mtl_nside() + theta, phi = np.radians(90-mtl["DEC"]), np.radians(mtl["RA"]) + pixnum = hp.ang2pix(nside, theta, phi, nest=True) + + # ADM loop through the pixels and update the ledger, depending + # ADM on whether we're working with .fits or .ecsv files. + ender = get_mtl_ledger_format() + for pix in set(pixnum): + # ADM grab the targets in the pixel. + ii = pixnum == pix + mtlpix = mtl[ii] + + # ADM the correct filenames for this pixel number. + fn = fileform.format(pix) + overfn = overrideff.format(pix) + + # ADM if an override ledger exists, update it and recover its + # ADM relevant MTL entries. + if os.path.exists(overfn): + overmtl = process_overrides(overfn) + # ADM add any override entries TO THE END OF THE LEDGER. + mtlpix = vstack([mtlpix, overmtl]) + + # ADM if we're working with .ecsv, simply append to the ledger. + if ender == 'ecsv': + f = open(fn, "a") + astropy.io.ascii.write(mtlpix, f, format='no_header', formats=mtlformatdict) + f.close() + # ADM otherwise, for FITS, we'll have to read in the whole file. + else: + ledger, hd = fitsio.read(fn, extname="MTL", header=True) + done = np.concatenate([ledger, mtlpix.as_array()]) + fitsio.write(fn+'.tmp', done, extname='MTL', header=hd, clobber=True) + os.rename(fn+'.tmp', fn) + + return timedict + + +def write_amtl_tile_tracker(dirname, tiles, timestamp, obscon = 'dark', survey = 'main'): + """Write AMTL Processing times into TileTrackers + + Parameters + ---------- + dirname : :class:`str` + The path to the AMTL directory. 
+ tiles : :class`astropy.table or numpy.recarray` + The tiles which were processed in this AMTL loop iteration + timestamp : :class:`str` + the time at which the AMTL updates were performed + obscon : :class:`str` + The observing conditions of the tiles that were processed. "dark" or "bright" + survey : :class:`str` + The survey of the tiles that were processed. "main" or "sv3" + + Returns + ------- + :class:`int` + The number of targets that were written to file. + :class:`str` + The name of the file to which targets were written. + """ + #if len(tiles) == 1: + # tiles = [tiles] + TileTrackerFN = makeTileTrackerFN(dirname, survey, obscon) + log.info(TileTrackerFN) + if os.path.isfile(TileTrackerFN): + TileTracker = Table.read(TileTrackerFN, format = 'ascii.ecsv') + + for t in tiles: + tileid = t['TILEID'] + reprocFlag = t['REPROCFLAG'] + cond = (TileTracker['TILEID'] == tileid) & (TileTracker['REPROCFLAG'] == reprocFlag) + log.info('for tile {0}, number of matching tiles = {1}'.format(tileid, np.sum(cond))) + debugTrap = np.copy(TileTracker['ALTARCHIVEDATE']) + TileTracker['ALTARCHIVEDATE'][cond] = timestamp + + assert(not (np.all(TileTracker['ALTARCHIVEDATE'] is None))) + TileTracker.write(TileTrackerFN, format = 'ascii.ecsv', overwrite = True) \ No newline at end of file diff --git a/py/LSS/SV3/fatools.py b/py/LSS/SV3/fatools.py index 128cac00e..e87b06352 100644 --- a/py/LSS/SV3/fatools.py +++ b/py/LSS/SV3/fatools.py @@ -267,7 +267,7 @@ def redo_fba_fromorig(tileid,outdir=None,faver=None, verbose = False,survey='mai fo.close() -def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None, overwriteFA = False,newdir=None, verbose = False, mock = False): +def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None, overwriteFA = False,newdir=None, verbose = False, mock = False, mtltime = None): ts = str(tileid).zfill(6) #get info from origin fiberassign file fht = fitsio.read_header('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz') @@ -368,7 +368,8 @@ def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None, tarfn, tdirMain+prog, survey = 'main', - mock = mock) + mock = mock, + mtltime=mtltime) else: log.critical('invalid input directory. must contain either sv3, main, or holding') raise ValueError('indir must contain either sv3, main, or holding') From 11bb93ed7ecdd7a3f29f85cb2e90ae0ddb75c873 Mon Sep 17 00:00:00 2001 From: jalasker Date: Fri, 6 Oct 2023 08:45:47 -0700 Subject: [PATCH 002/297] First attempt at refactoring code based on `today` rather than a preordered list of tiles. Added missing files to commit. 
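The `today`-based flow this series is building toward walks the survey calendar one night at a time instead of consuming a preordered tile list. A hypothetical driver sketch (nextDate and do_fiberassignment are the helpers added in this patch; the loop itself, the dates, and the directory are illustrative):

    altmtldir = 'Univ000/'              # one alternate realization
    today, endDate = 20210513, 20210601
    while int(today) <= endDate:
        # fiberassign everything whose FADATE is `today`, then advance a night
        do_fiberassignment(altmtldir, survey='main', obscon='dark',
                           today=today)
        today = nextDate(today)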
--- bin/dateLoopAltMTLBugFix.sh | 6 +- py/LSS/SV3/altmtltools.py | 766 +++++++++++++++++++++--------------- py/LSS/SV3/fatools.py | 20 +- 3 files changed, 459 insertions(+), 333 deletions(-) diff --git a/bin/dateLoopAltMTLBugFix.sh b/bin/dateLoopAltMTLBugFix.sh index 17b1ce0ec..1663b97f9 100755 --- a/bin/dateLoopAltMTLBugFix.sh +++ b/bin/dateLoopAltMTLBugFix.sh @@ -32,18 +32,18 @@ echo "$argstring" if [ $QVal = 'interactive' ]; then - srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 03:00:00 --dependency=afterany:15958547 $path2LSS/runAltMTLParallel.py $argstring + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 03:00:00 --dependency=afterany:16533190 $path2LSS/runAltMTLParallel.py $argstring fi if [ $QVal = 'regular' ]; then - srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:15958547 $path2LSS/runAltMTLParallel.py $argstring + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:16533190 $path2LSS/runAltMTLParallel.py $argstring fi if [ $QVal = 'debug' ]; then - srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 00:15:00 --dependency=afterany:15958547 $path2LSS/runAltMTLParallel.py $argstring + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 00:15:00 --dependency=afterany:16533190 $path2LSS/runAltMTLParallel.py $argstring fi #retcode=$? #qR=0 #DO NOT CHANGE. This prevents further restarts after the first if qR is set to 1 at top. diff --git a/py/LSS/SV3/altmtltools.py b/py/LSS/SV3/altmtltools.py index 4ad650a6b..ed328ab92 100644 --- a/py/LSS/SV3/altmtltools.py +++ b/py/LSS/SV3/altmtltools.py @@ -61,7 +61,42 @@ ('VERSION', 'i8'), ('ARCHIVEDATE', '>i8')]) +def datesInMonthForYear(yyyy): + # if divisible by 4 + if (yyyy % 4) == 0: + # if not divisible by 100, leap year + if not ((yyyy % 100) == 0): + monthLengths = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + # if divisible by 100 and 400, leap year + elif ((yyyy % 400) == 0): + monthLengths = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + # if divisble by 100 and not 400, no leap year + else: + monthLengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + else: + monthLengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + return monthLengths + +def nextDate(date): + # JL takes NITE in YYYYMMDD form and increments to the next date + yyyy, mm, dd = int(str(date)[0:4]), int(str(date)[4:6]), int(str(date)[6:]) + log.info('date = {0}'.format(date)) + monthLengths = datesInMonthForYear(yyyy) + log.info('monthLengths array is {0}'.format(monthLengths)) + log.info('yyyy, mm, dd = {0}, {1}, {2}'.format(yyyy, mm, dd)) + if dd == monthLengths[mm - 1]: + if mm == 12: + mm = '01' + yyyy = str(yyyy+1) + else: + mm = str(mm+1).zfill(2) + + dd = '01' + else: + dd = str(dd + 1).zfill(2) + log.info('yyyy, mm, dd = {0}, {1}, {2}'.format(yyyy, mm, dd)) + return ''.join([str(yyyy), str(mm).zfill(2), str(dd).zfill(2)]) def evaluateMask(bits, mask, evalMultipleBits = False): if evalMultipleBits: @@ -135,9 +170,9 @@ def uniqueTimestampFATimePairs(tileList, withFlag = False): output = [] for t in tileList: if withFlag: - datepair = (t['ORIGTIMESTAMP'], t['FAMTLTIME'], t['REPROCFLAG']) + datepair = (t['ORIGMTLTIMESTAMP'], t['FAMTLTIME'], t['REPROCFLAG']) else: - datepair = (t['ORIGTIMESTAMP'], t['FAMTLTIME']) + datepair = (t['ORIGMTLTIMESTAMP'], t['FAMTLTIME']) if datepair in output: continue @@ -367,10 +402,11 @@ def checkMTLChanged(MTLFile1, MTLFile2): print(NDiff3) def makeTileTrackerFN(dirName, survey, obscon): - return dirName + 
'/{0}survey-{1}obscon-TileTracker.txt'.format(survey, obscon.upper()) + return dirName + '/{0}survey-{1}obscon-TileTracker.ecsv'.format(survey, obscon.upper()) def makeTileTracker(altMTLDir, survey = 'main', obscon = 'dark', retroactive = False, overwrite = False, startDate = None, endDate = None): + # JL altMTLDir includes the UnivNNN log.info('generating tile tracker file') outputFN = makeTileTrackerFN(altMTLDir,survey, obscon) @@ -408,7 +444,7 @@ def makeTileTracker(altMTLDir, survey = 'main', obscon = 'dark', retroactive = F TrimmedTileIDs = TrimmedTiles['TILEID'] #origMTLDoneTiles.sort(keys = ['ARCHIVEDATE', 'ZDATE']) origMTLDoneTiles.sort(keys = ['TIMESTAMP', 'ZDATE']) - TILEID, ARCHIVEDATE, ZDATE,FAMTLDATE, ALTARCHIVEDATE, ORIGTIMESTAMP, REPROCFLAG, OVERRIDEFLAG, OBSCONS, SURVEYS = [],[],[],[],[],[],[],[],[],[] + TILEID, ARCHIVEDATE, ZDATE, FADATE,ALTFADATE, FAMTLDATE, ALTARCHIVEDATE, ORIGMTLDATE, ORIGMTLTIMESTAMP, REPROCFLAG, OVERRIDEFLAG, OBSCONS, SURVEYS = [],[],[],[],[],[],[],[],[],[],[],[],[] for omtlDoneTile in origMTLDoneTiles: #TILEID TIMESTAMP VERSION PROGRAM ZDATE ARCHIVEDATE thisTileID = omtlDoneTile['TILEID'] @@ -417,12 +453,12 @@ def makeTileTracker(altMTLDir, survey = 'main', obscon = 'dark', retroactive = F thists = str(thisTileID).zfill(6) FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+thists[:3]+'/fiberassign-'+thists+'.fits.gz' fhtOrig = fitsio.read_header(FAOrigName) - thisFAMTLTime = fhtOrig['MTLTIME'] thisVersion = omtlDoneTile['VERSION'] thisProgram = omtlDoneTile['PROGRAM'] thisZDate = omtlDoneTile['ZDATE'] thisArchiveDate = omtlDoneTile['ARCHIVEDATE'] - thisOrigTimestamp = omtlDoneTile['TIMESTAMP'] + thisOrigMTLTimestamp = omtlDoneTile['TIMESTAMP'] + thisOrigMTLDate = thisOrigMTLTimestamp.split('T')[0].replace('-', '') if thisArchiveDate < startDate: thisAltArchiveDate = thisArchiveDate elif thisArchiveDate > endDate: @@ -431,14 +467,32 @@ def makeTileTracker(altMTLDir, survey = 'main', obscon = 'dark', retroactive = F thisAltArchiveDate = None + thisReprocFlag = thisTileID in TILEID + if thisReprocFlag: + thisFAMTLTime = None + thisFADate = None + thisAltFADate = None + else: + thisFAMTLTime = fhtOrig['RUNDATE'] + + thisFADate = thisFAMTLTime.split('T')[0].replace('-', '') + if thisArchiveDate < startDate: + thisAltFADate = thisFADate + else: + thisAltArchiveDate = None + thisAltFADate = None + #thisOverrideFlag = thisTileID in OverrideTileID TILEID.append(thisTileID) ARCHIVEDATE.append(thisArchiveDate) ZDATE.append(thisZDate) + FADATE.append(thisFADate) FAMTLDATE.append(thisFAMTLTime) + ALTFADATE.append(thisAltFADate) ALTARCHIVEDATE.append(thisAltArchiveDate) - ORIGTIMESTAMP.append(thisOrigTimestamp) + ORIGMTLDATE.append(thisOrigMTLDate) + ORIGMTLTIMESTAMP.append(thisOrigMTLTimestamp) REPROCFLAG.append(thisReprocFlag) OVERRIDEFLAG.append(None) OBSCONS.append(obscon) @@ -446,24 +500,61 @@ def makeTileTracker(altMTLDir, survey = 'main', obscon = 'dark', retroactive = F - TilesToProcessNearlyInOrder = [TILEID, ARCHIVEDATE, ZDATE, FAMTLDATE, ALTARCHIVEDATE,ORIGTIMESTAMP, REPROCFLAG, OVERRIDEFLAG, OBSCONS, SURVEYS] + TilesToProcessNearlyInOrder = [TILEID, ARCHIVEDATE, ZDATE, ALTFADATE, FADATE, FAMTLDATE, ALTARCHIVEDATE, ORIGMTLDATE, ORIGMTLTIMESTAMP, REPROCFLAG, OVERRIDEFLAG, OBSCONS, SURVEYS] + + if survey.lower() == 'sv3': + firstSurveyDate = 20210404 + elif survey.lower() == 'main': + firstSurveyDate = 20210513 + else: + log.warning('SURVEY SHOULD BE EITHER `sv3` OR `main`, BUT WAS GIVEN AS {0}'.format(survey.lower())) + firstSurveyDate 
= 20200101 t = Table(TilesToProcessNearlyInOrder, - names=('TILEID', 'ARCHIVEDATE', 'ZDATE', 'FAMTLTIME', 'ALTARCHIVEDATE', 'ORIGTIMESTAMP', 'REPROCFLAG', 'OVERRIDEFLAG', 'OBSCON', 'SURVEY'), - meta={'Name': 'AltMTLTileTracker', 'StartDate': startDate, 'EndDate': endDate}) - t.sort(['ORIGTIMESTAMP','FAMTLTIME']) + names=('TILEID', 'ARCHIVEDATE', 'ZDATE', 'ALTFADATE', 'FADATE', 'FAMTLTIME', 'ALTARCHIVEDATE', 'ORIGMTLDATE', 'ORIGMTLTIMESTAMP', 'REPROCFLAG', 'OVERRIDEFLAG', 'OBSCON', 'SURVEY'), + meta={'Name': 'AltMTLTileTracker', 'StartDate': startDate, 'EndDate': endDate, 'Today': max(int(startDate), firstSurveyDate )}) + t.sort(['ORIGMTLTIMESTAMP','ZDATE']) + #t.sort(['ORIGMTLTIMESTAMP','FAMTLTIME']) t.write(outputFN, format='ascii.ecsv') return 1 -def tiles_to_be_processed_alt(altmtldir, obscon = 'dark', survey = 'main'): +def tiles_to_be_processed_alt(altmtldir, obscon = 'dark', survey = 'main', today = None, mode = 'fa'): + + + TileTrackerFN = makeTileTrackerFN(altmtldir, survey, obscon) TileTracker = Table.read(TileTrackerFN, format = 'ascii.ecsv') - returnTiles = TileTracker[(TileTracker['OBSCON'] == obscon.upper()) | (TileTracker['OBSCON'] == obscon.lower())] - returnTiles = returnTiles[(returnTiles['SURVEY'] == survey.upper()) | (returnTiles['SURVEY'] == survey.lower())] - returnTiles = returnTiles[returnTiles['ALTARCHIVEDATE'] == None] + if mode.lower() == 'fa': + dateKey = 'FADATE' + log.info('len(TileTracker) pre removal of Nones = {0}'.format(len(TileTracker))) + TileTracker = TileTracker[TileTracker[dateKey] != None] + log.info('len(TileTracker) post removal of Nones = {0}'.format(len(TileTracker))) + + elif mode.lower() == 'update': + dateKey = 'ORIGMTLDATE' + else: + raise ValueError('mode must be either `fa` or `update`. You provided {0}'.format(mode)) + + + if not (today is None): + log.info('dateKey = {0}'.format(dateKey)) + log.info('today = {0}'.format(today)) + log.info('TileTracker.shape = {0}'.format(len(TileTracker))) + log.info('some example dates = {0}'.format(TileTracker[dateKey][0:4])) + TileTracker = TileTracker[TileTracker[dateKey].astype(int) == int(today)] + + + indices = np.where( ((TileTracker['OBSCON'] == obscon.upper()) | (TileTracker['OBSCON'] == obscon.lower())) & (TileTracker['SURVEY'] == survey.upper()) | (TileTracker['SURVEY'] == survey.lower()) ) + log.info('indices = {0}'.format(indices)) + returnTiles = TileTracker[indices] + #returnTiles = returnTiles[np.where((returnTiles['SURVEY'] == survey.upper()) | (returnTiles['SURVEY'] == survey.lower()))] + if mode.lower() == 'update': + returnTiles = returnTiles[np.where(returnTiles['ALTARCHIVEDATE'] == None)] + if mode.lower() == 'fa': + returnTiles = returnTiles[np.where(returnTiles['ALTFADATE'] == None)] return returnTiles @@ -903,6 +994,273 @@ def quickRestartFxn(ndirs = 1, altmtlbasedir = None, survey = 'sv3', obscon = 'd restartMTLs = ls(altmtldirRestart +'/' + survey + '/' + obscon + '/' + '/orig/*') for fn in restartMTLs: copyfile(fn, altmtldirRestart +'/' + survey + '/' + obscon + '/' + fn.split('/')[-1]) + +def do_fiberassignment(altmtldir, survey = 'sv3', obscon = 'dark', today = None, + verbose = False, debug = False, getosubp = False, redoFA = False, mock = False): + FATiles = tiles_to_be_processed_alt(altmtldir, obscon = obscon, survey = survey, today = today, mode = 'fa') + if len(FATiles): + try: + log.info('FATiles[0] = {0}'.format(FATiles[0])) + except: + log.info('cannot access element 0 of FATiles') + log.info('FATiles = {0}'.format(FATiles)) + + + OrigFAs = [] + AltFAs = [] + AltFAs2 
= [] + TSs = [] + fadates = [] + + + if len(FATiles): + log.info('len FATiles = {0}'.format(len(FATiles))) + pass + else: + return OrigFAs, AltFAs, AltFAs2, TSs, fadates, FATiles + for t in FATiles: + log.info('t = {0}'.format(t)) + #JL This loop takes each of the original fiberassignments for each of the tiles on $date + #JL and opens them to obtain information for the alternative fiber assignments. + #JL Then it runs the alternative fiber assignments, stores the results in an array (AltFAs) + #JL while also storing the original fiber assignment files in a different array (OrigFA) + + ts = str(t['TILEID']).zfill(6) + #JL Full path to the original fiber assignment from the real survey + FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz' + fhtOrig = fitsio.read_header(FAOrigName) + fadate = fhtOrig['RUNDATE'] + # e.g. DESIROOT/target/catalogs/dr9/1.0.0/targets/main/resolve/dark + targver = fhtOrig['TARG'].split('/targets')[0].split('/')[-1] + assert(not ('/' in targver)) + log.info('fadate = {0}'.format(fadate)) + #JL stripping out the time of fiber assignment to leave only the date + #JL THIS SHOULD ONLY BE USED IN DIRECTORY NAMES. THE ACTUAL RUNDATE VALUE SHOULD INCLUDE A TIME + fadate = ''.join(fadate.split('T')[0].split('-')) + log.info('fadate stripped = {0}'.format(fadate)) + fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/' + + log.info('fbadirbase = {0}'.format(fbadirbase)) + log.info('ts = {0}'.format(ts)) + log.info('t[reprocflag] (should be false if here)= {0}'.format(t['REPROCFLAG'])) + assert(not bool(t['REPROCFLAG'])) + #if str(ts) == str(3414).zfill(6): + # raise ValueError('Not only do I create the backup here but I also need to fix the reproc flag') + + if getosubp: + #JL When we are trying to reproduce a prior survey and/or debug, create a separate + #JL directory in fbadirbase + /orig/ to store the reproduced FA files. + FAAltName = fbadirbase + '/orig/fba-' + ts+ '.fits' + #FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + fbadir = fbadirbase + '/orig/' + else: + + #JL For normal "alternate" operations, store the fiber assignments + #JL in the fbadirbase directory. + + FAAltName = fbadirbase + '/fba-' + ts+ '.fits' + #FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + fbadir = fbadirbase + if verbose or debug: + log.info('FAOrigName = {0}'.format(FAOrigName)) + + log.info('FAAltName = {0}'.format(FAAltName)) + + #JL Sometimes fiberassign leaves around temp files if a run is aborted. + #JL This command removes those temp files to prevent endless crashes. 
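+        #JL a leftover FAAltName + '.tmp' means a previous fiberassign run was
+        #JL aborted mid-write, so clear it before deciding whether to rerun below.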
+        if os.path.exists(FAAltName + '.tmp'):
+            os.remove(FAAltName + '.tmp')
+        #JL If the alternate fiberassignment was already performed, don't repeat it
+        #JL Unless the 'redoFA' flag is set to true
+        if verbose or debug:
+            log.info('redoFA = {0}'.format(redoFA))
+            log.info('FAAltName = {0}'.format(FAAltName))
+
+        if redoFA or (not os.path.exists(FAAltName)):
+            if verbose and os.path.exists(FAAltName):
+                log.info('repeating fiberassignment')
+            elif verbose:
+                log.info('fiberassignment not found, running fiberassignment')
+            if verbose:
+                log.info(ts)
+                log.info(altmtldir + survey.lower())
+                log.info(fbadir)
+                log.info(getosubp)
+                log.info(redoFA)
+            if getosubp and verbose:
+                log.info('checking contents of fiberassign directory before calling get_fba_from_newmtl')
+                log.info(glob.glob(fbadir + '/*' ))
+            get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock, targver = targver)#, targets = targets)
+            command_run = (['bash', fbadir + 'fa-' + ts + '.sh'])
+            if verbose:
+                log.info('fa command_run')
+                log.info(command_run)
+            result = subprocess.run(command_run, capture_output = True)
+        else:
+            log.info('not repeating fiberassignment')
+        log.info('adding fiberassignments to arrays')
+        OrigFAs.append(pf.open(FAOrigName)[1].data)
+        AltFAs.append(pf.open(FAAltName)[1].data)
+        AltFAs2.append(pf.open(FAAltName)[2].data)
+        TSs.append(ts)
+        fadates.append(fadate)
+
+    return OrigFAs, AltFAs, AltFAs2, TSs, fadates, FATiles
+
+def make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, survey = 'sv3', obscon = 'dark', changeFiberOpt = None, verbose = False, debug = False, getosubp = False, redoFA = False, today = None):
+    A2RMap = {}
+    R2AMap = {}
+    if verbose:
+        log.info('beginning loop through FA files')
+    for ofa, afa, afa2, ts, fadate, t in zip(OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles):
+        log.info('ts = {0}'.format(ts))
+        if changeFiberOpt is None:
+            A2RMap, R2AMap = createFAmap(ofa, afa, changeFiberOpt = changeFiberOpt)
+        else:
+            raise NotImplementedError('changeFiberOpt has not yet been implemented')
+
+        #FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz'
+
+        A2RMap, R2AMap = createFAmap(ofa, afa, TargAlt = afa2, changeFiberOpt = changeFiberOpt)
+
+        fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/'
+        if getosubp:
+            FAAltName = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/orig/fba-' + ts+ '.fits'
+            FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle'
+            fbadir = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/orig/'
+        else:
+
+            FAAltName = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/fba-' + ts+ '.fits'
+            FAMapName = fbadirbase + '/famap-' + ts + '.pickle'
+            fbadir = fbadirbase
+
+
+        if debug:
+            log.info('ts = {0}'.format(ts))
+            log.info('FAMapName = {0}'.format(FAMapName))
+
+
+        if redoFA or (not (os.path.isfile(FAMapName))):
+            if verbose:
+                log.info('dumping out fiber map to pickle file')
+            with open(FAMapName, 'wb') as handle:
+                pickle.dump((A2RMap, R2AMap), handle, protocol=pickle.HIGHEST_PROTOCOL)
+        thisUTCDate = get_utc_date(survey=survey)
+        if verbose:
+            log.info('---')
+            #JL np.unique needs a list of the keys; passing the dict_keys view directly
+            #JL wraps it in a 0-d object array and always reports a single unique key.
+            log.info('unique keys in R2AMap = {0:d}'.format(np.unique(list(R2AMap.keys())).shape[0]))
+            log.info('---')
+
+            log.info('---')
+            log.info('unique keys in A2RMap = {0:d}'.format(np.unique(list(A2RMap.keys())).shape[0]))
+            log.info('---')
+        retval = write_amtl_tile_tracker(altmtldir, [t], thisUTCDate, today, obscon = 
obscon, survey = survey, mode = 'fa')
+        log.info('write_amtl_tile_tracker retval = {0}'.format(retval))
+
+    return A2RMap, R2AMap
+def update_alt_ledger(altmtldir,althpdirname, altmtltilefn, survey = 'sv3', obscon = 'dark', today = None,
+                      getosubp = False, zcatdir = None, mock = False, numobs_from_ledger = True, targets = None, verbose = False, debug = False):
+    if verbose or debug:
+        log.info('today = {0}'.format(today))
+        log.info('obscon = {0}'.format(obscon))
+        log.info('survey = {0}'.format(survey))
+    UpdateTiles = tiles_to_be_processed_alt(altmtldir, obscon = obscon, survey = survey, today = today, mode = 'update')
+    log.info('updatetiles = {0}'.format(UpdateTiles))
+    # ADM grab the zcat directory (in case we're relying on $ZCAT_DIR).
+    zcatdir = get_zcat_dir(zcatdir)
+    # ADM And construct the associated ZTILE filename.
+    ztilefn = os.path.join(zcatdir, get_ztile_file_name())
+    if len(UpdateTiles):
+        pass
+    else:
+        return althpdirname, altmtltilefn, ztilefn, None
+    for t in UpdateTiles:
+        if t['REPROCFLAG']:
+            raise ValueError('Make sure backup is made and reprocessing logic is correct before beginning reprocessing.')
+        ts = str(t['TILEID']).zfill(6)
+
+        fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + t['FADATE'] + '/'
+        log.info('t = {0}'.format(t))
+        log.info('fbadirbase = {0}'.format(fbadirbase))
+        log.info('ts = {0}'.format(ts))
+
+        if getosubp:
+            FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle'
+        else:
+            FAMapName = fbadirbase + '/famap-' + ts + '.pickle'
+
+        log.info('FAMapName = {0}'.format(FAMapName))
+        with open(FAMapName,'rb') as fl:
+            (A2RMap, R2AMap) = pickle.load(fl,fix_imports = True)
+
+        # ADM create the catalog of updated redshifts.
+        log.info('making zcats')
+        log.info('zcatdir = {0}'.format(zcatdir))
+        log.info('t = {0}'.format(t))
+        zcat = make_zcat(zcatdir, [t], obscon, survey)
+
+        altZCat = makeAlternateZCat(zcat, R2AMap, A2RMap, debug = debug, verbose = verbose)
+        # ADM insist that for an MTL loop with real observations, the zcat
+        # ADM must conform to the data model. In particular, it must include
+        # ADM ZTILEID, and other columns added for the Main Survey. These
+        # ADM columns may not be needed for non-ledger simulations.
+        # ADM Note that the data model differs with survey type.
+        zcatdm = survey_data_model(zcatdatamodel, survey=survey)
+        if zcat.dtype.descr != zcatdm.dtype.descr:
+            msg = "zcat data model must be {} not {}!".format(
+                zcatdm.dtype.descr, zcat.dtype.descr)
+            log.critical(msg)
+            raise ValueError(msg)
+        # ADM useful to know how many targets were updated.
+        _, _, _, _, sky, _ = decode_targetid(zcat["TARGETID"])
+        ntargs, nsky = np.sum(sky == 0), np.sum(sky)
+        msg = "Update state for {} targets".format(ntargs)
+        msg += " (the zcats also contain {} skies with +ve TARGETIDs)".format(nsky)
+        log.info(msg)
+        didUpdateHappen = False
+        # ADM update the appropriate ledger. 
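+        #JL three branches below: mocks must supply an explicit target file, while for
+        #JL real data the targets argument is optional and only forwarded when given.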
+ if mock: + + if targets is None: + raise ValueError('If processing mocks, you MUST specify a target file') + log.info('update loc a') + update_ledger(althpdirname, altZCat, obscon=obscon.upper(), + numobs_from_ledger=numobs_from_ledger, targets = targets) + didUpdateHappen = True + elif targets is None: + log.info('update loc b') + update_ledger(althpdirname, altZCat, obscon=obscon.upper(), + numobs_from_ledger=numobs_from_ledger) + didUpdateHappen = True + else: + log.info('update loc c') + update_ledger(althpdirname, altZCat, obscon=obscon.upper(), + numobs_from_ledger=numobs_from_ledger, targets = targets) + didUpdateHappen = True + assert(didUpdateHappen) + if verbose or debug: + log.info('if main, should sleep 1 second') + thisUTCDate = get_utc_date(survey=survey) + if survey == "main": + sleep(1) + if verbose or debug: + log.info('has slept one second') + t["ALTARCHIVEDATE"] = thisUTCDate + if verbose or debug: + log.info('now writing to amtl_tile_tracker') + #io.write_mtl_tile_file(altmtltilefn,dateTiles) + #write_amtl_tile_tracker(altmtldir, dateTiles, thisUTCDate, obscon = obscon, survey = survey) + log.info('changes are being registered') + log.info('altmtldir = {0}'.format(altmtldir)) + log.info('t = {0}'.format(t)) + log.info('thisUTCDate = {0}'.format(thisUTCDate)) + log.info('today = {0}'.format(today)) + retval = write_amtl_tile_tracker(altmtldir, [t], thisUTCDate, today, obscon = obscon, survey = survey, mode = 'update') + log.info('write_amtl_tile_tracker retval = {0}'.format(retval)) + if verbose or debug: + log.info('has written to amtl_tile_tracker') + + return althpdirname, altmtltilefn, ztilefn, UpdateTiles #@profile def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, altmtlbasedir=None, ndirs = 3, numobs_from_ledger=True, @@ -976,6 +1334,7 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, e.g., :func:`~LSS.SV3.altmtltools.initializeAlternateMTLs()`. """ + if mock: if targets is None: raise ValueError('If processing mocks, you MUST specify a target file') @@ -989,8 +1348,10 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, import logging logger=mp.log_to_stderr(logging.DEBUG) - if quickRestart: - quickRestartFxn(ndirs = ndirs, altmtlbasedir = altmtlbasedir, survey = survey, obscon = obscon, multiproc = multiproc, nproc = nproc) + + ### JL - Start of directory/loop variable construction ### + + # ADM first grab all of the relevant files. # ADM grab the MTL directory (in case we're relying on $MTL_DIR). mtldir = get_mtl_dir(mtldir) @@ -1007,10 +1368,7 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, else: log.info(msg.format("PRIMARY", obscon, survey)) - # ADM grab the zcat directory (in case we're relying on $ZCAT_DIR). - zcatdir = get_zcat_dir(zcatdir) - # ADM And contruct the associated ZTILE filename. - ztilefn = os.path.join(zcatdir, get_ztile_file_name()) + if altmtlbasedir is None: log.critical('This will automatically find the alt mtl dir in the future but fails now. 
Bye.') @@ -1021,322 +1379,70 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, iterloop = range(nproc, nproc+1) else: iterloop = range(ndirs) + ### JL - End of directory/loop variable construction ### + + + + if quickRestart: + quickRestartFxn(ndirs = ndirs, altmtlbasedir = altmtlbasedir, survey = survey, obscon = obscon, multiproc = multiproc, nproc = nproc) + + ### JL - this loop is through all realizations serially or (usually) one realization parallelized for n in iterloop: if debugOrig: altmtldir = altmtlbasedir else: altmtldir = altmtlbasedir + '/Univ{0:03d}/'.format(n) altmtltilefn = os.path.join(altmtldir, get_mtl_tile_file_name(secondary=secondary)) - + althpdirname = io.find_target_files(altmtldir, flavor="mtl", resolve=resolve, - survey=survey, obscon=obscon, ender=form) - # ADM grab an array of tiles that are yet to be processed. - # JL use modified function to automatically switch between - # JL original processing and reprocessing using TileTracker - #tiles = tiles_to_be_processed(zcatdir, altmtltilefn, obscon, survey) - tiles = tiles_to_be_processed_alt(altmtldir, obscon, survey) - #names=('TILEID', 'ORIGARCHIVEDATE', 'ZDATE', 'ALTARCHIVEDATE', 'REPROCFLAG', 'OVERRIDEFLAG', 'OBSCON', 'SURVEY'), - - # ADM stop if there are no tiles to process. - if len(tiles) == 0: - if (not multiproc) and (n != ndirs - 1): - continue - else: - if singleDate: - return 151 - else: - return althpdirname, mtltilefn, ztilefn, tiles + survey=survey, obscon=obscon, ender=form) + + altMTLTileTrackerFN = makeTileTrackerFN(altmtldir, survey = survey, obscon = obscon) + altMTLTileTracker = Table.read(altMTLTileTrackerFN) + today = altMTLTileTracker.meta['Today'] + endDate = altMTLTileTracker.meta['EndDate'] if not (singletile is None): tiles = tiles[tiles['TILEID'] == singletile] - try: - #sorttiles = np.sort(tiles, order = ['ARCHIVEDATE', 'ZDATE']) - #tiles.sort(keys = ['ARCHIVEDATE', 'ZDATE']) - tiles.sort(keys = ['ORIGTIMESTAMP', 'FAMTLTIME']) - sorttiles = tiles - except: - log.info(len(tiles)) - log.info(tiles.dtype) - #log.warn('sorting tiles on ARCHIVEDATE, ZDATE failed.') - log.warn('sorting tiles on ARCHIVEDATE, ZDATE failed.') - log.warn('currently we are aborting, but this may') - #log.warn('change in the future to switching to order by ZDATE') - log.warn('change in the future.')#' to switching to order by ZDATE') - raise NotImplementedError('This pipeline does not currently handle tile lists with an unsortable ARCHIVEDATE or without any ARCHIVEDATE whatsoever. 
Exiting.') - #sorttiles = np.sort(tiles, order = 'ZDATE') + if testDoubleDate: + raise NotImplementedError('this block needs to be moved for new organization of tiletracker.') log.info('Testing Rosette with Doubled Date only') cond1 = ((tiles['TILEID'] >= 298) & (tiles['TILEID'] <= 324)) cond2 = ((tiles['TILEID'] >= 475) & (tiles['TILEID'] <= 477)) log.info(tiles[tiles['TILEID' ] == 314]) log.info(tiles[tiles['TILEID' ] == 315]) tiles = tiles[cond1 | cond2 ] - datepairs = uniqueTimestampFATimePairs(sorttiles, withFlag = True) - #if singleDate: - # dates = np.sort(np.unique(sorttiles['ARCHIVEDATE'])) - #if debug: - # log.info('first and last 10 hopefully datepairs hopefully in order') - # log.info(datepairs[0:10]) - # log.info(datepairs[-10:-1]) - # log.info('first and last 10 hopefully (orig)zdates hopefully not in order') - # - # #log.info(sorttiles['ZDATE'][0:10]) - # #log.info(sorttiles['ZDATE'][-10:-1]) - #else: - - #for zd,ad,reprocFlag in datepairs: - for ots,famtlt,reprocFlag in datepairs: - #zdates = np.sort(np.unique(dateTiles['ZDATE'])) - log.info(len(sorttiles)) - log.info(sorttiles.dtype) - try: - dateTiles = sorttiles[sorttiles['ORIGTIMESTAMP'] == ots] - dateTiles = dateTiles[dateTiles['FAMTLTIME'] == famtlt] - dateTiles = dateTiles[dateTiles['REPROCFLAG'] == reprocFlag] - except: - dateTiles = sorttiles[sorttiles['ORIGTIMESTAMP'] == ots] - dateTiles = dateTiles[dateTiles['FAMTLTIME'] == famtlt] - - if debug: - log.info('inside dateLoop. ORIGTIMESTAMP is {0}'.format(ots)) - log.info('inside dateLoop. FAMTLTIME is {0}'.format(famtlt)) - log.info('singleDate = {0}'.format(singleDate)) + + #for ots,famtlt,reprocFlag in datepairs: + while int(today) <= int(endDate): + log.info('----------') + log.info('----------') + log.info('----------') + log.info('today = {0}'.format(today)) + log.info('----------') + log.info('----------') + log.info('----------') + log.info('----------') + + OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles = do_fiberassignment(altmtldir, survey = survey, obscon = obscon, today = today,verbose = verbose, debug = debug, getosubp = getosubp, redoFA = redoFA, mock = mock) + if len(OrigFAs): + A2RMap, R2AMap = make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, changeFiberOpt = changeFiberOpt, verbose = verbose, debug = debug, survey = survey , obscon = obscon, getosubp = getosubp, redoFA = redoFA, today = today) + althpdirname, altmtltilefn, ztilefn, tiles = update_alt_ledger(altmtldir,althpdirname, altmtltilefn, survey = survey, obscon = obscon, today = today,getosubp = getosubp, zcatdir = zcatdir, mock = mock, numobs_from_ledger = numobs_from_ledger, targets = targets, verbose = verbose, debug = debug) + retval = write_amtl_tile_tracker(altmtldir, None, None, today, obscon = obscon, survey = survey, mode = 'endofday') + log.info('write_amtl_tile_tracker retval = {0}'.format(retval)) + + today = nextDate(today) + log.info('----------') + log.info('----------') + log.info('----------') + log.info('moving to next day: {0}'.format(today)) + log.info('----------') + log.info('----------') + log.info('----------') - assert(len(np.unique(dateTiles['ORIGTIMESTAMP'])) == 1) - assert(len(np.unique(dateTiles['FAMTLTIME'])) == 1) - OrigFAs = [] - AltFAs = [] - AltFAs2 = [] - TSs = [] - fadates = [] - - for t in dateTiles: - #JL This loop takes each of the original fiberassignments for each of the tiles on $date - #JL and opens them to obtain information for the alternative fiber assignments. 
- #JL Then it runs the alternative fiber assignments, stores the results in an array (AltFAs) - #JL while also storing the original fiber assignment files in a different array (OrigFA) - - ts = str(t['TILEID']).zfill(6) - #JL Full path to the original fiber assignment from the real survey - FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz' - fhtOrig = fitsio.read_header(FAOrigName) - fadate = fhtOrig['RUNDATE'] - #JL stripping out the time of fiber assignment to leave only the date - #JL THIS SHOULD ONLY BE USED IN DIRECTORY NAMES. THE ACTUAL RUNDATE VALUE SHOULD INCLUDE A TIME - fadate = ''.join(fadate.split('T')[0].split('-')) - fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/' - log.info(t['REPROCFLAG']) - log.info(type(t['REPROCFLAG'])) - log.info(t["REPROCFLAG"] == True) - log.info(ts) - if t['REPROCFLAG']: - - log.info('reproc flag true') - if getosubp: - FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' - else: - FAMapName = fbadirbase + '/famap-' + ts + '.pickle' - with open(FAMapName,'rb') as fl: - (A2RMap, R2AMap) = pickle.load(fl,fix_imports = True) - - #zcat = make_zcat(zcatdir, dateTiles, obscon, survey) - zcat = make_zcat(zcatdir, [t], obscon, survey, allow_overlaps = True) - log.info('ts = {0}'.format(ts)) - altZCat = makeAlternateZCat(zcat, R2AMap, A2RMap) - reprocess_alt_ledger(altmtldir + '/{0}/{1}/'.format(survey.lower(), obscon.lower()), altZCat, fbadirbase, t, obscon=obscon) - if verbose or debug: - log.info('if main, should sleep 1 second') - thisUTCDate = get_utc_date(survey=survey) - if survey == "main": - sleep(1) - if verbose or debug: - log.info('has slept one second') - dateTiles["TIMESTAMP"] = thisUTCDate - if verbose or debug: - log.info('now writing to amtl_tile_tracker') - write_amtl_tile_tracker(altmtldir, [t], thisUTCDate, obscon = obscon, survey = survey) - else: - log.info('ts = {0}'.format(ts)) - log.info('t[reprocflag] (should be false if here)= {0}'.format(t['REPROCFLAG'])) - - #if str(ts) == str(3414).zfill(6): - # raise ValueError('Not only do I create the backup here but I also need to fix the reproc flag') - - if getosubp: - #JL When we are trying to reproduce a prior survey and/or debug, create a separate - #JL directory in fbadirbase + /orig/ to store the reproduced FA files. - FAAltName = fbadirbase + '/orig/fba-' + ts+ '.fits' - #FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' - fbadir = fbadirbase + '/orig/' - else: - - #JL For normal "alternate" operations, store the fiber assignments - #JL in the fbadirbase directory. - - FAAltName = fbadirbase + '/fba-' + ts+ '.fits' - #FAMapName = fbadirbase + '/famap-' + ts + '.pickle' - fbadir = fbadirbase - log.info('FAOrigName = {0}'.format(FAOrigName)) - - log.info('FAAltName = {0}'.format(FAAltName)) - - #JL Sometimes fiberassign leaves around temp files if a run is aborted. - #JL This command removes those temp files to prevent endless crashes. 
- if os.path.exists(FAAltName + '.tmp'): - os.remove(FAAltName + '.tmp') - #JL If the alternate fiberassignment was already performed, don't repeat it - #JL Unless the 'redoFA' flag is set to true - log.info('redoFA = {0}'.format(redoFA)) - log.info('FAAltName = {0}'.format(FAAltName)) - - if redoFA or (not os.path.exists(FAAltName)): - if verbose and os.path.exists(FAAltName): - log.info('repeating fiberassignment') - elif verbose: - log.info('fiberassignment not found, running fiberassignment') - if verbose: - log.info(ts) - log.info(altmtldir + survey.lower()) - log.info(fbadir) - log.info(getosubp) - log.info(redoFA) - if getosubp and verbose: - log.info('checking contents of fiberassign directory before calling get_fba_from_newmtl') - log.info(glob.glob(fbadir + '/*' )) - get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock)#, targets = targets) - command_run = (['bash', fbadir + 'fa-' + ts + '.sh']) - if verbose: - log.info('fa command_run') - log.info(command_run) - result = subprocess.run(command_run, capture_output = True) - else: - log.info('not repeating fiberassignment') - log.info('adding fiberassignments to arrays') - OrigFAs.append(pf.open(FAOrigName)[1].data) - AltFAs.append(pf.open(FAAltName)[1].data) - AltFAs2.append(pf.open(FAAltName)[2].data) - TSs.append(ts) - fadates.append(fadate) - # ADM create the catalog of updated redshifts. - log.info('making zcats') - zcat = make_zcat(zcatdir, [t], obscon, survey) - # ADM insist that for an MTL loop with real observations, the zcat - # ADM must conform to the data model. In particular, it must include - # ADM ZTILEID, and other columns addes for the Main Survey. These - # ADM columns may not be needed for non-ledger simulations. - # ADM Note that the data model differs with survey type. - zcatdm = survey_data_model(zcatdatamodel, survey=survey) - if zcat.dtype.descr != zcatdm.dtype.descr: - msg = "zcat data model must be {} not {}!".format( - zcatdm.dtype.descr, zcat.dtype.descr) - log.critical(msg) - raise ValueError(msg) - # ADM useful to know how many targets were updated. 
- _, _, _, _, sky, _ = decode_targetid(zcat["TARGETID"]) - ntargs, nsky = np.sum(sky == 0), np.sum(sky) - msg = "Update state for {} targets".format(ntargs) - msg += " (the zcats also contain {} skies with +ve TARGETIDs)".format(nsky) - log.info(msg) - - A2RMap = {} - R2AMap = {} - log.info('beginning loop through FA files') - for ofa, afa, afa2, ts in zip (OrigFAs, AltFAs, AltFAs2, TSs): - log.info('ts = {0}'.format(ts)) - if changeFiberOpt is None: - #if debug: - # tempsortofa = np.sort(ofa, order = 'FIBER') - # tempsortafa = np.sort(afa, order = 'FIBER') - # - # - # tempsortofa = np.sort(ofa, order = 'TARGETID') - # tempsortafa = np.sort(afa, order = 'TARGETID') - - A2RMapTemp, R2AMapTemp = createFAmap(ofa, afa, changeFiberOpt = changeFiberOpt) - else: - raise NotImplementedError('changeFiberOpt has not yet been implemented') - - #FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz' - - A2RMapTemp, R2AMapTemp = createFAmap(ofa, afa, TargAlt = afa2, changeFiberOpt = changeFiberOpt) - - fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/' - if getosubp: - FAAltName = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/orig/fba-' + ts+ '.fits' - FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' - fbadir = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/orig/' - else: - - FAAltName = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/fba-' + ts+ '.fits' - FAMapName = fbadirbase + '/famap-' + ts + '.pickle' - fbadir = fbadirbase - - - if debug: - log.info('ts = {0}'.format(ts)) - log.info('FAMapName = {0}'.format(FAMapName)) - #JL This line (updating rather than checking for already existing keys) only works if - #JL there are only single passes of tiles per loop iteration. This is the case, but may not - #JL always be true. - log.info('updating map dicts') - A2RMap.update(A2RMapTemp) - R2AMap.update(R2AMapTemp) - - if redoFA or (not (os.path.isfile(FAMapName))): - log.info('dumping out fiber map to pickle file') - with open(FAMapName, 'wb') as handle: - pickle.dump((A2RMapTemp, R2AMapTemp), handle, protocol=pickle.HIGHEST_PROTOCOL) - log.info('---') - log.info('---') - log.info('---') - log.info('---') - log.info('---') - log.info('unique keys in R2AMap = {0:d}'.format(np.unique(R2AMap.keys()).shape[0])) - log.info('---') - log.info('---') - log.info('---') - log.info('---') - log.info('---') - log.info('---') - log.info('---') - - altZCat = makeAlternateZCat(zcat, R2AMap, A2RMap) - - - - # ADM update the appropriate ledger. 
-            if mock:
-                if targets is None:
-                    raise ValueError('If processing mocks, you MUST specify a target file')
-
-                update_ledger(althpdirname, altZCat, obscon=obscon.upper(),
-                              numobs_from_ledger=numobs_from_ledger, targets = targets)
-            elif targets is None:
-                update_ledger(althpdirname, altZCat, obscon=obscon.upper(),
-                              numobs_from_ledger=numobs_from_ledger)
-            else:
-                update_ledger(althpdirname, altZCat, obscon=obscon.upper(),
-                              numobs_from_ledger=numobs_from_ledger, targets = targets)
-            if verbose or debug:
-                log.info('if main, should sleep 1 second')
-            thisUTCDate = get_utc_date(survey=survey)
-            if survey == "main":
-                sleep(1)
-            if verbose or debug:
-                log.info('has slept one second')
-            dateTiles["TIMESTAMP"] = thisUTCDate
-            if verbose or debug:
-                log.info('now writing to amtl_tile_tracker')
-            #io.write_mtl_tile_file(altmtltilefn,dateTiles)
-            #write_amtl_tile_tracker(altmtldir, dateTiles, thisUTCDate, obscon = obscon, survey = survey)
-            log.info('changes are being registered')
-            write_amtl_tile_tracker(altmtldir, [t], thisUTCDate, obscon = obscon, survey = survey)
-            if verbose or debug:
-                log.info('has written to amtl_tile_tracker')
-            if singleDate:
-                #return 1
-                return althpdirname, altmtltilefn, ztilefn, tiles
+    return althpdirname, altmtltilefn, ztilefn, tiles

 def plotMTLProb(mtlBaseDir, ndirs = 10, hplist = None, obscon = 'dark', survey = 'sv3', outFileName = None, outFileType = '.png', jupyter = False, debug = False, verbose = False):
@@ -1969,7 +2075,7 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"):

     return timedict

-def write_amtl_tile_tracker(dirname, tiles, timestamp, obscon = 'dark', survey = 'main'):
+def write_amtl_tile_tracker(dirname, tiles, timestamp, today, obscon = 'dark', survey = 'main', mode = 'fa'):
     """Write AMTL Processing times into TileTrackers

     Parameters
     ----------
@@ -1999,13 +2105,27 @@

     if os.path.isfile(TileTrackerFN):
         TileTracker = Table.read(TileTrackerFN, format = 'ascii.ecsv')
+    if mode.lower() == 'update':
+        dateKey = 'ALTARCHIVEDATE'
+    elif mode.lower() == 'fa':
+        dateKey = 'ALTFADATE'
+    elif mode.lower() == 'endofday':
+        TileTracker.meta['Today'] = today
+        TileTracker.write(TileTrackerFN, format = 'ascii.ecsv', overwrite = True)
+        return 'only wrote today in metadata'
+    else:
+        #JL guard against unrecognized modes so dateKey cannot be referenced before assignment below
+        raise ValueError('mode must be one of `fa`, `update`, or `endofday`. You provided {0}'.format(mode))
     for t in tiles:
         tileid = t['TILEID']
         reprocFlag = t['REPROCFLAG']
         cond = (TileTracker['TILEID'] == tileid) & (TileTracker['REPROCFLAG'] == reprocFlag)
         log.info('for tile {0}, number of matching tiles = {1}'.format(tileid, np.sum(cond)))
-        debugTrap = np.copy(TileTracker['ALTARCHIVEDATE'])
-        TileTracker['ALTARCHIVEDATE'][cond] = timestamp
+        #debugTrap = np.copy(TileTracker[dateKey])
+        TileTracker[dateKey][cond] = timestamp

-    assert(not (np.all(TileTracker['ALTARCHIVEDATE'] is None)))
-    TileTracker.write(TileTrackerFN, format = 'ascii.ecsv', overwrite = True)
\ No newline at end of file
+    #JL `is None` applied to a whole column is always False, which made the old assert
+    #JL a no-op; compare elementwise against None instead, as in the selections above.
+    assert(not (np.all(TileTracker[dateKey] == None)))
+
+    if mode == 'update':
+        todaysTiles = TileTracker[TileTracker['ORIGMTLDATE'] == today]
+        #if np.sum(todaysTiles['ALTARCHIVEDATE'] == None) == 0:
+
+    TileTracker.write(TileTrackerFN, format = 'ascii.ecsv', overwrite = True)
+    return 'wrote more than just today in metadata'
\ No newline at end of file
diff --git a/py/LSS/SV3/fatools.py b/py/LSS/SV3/fatools.py
index e87b06352..eefa6d4af 100644
--- a/py/LSS/SV3/fatools.py
+++ b/py/LSS/SV3/fatools.py
@@ -26,11 +26,15 @@

 #hardcode target directories; these are fixed
+#JL - These change once at the very beginning of 
observations.
+#JL - Adding a format string to change versioning of photometry.
 skydir = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.57.0/skies'
-skydirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/skies'
+#skydirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/skies'
+skydirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/{0}/skies'
 tdir = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.57.0/targets/sv3/resolve/'
-tdirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/'
+#tdirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/'
+tdirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/{0}/targets/main/resolve/'

 # AR default REF_EPOCH for PMRA=PMDEC=REF_EPOCH=0 objects
 gaia_ref_epochs = {"dr2": 2015.5}

@@ -267,7 +271,7 @@ def redo_fba_fromorig(tileid,outdir=None,faver=None, verbose = False,survey='mai
     fo.close()


-def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None, overwriteFA = False,newdir=None, verbose = False, mock = False, mtltime = None):
+def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None, overwriteFA = False,newdir=None, verbose = False, mock = False, targver = '1.1.1'):
     ts = str(tileid).zfill(6)
     #get info from origin fiberassign file
     fht = fitsio.read_header('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz')
@@ -366,10 +370,9 @@ def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None,
             gaiadr,
             fht['PMCORR'],
             tarfn,
-            tdirMain+prog,
+            tdirMain.format(targver)+prog,
             survey = 'main',
-            mock = mock,
-            mtltime=mtltime)
+            mock = mock)
     else:
         log.critical('invalid input directory. must contain either sv3, main, or holding')
         raise ValueError('indir must contain either sv3, main, or holding')
@@ -558,10 +561,13 @@ def altcreate_mtl(

     try:
         log.info('shape of read_targets_in_tiles output')
-        log.info(d.shape)
+        log.info(d.shape)
+        #JL d.shape is a tuple; take its first element so the assert below checks the actual target count
+        ntargs = d.shape[0]
     except:
         log.info('len of read_targets_in_tiles output post failure of shape')
         log.info(len(d))
+        ntargs = len(d)
+
+    assert(ntargs)
     # AR mtl: removing by hand BACKUP_BRIGHT for sv3/BACKUP
     # AR mtl: using an indirect way to find if program=backup,
     # AR mtl: to avoid the need of an extra program argument

From 5992bfa1575e982acb820149c69752e852369620 Mon Sep 17 00:00:00 2001
From: jalasker
Date: Fri, 6 Oct 2023 08:58:57 -0700
Subject: [PATCH 003/297] First attempt at refactoring code based on `today`
 rather than a preordered list of tiles. Added more missing files to commit.

---
 bin/MockAltMTLScriptMain.sh  | 303 +++++++++++++++++++++++++++++++++++
 py/LSS/SV3/amtlplots.py      | 165 +++++++++++++++++++
 py/LSS/SV3/reprotests.py     |   5 +
 py/LSS/cosmodesi_io_tools.py |  15 +-
 4 files changed, 486 insertions(+), 2 deletions(-)
 create mode 100755 bin/MockAltMTLScriptMain.sh
 create mode 100644 py/LSS/SV3/reprotests.py

diff --git a/bin/MockAltMTLScriptMain.sh b/bin/MockAltMTLScriptMain.sh
new file mode 100755
index 000000000..3c366de46
--- /dev/null
+++ b/bin/MockAltMTLScriptMain.sh
@@ -0,0 +1,303 @@
+#!/bin/bash
+start=`date +%s.%N`
+
+#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written
+simName='JL_MockDebugNoTargFile053123'
+
+#Location where you have cloned the LSS Repo
+path2LSS=~/.local/desicode/LSS/bin/
+
+# Flags for debug/verbose mode/profiling code time usage. 
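+# Each flag variable below is spliced verbatim into the srun/python commands at
+# the bottom of this script, so the empty-string form drops the flag entirely
+# while the dashed form forwards it, e.g. (schematically):
+#   InitializeAltMTLsParallel.py ... $debug $verbose ... >& $OFIM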
+# Uncomment second set of options to turn on the modes +#debug='' +#verbose='' +#profile='' +debug='--debug' +verbose='--verbose' +profile='--profile' + +#Uncomment second option if running on mocks +#mock='' +mock='--mock' + +#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory +#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" +#However, you should specify your own directory to a. not overwrite the survey alt MTLs +# and b. keep your alt MTLs somewhere that you have control/access + +#Uncomment the following line to set your own/nonscratch directory +#ALTMTLHOME=/path/to/your/directory/ + + +if [[ "${NERSC_HOST}" == "cori" ]]; then + CVal='haswell' + QVal='interactive' + ProcPerNode=32 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$CSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi +elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then + srunConfig='-C cpu -q regular' + CVal='cpu' + QVal='interactive' + ProcPerNode=128 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$PSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi + +else + echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" + exit 1234 +fi + + + + +#Options for InitializeAltMTLs + +#Random seed. Change to any integer you want (or leave the same) +#If seed is different between two otherwise identical runs, the initial MTLs will also be different +#seed is also saved in output directory +seed=151 + +#Number of realizations to generate. Ideally a multiple of 64 for bitweights +#However, you can choose smaller numbers for debugging +ndir=2 + +#Uncomment second option if you want to clobber already existing files for Alt MTL generation +overwrite='' +#overwrite='--overwrite' + +#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") +obscon='DARK' + +#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) +survey='main' +#survey='sv3' +#StartDate options are default None (empty strings). Uncommenting the second options will set them to the SV3 start and end dates. +startDate='' +endDate='' +#startDate=20210406 +#endDate=20210625 + +#For rundate formatting in simName, either manually modify the string below +#to be the desired date or comment that line out and uncomment the +#following line to autogenerate date strings. +#To NOT use any date string specification, use the third line, an empty string +#datestring='071322' +#datestring=`date +%y%m%d` +datestring='' + +#Can save time in MTL generation by first writing files to local tmp directory and then copying over later +#uncommenting the second option will directly write to your output directory +usetmp='' +#usetmp='--dontUseTemp' + +if [ -z $usetmp ] +then + outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` +else + outputMTLDirBaseBase=$ALTMTLHOME +fi +printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey +printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey + +#List of healpixels to create Alt MTLs for +hpListFile="$path2LSS/MainSurveyHPList_mock.txt" +#hpListFile="$path2LSS/SV3HPList.txt" + +#These two options only are considered if the obscon is BRIGHT +#First option indicates whether to shuffle the top level priorities +#of BGS_FAINT/BGS_FAINT_HIP. 
Uncomment second option to turn off shuffling of bright time priorities
+#Second option indicates what fraction/percent
+#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3
+
+shuffleBrightPriorities='--shuffleBrightPriorities'
+#shuffleBrightPriorities=''
+
+PromoteFracBGSFaint=0.2
+
+# location of original MTLs to shuffle.
+# Default directory is a read only mount of the CFS filesystem
+# You can only access that directory from compute nodes.
+# Do NOT use the commented out directory (the normal mount of CFS)
+# unless the read only mount is broken
+#exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/
+#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/
+#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/
+exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/
+#Options for DateLoopAltMTL and runAltMTLParallel
+
+#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files).
+#Default = Empty String/False. Uncomment second option if you want to restart from the first observations
+echo "Fix QR resetting for new argparse usage"
+qR=''
+#qR='-qr'
+
+#Number of observation dates to loop through
+#Defaults to 40 dates for SV3
+NObsDates=400
+
+#Number of nodes to run on. This will launch up to 64*N jobs
+#if that number of alternate universes have already been generated
+#Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually
+NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode ))
+#echo $NNodes
+#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs
+#This should only be turned on for SV testing/debugging purposes
+#This should not be required for main survey debugging.
+getosubp=''
+#getosubp='--getosubp'
+
+#shuffleSubpriorities(reproducing) must be left as empty strings to ensure
+#subpriorities are shuffled. debug mode for main survey
+#will only require these flags to be set by uncommenting second options
+
+#dontShuffleSubpriorities=''
+#reproducing=''
+dontShuffleSubpriorities='--dontShuffleSubpriorities'
+reproducing='--reproducing'
+#Include secondary targets?
+secondary=''
+#secondary='--secondary'
+
+
+#If running from mocks, must set target directory.
+#Otherwise this is optional
+targfile=''
+#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory
+#targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' #WITHOUT PHOTSYS
+#targfile='--targfile=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' #WITH PHOTSYS
+
+
+#Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger
+numobs_from_ledger=''
+#numobs_from_ledger='--NumObsNotFromLedger'
+
+#Uncomment second line to force redo fiber assignment if it has already been done.
+redoFA=''
+#redoFA='--redoFA'
+
+
+#Options for MakeBitweightsParallel
+#True/False(1/0) as to whether to split bitweight calculation
+#among nodes by MPI between realizations
+splitByReal=0
+
+#Split the calculation of bitweights into splitByChunk
+#chunks of healpixels.
+splitByChunk=100
+
+#Set to true if you want to clobber already existing bitweight files
+overwrite2=1
+
+#Actual running of scripts
+
+#Copy this script to output directory for reproducibility
+thisFileName=$outputMTLFinalDestination/$0
+
+echo $thisFileName
+
+if [ -f "$thisFileName" ]
+then
+    echo "File is found. 
Checking to see it is identical to the original."
+    cmp $0 $thisFileName
+    comp=$?
+    if [[ $comp -eq 1 ]]
+    then
+        echo "Files are not identical."
+        echo "If this is intended, please delete or edit the original copied script at $thisFileName"
+        echo "If this is unintended, you can reuse the original copied script at that same location"
+        echo "goodbye"
+        exit 3141
+    elif [[ $comp -eq 0 ]]
+    then
+        echo "files are same, continuing"
+    else
+        echo "Something has gone very wrong. Exit code for cmp was $comp"
+        exit $comp
+    fi
+else
+    echo "Copied script is not found. Copying now, making directories as needed."
+    mkdir -p $outputMTLFinalDestination
+    cp $SLURM_SUBMIT_DIR/$0 $outputMTLFinalDestination/$0
+fi
+
+if [ -d "$outputMTLFinalDestination" ]
+then
+    echo "output final directory exists"
+    echo $outputMTLFinalDestination
+else
+    echo "output final directory does not exist. Creating and copying script there"
+    mkdir -p $outputMTLFinalDestination
+    cp $0 $outputMTLFinalDestination
+fi
+
+#mark the run only when getosubp is actually set (nonempty)
+if [ -n "$getosubp" ]
+then
+    touch $outputMTLFinalDestination/GetOSubpTrue
+fi
+
+printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
+
+echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose $startDate $endDate >& $OFIM"
+srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose $startDate $endDate >& $OFIM
+if [ $? -ne 0 ]; then
+    endInit=`date +%s.%N`
+    runtimeInit=$( echo "$endInit - $start" | bc -l )
+    echo "runtime for initialization"
+    echo $runtimeInit
+    exit 1234
+fi
+
+endInit=`date +%s.%N`
+runtimeInit=$( echo "$endInit - $start" | bc -l )
+echo "runtime for initialization"
+echo $runtimeInit
+
+printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
+
+runtimeInit=$( echo "$endInit - $start" | bc -l )
+argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile"
+nohup bash $path2LSS/dateLoopAltMTL.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL
+exit 123454
+
+endDL=`date +%s.%N`
+
+if [ $? 
-ne 0 ]; then + runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) + echo "runtime for Dateloop of $NObsDates days" + echo $runtimeDateLoop + exit 12345 +fi + +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop + +if [ $splitByReal -ne 0 ]; then + printf -v OFBW "%s/MakeBitweights%sOutputCase1%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring + srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py $survey $obscon $ndir $splitByReal $splitByChunk $hpListFile $outputMTLFinalDestination $overwrite2 >& $OFBW +else + printf -v OFBW "%s/MakeBitweights%sOutputCase2%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring + srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py $survey $obscon $ndir $splitByReal $splitByChunk $hpListFile $outputMTLFinalDestination $overwrite2 >& $OFBW +fi + +endBW=`date +%s.%N` + + + +runtimeInit=$( echo "$endInit - $start" | bc -l ) +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +runtimeBitweights=$( echo "$endBW - $endDL" | bc -l ) + +echo "runtime for initialization" +echo $runtimeInit +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop +echo "runtime for making bitweights" +echo $runtimeBitweights diff --git a/py/LSS/SV3/amtlplots.py b/py/LSS/SV3/amtlplots.py index e69de29bb..9df9f75d6 100644 --- a/py/LSS/SV3/amtlplots.py +++ b/py/LSS/SV3/amtlplots.py @@ -0,0 +1,165 @@ +import fitsio +from astropy.table import Table, join, vstack +import matplotlib.pyplot as plt +import numpy as np +import sys +import importlib.util +import scipy.stats as stats +import desitarget.io +import matplotlib as mpl +from desitarget.geomask import is_in_hp, nside2nside, pixarea2nside +from desimodel.footprint import is_point_in_desi, tiles2pix +from desitarget.mtl import add_to_iso_date + + +def tile2timestamp(MTLDir, TileID): + doneTilesFile = Table.read(MTLDir + 'mtl-done-tiles.ecsv') + cond = doneTilesFile['TILEID'] == TileID + return doneTilesFile[cond]['TIMESTAMP'] + +def warpCoordsAroundCenter(RAs, Decs, RACent = None): + if RACent is None: + print('WARNING: Taking RA Center to be mean of all RA Coordinates.') + RACent = np.mean(RAs) + + RACorr = RACent + (RACent - RAs)/np.cos(np.deg2rad(Decs)) + return RACorr, Decs + +def TB2Colors(mtl): + TB = mtl['DESI_TARGET'] + numobs = mtl['NUMOBS'] + ttypes = ['ELG', 'QSO', 'LRG'] + tbits = [2,4,1] + tcolors = ['g', 'b', 'r'] + ColorArray = np.zeros(TB.shape, dtype = str) + for ttype, tbit, tc in zip(ttypes, tbits, tcolors): + for tb, no, ii in zip(TB, numobs, range(TB.shape[0])): + isTtype = (tb & tbit) == tbit + if isTtype: + #print('assigning a color') + np.put(ColorArray, ii, tc) + + return ColorArray + + +def completenessPlot2D(BaseDir, HPList, obscon = 'dark', survey = 'main', isodate = None, spacing = 5, CompQty = 'PROB_OBS', + RALimits = (-180, +180), nbinsRA = 360, nbinsDec = 180, DecLimits = (-90, +90), radius = 2, RACent = None, DecCent = None, useBitweightFile = False, + figsize = (12,8), rosetteNum = 'Not Specified', datadir = '/global/cfs/cdirs/desi/public/edr/vac/edr/lss/v2.0/LSScats/full/', dataFN = 'all', verbose = True): + if not RACent is None: + assert(RALimits[0] < RACent) + assert(RALimits[1] > RACent) + + if not DecCent is None: + assert(DecLimits[0] < DecCent) + assert(DecLimits[1] > DecCent) + #MTL = desitarget.io.read_mtl_in_hp(BaseDir + 'Univ000/{0}/{1}/'.format(survey, obscon), 32, GoodHPList, 
unique=True, isodate=isodate, returnfn=False, initial=False, leq=False)
+    if (survey == 'sv3') and (BaseDir == '/global/cfs/cdirs/desi/public/edr/vac/edr/lss/v2.0/altmtl/'):
+        if useBitweightFile:
+            raise ValueError('Cannot use a separate bitweight file directly if using the EDR products.')
+        TIDsInHP = desitarget.io.read_targets_in_hp(BaseDir + 'Univ000/{0}/'.format(obscon), 32, HPList, columns=['TARGETID'], mtl = True)
+    elif useBitweightFile:
+        TIDsInHP = desitarget.io.read_targets_in_hp(BaseDir + 'Univ000/{0}/{1}/'.format(survey, obscon), 32, HPList, columns=['TARGETID'], mtl = True)
+        try:
+            BWFile = Table.read(BaseDir + '/BitweightFiles/{0}/{1}/{0}bw-{1}-allTiles.fits'.format(survey, obscon))
+        except:
+            #JL pass the function's HPList parameter here; the lowercase `hplist` was an undefined name
+            concatenateBWFiles(BaseDir, HPList, obscon = obscon, skipFailures=True, overwrite = True)
+            BWFile = Table.read(BaseDir + '/BitweightFiles/{0}/{1}/{0}bw-{1}-allTiles.fits'.format(survey, obscon))
+    else:
+        TIDsInHP = desitarget.io.read_targets_in_hp(BaseDir + 'Univ000/{0}/{1}/'.format(survey, obscon), 32, HPList, columns=['TARGETID'], mtl = True)
+
+
+    TIDsInHP = Table([TIDsInHP['TARGETID']], names = ['TARGETID'])
+    if (obscon == 'dark') and (dataFN == 'all'):
+        dataFile = vstack((Table.read(datadir + 'LRG_full.dat.fits'), Table.read(datadir + 'ELGnotqso_full.dat.fits'), Table.read(datadir + 'QSO_full.dat.fits')))
+    elif (obscon == 'bright') and (dataFN == 'all'):
+        dataFile = Table.read(datadir + 'BGS_ANY_full.dat.fits')
+    elif type(dataFN) == type('a'):
+        dataFile = Table.read(datadir + dataFN)
+    else:
+        dataFile = None
+
+    if not (dataFile is None):
+        combo = join(TIDsInHP, dataFile, keys = ['TARGETID'])
+    else:
+        pass
+
+    if useBitweightFile:
+        combo = join(combo, BWFile, keys = ['TARGETID'])
+
+    if (survey == 'sv3'):
+        NRosettes, counts = np.unique(combo['ROSETTE_NUMBER'], return_counts = True)
+        if len(NRosettes) == 1:
+            ind = np.argmin(combo['ROSETTE_R'])
+            rc = combo['RA'][ind]
+            dc = combo['DEC'][ind]
+            RALimits = ((rc - radius/np.cos(np.radians(dc))), (rc + radius/np.cos(np.radians(dc))))
+            DecLimits = ((dc - radius), (dc + radius))
+        elif (len(NRosettes) > 1) & (len(NRosettes) < 5):
+            indRN = np.argmax(counts)
+            combo = combo[combo['ROSETTE_NUMBER'] == NRosettes[indRN]]
+            indRad = np.argmin(combo['ROSETTE_R'])
+            rc = combo['RA'][indRad]
+            dc = combo['DEC'][indRad]
+            RALimits = ((rc - radius/np.cos(np.radians(dc))), (rc + radius/np.cos(np.radians(dc))))
+            DecLimits = ((dc - radius), (dc + radius))
+
+
+
+
+    if RALimits is None:
+        binsRA = nbinsRA
+    else:
+        binsRA = np.linspace(RALimits[0], RALimits[-1], nbinsRA)
+
+    if DecLimits is None:
+        binsDec = nbinsDec
+    else:
+        binsDec = np.linspace(DecLimits[0], DecLimits[-1], nbinsDec)
+
+    if CompQty == 'diff':
+        hist2D, binsRA, binsDec, binnum = stats.binned_statistic_2d(combo['RA'], combo['DEC'], combo['PROB_OBS'] - combo['FRACZ_TILELOCID'], statistic = 'mean', bins = (binsRA, binsDec))
+        #hist2DFZ, binsRA, binsDec, binnum = stats.binned_statistic_2d(combo['RA'], combo['DEC'], combo['FRACZ_TILELOCID'], statistic = 'mean', bins = (binsRA, binsDec))
+        #hist2D = hist2DPO - hist2DFZ
+    else:
+        hist2D, binsRA, binsDec, binnum = stats.binned_statistic_2d(combo['RA'], combo['DEC'], combo[CompQty], statistic = 'mean', bins = (binsRA, binsDec))
+
+    plt.figure(figsize = figsize)
+    if dataFN == 'all':
+        plt.title('rosetteNum = {0}; all {1} tracers'.format(rosetteNum, obscon))
+    else:
+        plt.title('rosetteNum = {0}; {1}'.format(rosetteNum, dataFN.split('.')[0]))
+
+
+
+
+    if CompQty == 'diff':
+        cmap = mpl.cm.RdBu
+        norm = 
mpl.colors.Normalize(vmin=-0.2, vmax=+0.2) + else: + cmap = mpl.cm.viridis + norm = mpl.colors.Normalize(vmin=0, vmax=1) + + mappable = mpl.cm.ScalarMappable(cmap = cmap, norm = norm) + + cb = plt.colorbar(mappable = mappable) + if CompQty == 'diff': + cb.set_label(label='PROB_OBS - FRACZ_TILELOCID',size = 20, labelpad = 10) + else: + cb.set_label(label='{0}'.format(CompQty),size = 20, labelpad = 10) + + + + plt.imshow(hist2D.T, origin = 'lower', cmap = cmap, norm = norm) + plt.xlabel('RA') + plt.ylabel('Dec') + plt.xticks(ticks = np.arange(nbinsRA)[0::spacing], labels = np.around(binsRA[0::spacing], decimals = 2)) + plt.yticks(ticks = np.arange(nbinsDec)[0::spacing], labels = np.around(binsDec[0::spacing], decimals = 2)) + plt.tight_layout() + if verbose: + print('RALimits = {0}'.format(RALimits)) + print('DecLimits = {0}'.format(DecLimits)) + print('nanminRA, nanmaxRA, nanminDec, nanmaxDec') + print(np.nanmin(combo['RA'])) + print(np.nanmax(combo['RA'])) + print(np.nanmin(combo['DEC'])) + print(np.nanmax(combo['DEC'])) \ No newline at end of file diff --git a/py/LSS/SV3/reprotests.py b/py/LSS/SV3/reprotests.py new file mode 100644 index 000000000..8bf6031d6 --- /dev/null +++ b/py/LSS/SV3/reprotests.py @@ -0,0 +1,5 @@ +import numpy as np +import matplotlib.pyplot as plt +import astropy.io.fits as pf +from astropy.table import Table, vstack, join + diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index f59c663d0..f4b69b52d 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -63,7 +63,9 @@ def get_zlims(tracer, tracer2=None, option=None): logger.warning('extended is no longer a meaningful option') #zlims = [0.8, 1.1, 1.6] if 'smallshells' in option: - zlims = [0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6] + zlims = [0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6] + if 'uchuu' in option.lower(): + zlims = [0.88, 1.00, 1.16, 1.34] if tracer.startswith('QSO'): zlims = [0.8, 1.1, 1.6, 2.1] @@ -144,16 +146,25 @@ def _format_bitweights(bitweights): def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim=None, weight_type='default', name='data', return_mask=False, option=None): + try: + logger.info(catalog.shape) + except: + try: + logger.info(len(catalog)) + except: + logger.info('catalog has neither shape nor len') if maglim is None: mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) if maglim is not None: mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) & (catalog['ABSMAG_R'] >= maglim[0]) & (catalog['ABSMAG_R'] < maglim[1]) - + logger.info('np.sum(mask) = {0:d}'.format(np.sum(mask))) if option: if 'elgzmask' in option: zmask = ((catalog['Z'] >= 1.49) & (catalog['Z'] < 1.52)) mask &= ~zmask + logger.info('np.sum(mask) = {0:d}'.format(np.sum(mask))) + logger.info('Using {:d} rows for {}.'.format(mask.sum(), name)) positions = [catalog['RA'][mask], catalog['DEC'][mask], distance(catalog['Z'][mask])] weights = np.ones_like(positions[0]) From e37d33ea6b6c660655467cd64873664ba117636a Mon Sep 17 00:00:00 2001 From: jalasker Date: Fri, 6 Oct 2023 09:09:07 -0700 Subject: [PATCH 004/297] added untracked files to enable git pull. 
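
Untracked files that an incoming pull would overwrite make `git pull` abort,
so these working copies are committed first. For reference, the blocking
files can be listed ahead of time with standard git:

    git status --porcelain | grep '^??'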
--- bin/#LOCAL_SurveyAltMTLScript.sh# | 282 ++++++++ bin/Y1DataReproductionScript.sh | 316 ++++++++ scripts/mock_tools/Bad_fiber_lists.py | 163 +++++ .../LRG+BGS_bad_fiber_list3sigfid.py | 176 +++++ scripts/mock_tools/comp_zstats_specrels.py | 272 +++++++ .../compare_snapshot_dir_with_live.py | 22 + scripts/mock_tools/getLRGmask.py | 186 +++++ scripts/mock_tools/getLRGmask_tar.py | 161 +++++ scripts/mock_tools/get_speccon.py | 211 ++++++ scripts/mock_tools/getmask_type.py | 195 +++++ scripts/mock_tools/lss_cat_match_dr16.py | 102 +++ scripts/mock_tools/mkBGS_flavors.py | 127 ++++ scripts/mock_tools/mkBGS_flavors_kEE.py | 125 ++++ scripts/mock_tools/mkCat_tar4ang.py | 111 +++ scripts/mock_tools/mkemlin.py | 100 +++ scripts/mock_tools/mknzplots.py | 66 ++ scripts/mock_tools/perfiber_success_stats.py | 175 +++++ scripts/mock_tools/pkrun.py | 278 ++++++++ scripts/mock_tools/qso_cat_match_dr16q.py | 113 +++ scripts/mock_tools/readwrite_pixel_bitmask.py | 145 ++++ scripts/mock_tools/recon.py | 193 +++++ scripts/mock_tools/summary_numbers.py | 51 ++ scripts/mock_tools/xiruncz.py | 193 +++++ scripts/mock_tools/xirunpc.py | 672 ++++++++++++++++++ scripts/xirunpc.py | 2 + 25 files changed, 4437 insertions(+) create mode 100755 bin/#LOCAL_SurveyAltMTLScript.sh# create mode 100755 bin/Y1DataReproductionScript.sh create mode 100644 scripts/mock_tools/Bad_fiber_lists.py create mode 100644 scripts/mock_tools/LRG+BGS_bad_fiber_list3sigfid.py create mode 100644 scripts/mock_tools/comp_zstats_specrels.py create mode 100644 scripts/mock_tools/compare_snapshot_dir_with_live.py create mode 100644 scripts/mock_tools/getLRGmask.py create mode 100644 scripts/mock_tools/getLRGmask_tar.py create mode 100644 scripts/mock_tools/get_speccon.py create mode 100644 scripts/mock_tools/getmask_type.py create mode 100755 scripts/mock_tools/lss_cat_match_dr16.py create mode 100644 scripts/mock_tools/mkBGS_flavors.py create mode 100644 scripts/mock_tools/mkBGS_flavors_kEE.py create mode 100644 scripts/mock_tools/mkCat_tar4ang.py create mode 100644 scripts/mock_tools/mkemlin.py create mode 100644 scripts/mock_tools/mknzplots.py create mode 100644 scripts/mock_tools/perfiber_success_stats.py create mode 100644 scripts/mock_tools/pkrun.py create mode 100644 scripts/mock_tools/qso_cat_match_dr16q.py create mode 100644 scripts/mock_tools/readwrite_pixel_bitmask.py create mode 100644 scripts/mock_tools/recon.py create mode 100644 scripts/mock_tools/summary_numbers.py create mode 100644 scripts/mock_tools/xiruncz.py create mode 100644 scripts/mock_tools/xirunpc.py diff --git a/bin/#LOCAL_SurveyAltMTLScript.sh# b/bin/#LOCAL_SurveyAltMTLScript.sh# new file mode 100755 index 000000000..09c21e677 --- /dev/null +++ b/bin/#LOCAL_SurveyAltMTLScript.sh# @@ -0,0 +1,282 @@ +#!/bin/bash +start=`date +%s.%N` +#All Boolean True/False parameters are 0 for False or 1 for True +#So python interprets them correctly + +#Location where you have cloned the LSS Repo +path2LSS=~/.local/desicode/LSS/bin/ + +#Flags for debug/verbose mode/profiling code time usage +debug=1 +verbose=1 +profile=0 + +#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") +obscon='DARK' + +#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) +#survey='sv3' +survey='main' + +#List of healpixels to create Alt MTLs for +#hpListFile="$path2LSS/SV3HPList.txt" +hpListFile="$path2LSS/MainSurveyHPList.txt" + +#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs 
will be written
+simName=AltMTLReproDebug_"$survey"
+
+#Number of realizations to generate. Ideally a multiple of 64
+#However, you can choose smaller numbers for debugging
+
+
+ndir=2
+#Number of observation dates to loop through
+#Defaults to 40 dates for SV3
+NObsDates=40
+#Number of nodes to run on. This will launch up to 64*N jobs
+#if that number of alternate universes have already been generated
+#Defaults to 4 for 128 directories
+NNodes=1
+
+#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory
+#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs"
+#However, you should specify your own directory to a. not overwrite the survey alt MTLs
+# and b. keep your alt MTLs somewhere that you have control/access
+
+#Uncomment the following line to set your own/nonscratch directory
+#ALTMTLHOME=/path/to/your/directory/
+
+
+if [[ "${NERSC_HOST}" == "cori" ]]; then
+    CVal='haswell'
+    QVal='interactive'
+    ProcPerNode=32
+    if [[ -z "${ALTMTLHOME}" ]]; then
+        ALTMTLHOME=$CSCRATCH
+    else
+        echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME"
+    fi
+elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then
+    #srunConfig='-C cpu -q regular'
+    CVal='cpu'
+    QVal='interactive'
+    ProcPerNode=128
+    if [[ -z "${ALTMTLHOME}" ]]; then
+        ALTMTLHOME=$PSCRATCH
+    else
+        echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME"
+    fi
+
+else
+    echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye"
+    exit 1234
+fi
+
+
+
+
+#Options for InitializeAltMTLs
+
+#Random seed. Change to any integer you want (or leave the same)
+#If seed is different between two otherwise identical runs, the initial MTLs will also be different
+#seed is also saved in output directory
+seed=31415
+
+
+
+#Set to true(1) if you want to clobber already existing files for Alt MTL generation
+overwrite=0
+
+
+
+#For rundate formatting in simName, either manually modify the string below
+#to be the desired date or comment that line out and uncomment the
+#following line to autogenerate date strings.
+#To NOT use any date string specification, use the third line, an empty string
+#datestring='071322'
+datestring=`date +%y%m%d`
+#datestring=''
+
+#Can save time in MTL generation by first writing files to local tmp directory and then copying over later
+#usetmp=True will use the local tmp directory and usetmp=False will directly write to your output directory
+usetmp=True
+
+#compare against the string "True" explicitly; a bare `[ $usetmp ]` is true for
+#any nonempty value, so usetmp=False would also have selected the tmp directory
+if [ "$usetmp" == "True" ]
+then
+    outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX`
+else
+    outputMTLDirBaseBase=$ALTMTLHOME
+fi
+printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey
+printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey
+
+
+
+#These two options only are considered if the obscon is BRIGHT
+#First option indicates whether to shuffle the top level priorities
+#of BGS_FAINT/BGS_FAINT_HIP. Second option indicates what fraction/percent
+#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3
+shuffleBrightPriorities=0
+PromoteFracBGSFaint=0.2
+
+# location of original MTLs to shuffle.
+# Default directory is a read only mount of the CFS filesystem
+# You can only access that directory from compute nodes. 
+# Do NOT use the commented out directory (the normal mount of CFS) +# unless the read only mount is broken +#exampleledgerbase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +exampleledgerbase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ + +#Options for DateLoopAltMTL and runAltMTLParallel + +#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). +#Default = 0/False. Set equal to 1 if you want to restart from the first observations +qR=0 + + +#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs +#This should only be turned on for SV testing/debugging purposes +#This should not be required for main survey debugging. +getosubp=0 + +#shuffleSubpriorities(reproducing) must be set to 1(0) to ensure +#subpriorities are shuffled. debug mode for main survey +#will only require these flags to be set to 0(1) and not the getosubp flag +shuffleSubpriorities=0 +reproducing=1 + +#Include secondary targets? +secondary=0 + +numobs_from_ledger=1 + +#Force redo fiber assignment if it has already been done. +redoFA=0 + + +#Options for MakeBitweightsParallel +#True/False(1/0) as to whether to split bitweight calculation +#among nodes by MPI between realizations +splitByReal=0 + +#Split the calculation of bitweights into splitByChunk +#chunks of healpixels. +splitByChunk=100 + +#Set to true if you want to clobber already existing bitweight files +overwrite2=1 + +#Actual running of scripts + +#Copy this script to output directory for reproducbility +thisFileName=$outputMTLDirBase/$0 + +echo $thisFileName + +if [ -f "$thisFileName" ] +then + echo "File is found. Checking to see it is identical to the original." + cmp $0 $thisFileName + comp=$? + if [[ $comp -eq 1 ]] + then + echo "Files are not identical." + echo "If this is intended, please delete or edit the original copied script at $thisFileName" + echo "If this is unintended, you can reuse the original copied script at that same location" + echo "goodbye" + exit 3141 + elif [[ $comp -eq 0 ]] + then + echo "files are same, continuing" + else + echo "Something has gone very wrong. Exit code for cmp was $a" + exit $a + fi +else + echo "Copied script is not found. Copying now, making directories as needed." + mkdir -p $outputMTLDirBase +fi + +if [ -d "$outputMTLFinalDestination" ] +then + echo "output final directory exists" +else + echo "output final directory does not exist. Creating and copying script there" + mkdir -p $outputMTLFinalDestination + cp $0 $outputMTLFinalDestination +fi + +if [ $getosubp -gt 0 ] +then + touch $outputMTLFinalDestination/GetOSubpTrue +fi + +echo 'moving on to python scripts (REMOVE BEFORE PUSHING)' +printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $date + +srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/InitializeAltMTLsParallel.py $seed $ndir $overwrite $obscon $survey $outputMTLDirBase $hpListFile $shuffleBrightPriorities $PromoteFracBGSFaint $exampleledgerbase $NNodes $usetmp "$outputMTLFinalDestination/Univ{0:03d}" $shuffleSubpriorities $reproducing $debug $verbose $ProcPerNode >& $OFIM +if [ $? 
-ne 0 ]; then + exit 1234 + endInit=`date +%s.%N` + runtimeInit=$( echo "$endInit - $start" | bc -l ) + echo "runtime for initialization" + echo $runtimeInit +fi + +endInit=`date +%s.%N` +runtimeInit=$( echo "$endInit - $start" | bc -l ) +echo "runtime for initialization" +echo $runtimeInit + +printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring + +runtimeInit=$( echo "$endInit - $start" | bc -l ) + +nohup bash $path2LSS/dateLoopAltMTL.sh $qR $NObsDates $NNodes $outputMTLFinalDestination $secondary $obscon $survey $numobs_from_ledger $redoFA $getosubp $path2LSS $CVal $QVal $debug $verbose $ProcPerNode >& $OFDL + +endDL=`date +%s.%N` + +if [ $? -ne 0 ]; then + runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) + echo "runtime for Dateloop of $NObsDates days" + echo $runtimeDateLoop + exit 12345 +fi + +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop + +if [ $splitByReal -ne 0 ]; then + printf -v OFBW "%s/MakeBitweights%sOutputCase1%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring + ##echo "skipping bitweights case 1" + srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py $survey $obscon $ndir $splitByReal $splitByChunk $hpListFile $outputMTLFinalDestination $overwrite2 >& $OFBW +else + printf -v OFBW "%s/MakeBitweights%sOutputCase2%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring + ##echo "skipping bitweights case 2" + srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py $survey $obscon $ndir $splitByReal $splitByChunk $hpListFile $outputMTLFinalDestination $overwrite2 >& $OFBW +fi + +endBW=`date +%s.%N` + + + +runtimeInit=$( echo "$endInit - $start" | bc -l ) +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +runtimeBitweights=$( echo "$endBW - $endDL" | bc -l ) + +echo "runtime for initialization" +echo $runtimeInit +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop +echo "runtime for making bitweights" +echo $runtimeBitweights + + + +echo "runtime for initialization \\n\\ + $runtimeInit \\n\\ + runtime for Dateloop of $NObsDates days \\n\\ + $runtimeDateLoop \\n\\ + runtime for making bitweights \\n\\ + $runtimeBitweights" > $outputMTLFinalDestination/TimingSummary_$datestring.txt \ No newline at end of file diff --git a/bin/Y1DataReproductionScript.sh b/bin/Y1DataReproductionScript.sh new file mode 100755 index 000000000..13360fdf7 --- /dev/null +++ b/bin/Y1DataReproductionScript.sh @@ -0,0 +1,316 @@ +#!/bin/bash +start=`date +%s.%N` + +#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written +simName=Y1Reproduction +#Location where you have cloned the LSS Repo +path2LSS=~/.local/desicode/LSS/bin/ + +# Flags for debug/verbose mode/profiling code time usage. +# Uncomment second set of options to turn on the modes +debug='' +verbose='' +profile='' +#debug='--debug' +#verbose='--verbose' +#profile='--profile' + +#Uncomment second option if running on mocks +mock='' +#mock='--mock' + +#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory +#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" +#However, you should specify your own directory to a. not overwrite the survey alt MTLs +# and b. 
keep your alt MTLs somewhere that you have control/access + +#Uncomment the following line to set your own/nonscratch directory +#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ + + +if [[ "${NERSC_HOST}" == "cori" ]]; then + CVal='haswell' + QVal='interactive' + ProcPerNode=32 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$CSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi +elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then + srunConfig='-C cpu -q regular' + CVal='cpu' + QVal='interactive' + ProcPerNode=128 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$PSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi + +else + echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" + exit 1234 +fi + + + + +#Options for InitializeAltMTLs + +#Random seed. Change to any integer you want (or leave the same) +#If seed is different between two otherwise identical runs, the initial MTLs will also be different +#seed is also saved in output directory +seed=8935781 + +#Number of realizations to generate. Ideally a multiple of 64 for bitweights +#However, you can choose smaller numbers for debugging +ndir=2 + +#Uncomment second option if you want to clobber already existing files for Alt MTL generation +overwrite='' +#overwrite='--overwrite' + +#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") +obscon='DARK' +#obscon='BRIGHT' + +#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) +#survey='sv3' +survey='main' +# options are default None (empty strings). Uncommenting the second options will set them to the Y1 start and end dates. +#startDate='' +#endDate='' +startDate='' +endDate=20220624 + +#For rundate formatting in simName, either manually modify the string below +#to be the desired date or comment that line out and uncomment the +#following line to autogenerate date strings. +#To NOT use any date string specification, use the third line, an empty string +#datestring='071322' +#datestring=`date +%y%m%d` +datestring='' + +#Can save time in MTL generation by first writing files to local tmp directory and then copying over later +#uncommenting the second option will directly write to your output directory +usetmp='' +#usetmp='--dontUseTemp' + +if [ -z $usetmp ] +then + outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` +else + outputMTLDirBaseBase=$ALTMTLHOME +fi +printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey +printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey + +#List of healpixels to create Alt MTLs for +#hpListFile="$path2LSS/MainSurveyHPList_mock.txt" +hpListFile="$path2LSS/MainSurveyHPList.txt" +#hpListFile="$path2LSS/DebugMainHPList.txt" +#hpListFile="$path2LSS/SV3HPList.txt" + +#These two options only are considered if the obscon is BRIGHT +#First option indicates whether to shuffle the top level priorities +#of BGS_FAINT/BGS_FAINT_HIP. Uncomment section option to turn off shuffling of bright time priorities +#Second option indicates what fraction/percent +#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 + +#shuffleBrightPriorities='--shuffleBrightPriorities' +shuffleBrightPriorities='' + + +shuffleELGPriorities='' +#shuffleELGPriorities='--shuffleELGPriorities' + +#PromoteFracBGSFaint=0.2 +PromoteFracBGSFaint=0.2 +#PromoteFracELG=0.1 +PromoteFracELG=0.1 + +# location of original MTLs to shuffle. 
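+# (/dvs_ro is a read-only view of the same /global/cfs path, so pointing
+# exampleLedgerBase there means a stray write can never touch the real ledgers)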
+# Default directory is a read only mount of the CFS filesystem +# You can only access that directory from compute nodes. +# Do NOT use the commented out directory (the normal mount of CFS) +# unless the read only mount is broken +exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#Options for DateLoopAltMTL and runAltMTLParallel + +#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). +#Default = Empty String/False. Uncomment second option if you want to restart from the first observations +#PLEASE DO NOT CHANGEME +echo "Fix QR resetting for new argparse usage" +qR='' +#qR='-qr' + +#Number of observation dates to loop through +#Defaults to 40 dates for SV3 +NObsDates=500 + +# Whether to submit a new job with dateLoopAltMTL for each date +# or to submit a single job +# multiDate=0 +#multiDate='--multiDate' +#echo 'setting QVal here for debug. Fix later.' +#QVal='debug' +#QVal='regular' +#Number of nodes to run on. This will launch up to 64*N jobs +#if that number of alternate universes have already been generated +#Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually +NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode )) +#echo $NNodes +#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs +#This should only be turned on for SV testing/debugging purposes +#This should not be required for main survey debugging. +getosubp='' +#getosubp='--getosubp' + +#shuffleSubpriorities(reproducing) must be left as empty strings to ensure +#subpriorities are shuffled. debug mode for main survey +#will only require these flags to be set by uncommenting second options + +#dontShuffleSubpriorities='' +#reproducing='' +dontShuffleSubpriorities='--dontShuffleSubpriorities' +reproducing='--reproducing' +#Include secondary targets? +secondary='' +#secondary='--secondary' + + +#If running from mocks, must set target directory. +#Otherwise this is optional +targfile='' #CHANGEME IF RUNNING ON MOCKS +#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory +#targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' +#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' + + +#Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger +numobs_from_ledger='' +#numobs_from_ledger='--NumObsNotFromLedger' + +#Uncomment second line to force redo fiber assignment if it has already been done. +redoFA='' +#redoFA='--redoFA' + + +#Options for MakeBitweightsParallel +#True/False(1/0) as to whether to split bitweight calculation +#among nodes by MPI between realizations +#splitByReal=1 + +#Split the calculation of bitweights into splitByChunk +#chunks of healpixels. +#splitByChunk=1 + +#Set to true (1) if you want to clobber already existing bitweight files +overwrite2='' +#overwrite2='--overwrite' +#Actual running of scripts + +#Copy this script to output directory for reproducbility +thisFileName=$outputMTLFinalDestination/$0 + +echo $thisFileName + +if [ -f "$thisFileName" ] +then + echo "File is found. Checking to see it is identical to the original." 
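+    # cmp exits 0 if the files are identical, 1 if they differ, and >1 on
+    # error; the status is captured in $comp immediately below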
+    cmp $0 $thisFileName
+    comp=$?
+    if [[ $comp -eq 1 ]]
+    then
+        echo "Files are not identical."
+        echo "If this is intended, please delete or edit the original copied script at $thisFileName"
+        echo "If this is unintended, you can reuse the original copied script at that same location"
+        echo "goodbye"
+        exit 3141
+    elif [[ $comp -eq 0 ]]
+    then
+        echo "files are same, continuing"
+    else
+        echo "Something has gone very wrong. Exit code for cmp was $comp"
+        exit $comp
+    fi
+else
+    echo "Copied script is not found. Copying now, making directories as needed."
+    mkdir -p $outputMTLFinalDestination
+    cp $SLURM_SUBMIT_DIR/$0 $outputMTLFinalDestination/$0
+fi
+
+if [ -d "$outputMTLFinalDestination" ]
+then
+    echo "output final directory exists"
+    echo $outputMTLFinalDestination
+else
+    echo "output final directory does not exist. Creating and copying script there"
+    mkdir -p $outputMTLFinalDestination
+    cp $0 $outputMTLFinalDestination
+fi
+
+#Touch the marker file only when the getosubp flag is actually set
+if [ -n "$getosubp" ]
+then
+    touch $outputMTLFinalDestination/GetOSubpTrue
+fi
+
+printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
+
+echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM"
+srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM
+if [ $? -ne 0 ]; then
+    endInit=`date +%s.%N`
+    runtimeInit=$( echo "$endInit - $start" | bc -l )
+    echo "runtime for initialization"
+    echo $runtimeInit
+    exit 1234
+fi
+
+endInit=`date +%s.%N`
+runtimeInit=$( echo "$endInit - $start" | bc -l )
+echo "runtime for initialization"
+echo $runtimeInit
+
+printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
+
+argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate"
+nohup bash $path2LSS/dateLoopAltMTL.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL
+
+endDL=`date +%s.%N`
+
+if [ $? 
-ne 0 ]; then + runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) + echo "runtime for Dateloop of $NObsDates days" + echo $runtimeDateLoop + exit 12345 +fi +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop +exit 54321 + + + +printf -v OFBW "%s/MakeBitweights%sOutput%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring +srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py --survey=$survey --obscon=$obscon --ndir=$ndir --ProcPerNode=$ProcPerNode --HPListFile=$hpListFile --outdir=$outputMTLFinalDestination $overwrite2 $verbose $debug >& $OFBW + +endBW=`date +%s.%N` + + + +runtimeInit=$( echo "$endInit - $start" | bc -l ) +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +runtimeBitweights=$( echo "$endBW - $endDL" | bc -l ) + +echo "runtime for initialization" +echo $runtimeInit +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop +echo "runtime for making bitweights" +echo $runtimeBitweights diff --git a/scripts/mock_tools/Bad_fiber_lists.py b/scripts/mock_tools/Bad_fiber_lists.py new file mode 100644 index 000000000..e3f9a9606 --- /dev/null +++ b/scripts/mock_tools/Bad_fiber_lists.py @@ -0,0 +1,163 @@ +import numpy as np +from scipy import stats +from scipy.stats import norm +import fitsio +import glob +import os +import matplotlib.pyplot as plt +import statistics +import argparse +import astropy +from astropy.table import Table,join +from astropy.time import Time +from astropy.io import fits + +import LSS.common_tools as common + +parser = argparse.ArgumentParser() +#parser.add_argument("--type", help="tracer type to be selected") +basedir='/global/cfs/cdirs/desi/survey/catalogs' +parser.add_argument("--basedir", help="base directory for input/output",default=basedir) +#parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test') +parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='DA02') +parser.add_argument("--verspec",help="version for redshifts",default='guadalupe') + +args = parser.parse_args() +basedir = args.basedir +#version = args.version +survey = args.survey +specver = args.verspec + + + +f=basedir+'/'+survey+'/LSS/'+specver+'/ELG_zsuccess.txt' +f1=basedir+'/'+survey+'/LSS/'+specver+'/QSO_zsuccess.txt' +f2=basedir+'/'+survey+'/LSS/'+specver+'/LRG_zsuccess.txt' +f3=basedir+'/'+survey+'/LSS/'+specver+'/BGS_ANY_zsuccess.txt' + + + +ELG=Table() +ELG['FIBER']=np.loadtxt(f)[:,0] +ELG['frac_suc']=np.loadtxt(f)[:,1] +ELG['n_suc']=np.loadtxt(f)[:,2] +ELG['n_tot']=np.loadtxt(f)[:,3] + +QSO=Table() +QSO['FIBER']=np.loadtxt(f1)[:,0] +QSO['frac_suc']=np.loadtxt(f1)[:,1] +QSO['n_suc']=np.loadtxt(f1)[:,2] +QSO['n_tot']=np.loadtxt(f1)[:,3] + +LRG=Table() +LRG['FIBER']=np.loadtxt(f2)[:,0] +LRG['frac_suc']=np.loadtxt(f2)[:,1] +LRG['n_suc']=np.loadtxt(f2)[:,2] +LRG['n_tot']=np.loadtxt(f2)[:,3] + + +BGS=Table() +BGS['FIBER']=np.loadtxt(f3)[:,0] +BGS['frac_suc']=np.loadtxt(f3)[:,1] +BGS['n_suc']=np.loadtxt(f3)[:,2] +BGS['n_tot']=np.loadtxt(f3)[:,3] + + +def fse(fiberstats): + #masknosuc= fiberstats['n_suc']>0 + #print(fiberstats[~masknosuc]) + mask1ntots = fiberstats['n_tot']>1 + fiberstats = fiberstats[mask1ntots] + fiberstats['frac_suc'] = fiberstats['n_suc']/fiberstats['n_tot'] + mean = np.sum(fiberstats['n_suc'])/np.sum(fiberstats['n_tot']) + + + error_floor = True + + n, p = fiberstats['n_tot'].copy(), fiberstats['frac_suc'].copy() + if 
error_floor:
+        p1 = np.maximum(1-p, 1/n) # error floor
+    else:
+        p1 = p
+    fiberstats['frac_suc_err'] = np.clip(np.sqrt(n * p * (1-p))/n, np.sqrt(n * p1 * (1-p1))/n, 1)
+
+    fiberstats['check'] =(mean - fiberstats['frac_suc'])/fiberstats['frac_suc_err']
+    fiberstats.sort('frac_suc')
+
+    from scipy.stats import binom
+
+    bad_stats=Table()
+    bad_stats["FIBER"]=fiberstats["FIBER"]
+    bad_stats["n_tot"],bad_stats["n_suc"]=fiberstats['n_tot'], fiberstats['n_suc']
+    bad_stats['n_fail'] = bad_stats['n_tot']-bad_stats['n_suc']
+    bad_stats['frac_suc']= bad_stats['n_suc']/bad_stats['n_tot']
+    bad_stats['frac_suc_err']=fiberstats['frac_suc_err']
+    bad_stats['more_fail_p']=np.zeros(len(bad_stats))
+    bad_stats['check']=fiberstats["check"]
+    for fiber in fiberstats['FIBER']:
+        n = fiberstats['n_tot'][fiberstats['FIBER']==fiber]
+        s = fiberstats['n_suc'][fiberstats['FIBER']==fiber]
+        p = mean
+        bad_stats["more_fail_p"][fiberstats['FIBER']==fiber]= binom.cdf(s-1, n, p)
+
+    nsigma=float(input("Enter the req sigma value\n"))
+    #mcheck=fiberstats['check']>3
+    #flag fibers whose binomial p-value for that many failures lies beyond the
+    #requested one-sided nsigma tail
+    mfail=bad_stats['more_fail_p']<norm.cdf(-1.*nsigma)
+    bad_stats=bad_stats[mfail]
+    return bad_stats, nsigma
+
+def combine(fstats1,fstats2):
+    #sum the per-fiber counts of two tracers so the combined sample can be
+    #passed back through fse; keep only fibers with more than one observation
+    fstats_comb = Table()
+    fstats_comb['FIBER']=np.arange(5000)
+    fstats_comb['n_tot']=np.zeros(5000)
+    fstats_comb['n_suc']=np.zeros(5000)
+    for fiber in fstats_comb['FIBER']:
+        m1=fstats1['FIBER']==fiber
+        m2=fstats2['FIBER']==fiber
+        fstats_comb['n_tot'][fiber]=np.sum(fstats1['n_tot'][m1])+np.sum(fstats2['n_tot'][m2])
+        fstats_comb['n_suc'][fiber]=np.sum(fstats1['n_suc'][m1])+np.sum(fstats2['n_suc'][m2])
+    mask0= fstats_comb['n_tot']>1
+    fstats_comb=fstats_comb[mask0]
+    fstats_comb['frac_suc']=fstats_comb['n_suc']/fstats_comb['n_tot']
+    return(fstats_comb)
+
+LRGBGS=combine(LRG,BGS)
+
+choice=1
+while(choice==1):
+    print("\nEnter number\n1 ELG\n2 LRG\n3 QSO\n4 BGS\n5 LRGBGS\n")
+    t_t=int(input())
+    if(t_t==1):
+        bad_t,sig_t =fse(ELG)
+        name="ELG"
+    elif(t_t==2):
+        bad_t,sig_t =fse(LRG)
+        name="LRG"
+    elif(t_t==3):
+        bad_t,sig_t =fse(QSO)
+        name="QSO"
+    elif(t_t==4):
+        bad_t,sig_t =fse(BGS)
+        name="BGS"
+    elif(t_t==5):
+        bad_t,sig_t =fse(LRGBGS)
+        name="LRGBGS"
+    else:
+        print("unrecognized choice, please try again")
+        continue
+
+    fn = "/global/homes/s/sidpen90/desicode/LSS/badfibfail/"+name+"_bad_fibers"+str(sig_t)+"_sigma.txt"
+    np.savetxt(fn,bad_t['FIBER'],fmt='%i')
+    print('saved results to '+fn)
+    choice=int(input("\nPress 1 to run again or any other key to exit\n"))
diff --git a/scripts/mock_tools/LRG+BGS_bad_fiber_list3sigfid.py b/scripts/mock_tools/LRG+BGS_bad_fiber_list3sigfid.py
new file mode 100644
index 000000000..67e1d335f
--- /dev/null
+++ b/scripts/mock_tools/LRG+BGS_bad_fiber_list3sigfid.py
@@ -0,0 +1,176 @@
+import numpy as np
+#!pip install astropy
+#!pip install fitsio
+from scipy import stats
+from scipy.stats import norm
+import fitsio
+import glob
+import os
+import matplotlib.pyplot as plt
+import statistics
+import argparse
+import astropy
+from astropy.table import Table,join
+from astropy.time import Time
+from astropy.io import fits
+
+import LSS.common_tools as common
+
+parser = argparse.ArgumentParser()
+#parser.add_argument("--type", help="tracer type to be selected")
+basedir='/global/cfs/cdirs/desi/survey/catalogs'
+parser.add_argument("--basedir", help="base directory for input/output",default=basedir)
+parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test')
+parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='DA02')
+parser.add_argument("--verspec",help="version for redshifts",default='guadalupe')
+
+args = parser.parse_args()
+basedir = args.basedir
+version = args.version
+survey = args.survey
+specver = args.verspec
+
+#filepathLF = basedir+'/'+survey+'/LSS/'+specver+'/LSScats/'+version+'/LRG_full.dat.fits'
+#filepathBGS = basedir+'/'+survey+'/LSS/'+specver+'/LSScats/'+version+'/BGS_ANY_full.dat.fits'
+
+#ff = fitsio.read(filepathLF)
+#hdul = fits.open(filepathLF)
+#ff2 = fitsio.read(filepathBGS)
+#hdul = fits.open(filepathBGS)
+
+if survey != 'SV3':
+    zf = 
basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_zdone.fits' + dz = Table(fitsio.read(zf)) + desitarg = 'DESI_TARGET' + bit = 1 #for selecting LRG + wtype = ((dz[desitarg] & bit) > 0) + print(len(dz[wtype])) + #dz = dz[wtype&wg] + dz = dz[wtype] + + ff = common.cut_specdat(dz) + + zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_zdone.fits' + dz = Table(fitsio.read(zf)) + desitarg = 'BGS_TARGET' + wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0) + print(len(dz[wtype])) + #dz = dz[wtype&wg] + dz = dz[wtype] + + ff2 = common.cut_specdat(dz) + +else: + zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_Alltiles.fits' + dz = Table(fitsio.read(zf)) + desitarg = 'SV3_DESI_TARGET' + bit = 1 #for selecting LRG + wtype = ((dz[desitarg] & bit) > 0) + print(len(dz[wtype])) + #dz = dz[wtype&wg] + dz = dz[wtype] + wz = dz['ZWARN'] != 999999 #this is what the null column becomes + wz &= dz['ZWARN']*0 == 0 #just in case of nans + wz &= dz['COADD_FIBERSTATUS'] == 0 + ff = dz[wz] + + zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_Alltiles.fits' + dz = Table(fitsio.read(zf)) + desitarg = 'SV3_BGS_TARGET' + wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0) + print(len(dz[wtype])) + #dz = dz[wtype&wg] + dz = dz[wtype] + wz = dz['ZWARN'] != 999999 #this is what the null column becomes + wz &= dz['ZWARN']*0 == 0 #just in case of nans + wz &= dz['COADD_FIBERSTATUS'] == 0 + + ff2 = dz[wz] + + +z_suc= ff['ZWARN']==0 +z_suc &= ff['DELTACHI2']>15 +z_suc &= ff['Z']<1.5 +z_tot = ff['ZWARN'] != 999999 +z_tot &= ff['ZWARN']*0 == 0 + +#print(len(ff[z_suc]),len(ff[z_tot])) +print("zsuccess rate for LRG=",len(ff[z_suc])/len(ff[z_tot])) +cat1 = Table(ff[z_tot]) + +full=Table() +full['FIBER'] = np.arange(5000) + +fiberstats = Table() +fiberstats['FIBER'], fiberstats['n_tot'] = np.unique(ff['FIBER'][z_tot], return_counts=True) +#fiberstats.sort('n_tot') + +tt = Table() +tt['FIBER'], tt['n_suc'] = np.unique(ff['FIBER'][z_suc], return_counts=True) + +fiberstats1 = join(fiberstats, tt, keys='FIBER', join_type='outer').filled(0) +fiberstats1 = join(fiberstats1,full, keys='FIBER',join_type='outer').filled(0) +#fiberstats1['frac_suc'] = fiberstats1['n_suc']/fiberstats1['n_tot'] + + +z_tot = ff2['ZWARN'] != 999999 +z_tot &= ff2['ZWARN']*0 == 0 +z_suc =ff2['ZWARN']==0 +z_suc&=ff2['DELTACHI2']>40 +#print(len(ff2[z_suc]),len(ff2[z_tot])) +print("zsuccess rate for BGS=",len(ff2[z_suc])/len(ff2[z_tot])) +cat2 = Table(ff2[z_tot]) + +fiberstats2 = Table() +fiberstats2['FIBER'], fiberstats2['n_tot'] = np.unique(ff2['FIBER'][z_tot], return_counts=True) +#fiberstats.sort('n_tot') + +tt2 = Table() +tt2['FIBER'], tt2['n_suc'] = np.unique(ff2['FIBER'][z_suc], return_counts=True) +fiberstats2 = join(fiberstats2, tt2, keys='FIBER', join_type='outer').filled(0) +fiberstats2 = join(fiberstats2,full, keys='FIBER',join_type='outer').filled(0) +#fiberstats2['frac_suc'] = fiberstats2['n_suc']/fiberstats2['n_tot'] + + +fstats_comb = Table() +fstats_comb['Fiber']=np.arange(5000) +fstats_comb['n_tot']=np.arange(5000) +fstats_comb['n_suc']=np.arange(5000) +for fiber in fstats_comb['Fiber']: + m1=fiberstats1['FIBER']==fiber + m2=fiberstats2['FIBER']==fiber + fstats_comb['n_tot'][fiber] = fiberstats1['n_tot'][m1]+fiberstats2['n_tot'][m2] + fstats_comb['n_suc'][fiber] = fiberstats1['n_suc'][m1]+fiberstats2['n_suc'][m2] + +mask0= fstats_comb['n_tot']>1 +fstats_comb=fstats_comb[mask0] +fstats_comb['frac_suc']=fstats_comb['n_suc']/fstats_comb['n_tot'] +#fstats_comb + +error_floor = True + 
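+# Binomial error on the combined success fraction: sigma = sqrt(n*p*(1-p))/n.
+# A fully successful fiber would get sigma = 0, so the failure probability is
+# floored at 1/n (p1 below); np.clip then keeps the larger of the naive and
+# floored errors, capped at 1. E.g. for n=25, p=1.0: naive sigma = 0, floored
+# sigma = sqrt(25*0.04*0.96)/25 ~ 0.039.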
+n, p = fstats_comb['n_tot'].copy(), fstats_comb['frac_suc'].copy() +if error_floor: + p1 = np.maximum(1-p, 1/n) # error floor +else: + p1 = p +fstats_comb['frac_suc_err'] = np.clip(np.sqrt(n * p * (1-p))/n, np.sqrt(n * p1 * (1-p1))/n, 1) + +#print("Removed fibers for having only 1 obs:\n",fstats_comb['FIBER'][ntotmask]) +mean = np.sum(fstats_comb['n_suc'])/np.sum(fstats_comb['n_tot']) +fstats_comb['check'] =(mean - fstats_comb['frac_suc'])/fstats_comb['frac_suc_err'] +fstats_comb.sort('frac_suc') +#fstats_comb + + + +#mean = np.sum(fstats_comb['n_suc'])/np.sum(fstats_comb['n_tot']) +n = 3 +maskcheck = fstats_comb['check']>n +print(fstats_comb) +#np.savetxt(basedir+'/'+survey+'/LSS/'+specver+'/LSScats/'+version+"/lrg+bgs_"+str(n)+"sig_bad_fibers.txt",fstats_comb[maskcheck]['Fiber'],fmt='%i') +fn = basedir+'/'+survey+'/LSS/'+specver+"/lrg+bgs_"+str(n)+"sig_bad_fibers.txt" +np.savetxt(fn,fstats_comb[maskcheck]['Fiber'],fmt='%i') +print('saved results to '+fn) \ No newline at end of file diff --git a/scripts/mock_tools/comp_zstats_specrels.py b/scripts/mock_tools/comp_zstats_specrels.py new file mode 100644 index 000000000..9a000bd30 --- /dev/null +++ b/scripts/mock_tools/comp_zstats_specrels.py @@ -0,0 +1,272 @@ +import numpy as np +#!pip install astropy +#!pip install fitsio +from scipy import stats +from scipy.stats import norm +import fitsio +import glob +import os +import sys +import matplotlib.pyplot as plt +import statistics +import argparse +import astropy +from astropy.table import Table,join +from astropy.time import Time +from astropy.io import fits + +import LSS.common_tools as common + + +parser = argparse.ArgumentParser() +#parser.add_argument("--type", help="tracer type to be selected") +basedir='/global/cfs/cdirs/desi/survey/catalogs' +parser.add_argument("--basedir", help="base directory for input/output",default=basedir) +parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='DA02') +parser.add_argument("--verspec",help="version for redshifts",default='guadalupe') +parser.add_argument("--verspec_new",help="version for redshifts",default='newQSOtemp_tagged') +parser.add_argument("--tracer",help="tracer type(s) (e.g., LRG)",default='all') +parser.add_argument("--mbit5",help="whether to screen against zwarn mask bit 5",default='n') +parser.add_argument("--mbit510",help="whether to screen against zwarn mask bits 5 and 10",default='n') +parser.add_argument("--zwarn0",help="only count as success if zwarn == 0",default='n') + +args = parser.parse_args() +basedir = args.basedir +survey = args.survey +specver = args.verspec +#tp = args.tracer + + + +#ff = fitsio.read(filepathLF) +#hdul = fits.open(filepathLF) +#ff2 = fitsio.read(filepathBGS) +#hdul = fits.open(filepathBGS) + +if args.tracer == 'all': + tracers = ['QSO','LRG','ELG','BGS_ANY'] +else: + tracers = [args.tracer] + + + +for tp in tracers: + notqso = '' + if survey == 'DA02': + if tp == 'LRG': + bit = 1 #for selecting LRG + if tp == 'ELG': + bit = 2 + notqso = 'notqso' + if tp == 'QSO': + bit = 4 + if tp == 'BGS_ANY': + zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_zdone.fits' + zf_new = basedir+'/'+survey+'/LSS/'+args.verspec_new+'/datcomb_bright_spec_zdone.fits' + dz = Table(fitsio.read(zf)) + + + desitarg = 'BGS_TARGET' + wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0) + else: + zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_zdone.fits' + zf_new = basedir+'/'+survey+'/LSS/'+args.verspec_new+'/datcomb_dark_spec_zdone.fits' + dz = 
Table(fitsio.read(zf)) + desitarg = 'DESI_TARGET' + wtype = ((dz[desitarg] & bit) > 0) + if tp == 'ELG': + wtype &= ((dz[desitarg] & 4) == 0) #remove QSO + print(len(dz[wtype])) + #dz = dz[wtype&wg] + dz = dz[wtype] + + dz = common.cut_specdat(dz) + dz_new = Table(fitsio.read(zf_new)) + dz_new.keep_columns(['Z','ZWARN','DELTACHI2','TARGETID','TILEID','LOCATION']) + print(len(dz)) + dz = join(dz,dz_new,keys=['TARGETID','TILEID','LOCATION'],table_names=['fid','new']) + print(str(len(dz))+' should agree with above') + + + from LSS.globals import main + pars = main(tp,args.verspec) + + elif survey == 'main': + sys.exit(survey+' not supported yet') + zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_'+tp+'_tarspecwdup_zdone.fits' + dz = Table(fitsio.read(zf)) + if tp == 'ELG': + wtype = ((dz['DESI_TARGET'] & 4) == 0) #remove QSO + dz = dz[wtype] + dz = common.cut_specdat(dz) + from LSS.globals import main + pars = main(tp,args.verspec) + + + elif survey == 'SV3': + sys.exit('not written for SV3 yet') + zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_Alltiles.fits' + dz = Table(fitsio.read(zf)) + desitarg = 'SV3_DESI_TARGET' + bit = 1 #for selecting LRG + wtype = ((dz[desitarg] & bit) > 0) + print(len(dz[wtype])) + #dz = dz[wtype&wg] + dz = dz[wtype] + wz = dz['ZWARN'] != 999999 #this is what the null column becomes + wz &= dz['ZWARN']*0 == 0 #just in case of nans + wz &= dz['COADD_FIBERSTATUS'] == 0 + ff = dz[wz] + + zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_Alltiles.fits' + dz = Table(fitsio.read(zf)) + desitarg = 'SV3_BGS_TARGET' + wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0) + print(len(dz[wtype])) + #dz = dz[wtype&wg] + dz = dz[wtype] + wz = dz['ZWARN'] != 999999 #this is what the null column becomes + wz &= dz['ZWARN']*0 == 0 #just in case of nans + wz &= dz['COADD_FIBERSTATUS'] == 0 + + ff2 = dz[wz] + + z_tot = dz['ZWARN_fid'] != 999999 + z_tot &= dz['ZWARN_fid']*0 == 0 + z_new = dz['ZWARN_new'] != 999999 + z_new &= dz['ZWARN_new']*0 == 0 + print('number with z to consider fid,new') + print(len(dz[z_tot]),len(dz[z_new])) + + + if tp == 'LRG': + z_suc= dz['ZWARN_fid']==0 + z_suc &= dz['DELTACHI2_fid']>15 + z_suc &= dz['Z_fid']<1.5 + z_sucnew= dz['ZWARN_new']==0 + z_sucnew &= dz['DELTACHI2_new']>15 + z_sucnew &= dz['Z_new']<1.5 + zmin = 0.4 + zmax = 1.1 + + if tp == 'ELG': + o2f = fitsio.read(pars.elgzf,columns=['TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR']) + dz = join(dz,o2f,keys=['TARGETID','TILEID','LOCATION']) + o2c = np.log10(dz['OII_FLUX'] * np.sqrt(dz['OII_FLUX_IVAR']))+0.2*np.log10(dz['DELTACHI2_fid']) + z_suc = o2c > 0.9 + o2f_new = fitsio.read(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/emlin_catalog.fits' ,columns=['TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR']) + dz = join(dz,o2f_new,keys=['TARGETID','TILEID','LOCATION'],table_names=['fid','new']) + o2c_new = np.log10(dz['OII_FLUX_new'] * np.sqrt(dz['OII_FLUX_IVAR_new']))+0.2*np.log10(dz['DELTACHI2_new']) + z_sucnew = o2c_new > 0.9 + zmin = 0.6 + zmax = 1.6 + + if tp == 'QSO': + qsozf = pars.qsozf + if specver == 'guadalupe': + qsozf = '/global/cfs/cdirs/desi/users/edmondc/QSO_catalog/guadalupe/QSO_cat_guadalupe_cumulative.fits' + arz = Table(fitsio.read(qsozf)) + arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','Z_QN']) + arz['TILEID'] = arz['TILEID'].astype(int) + + #arz = fitsio.read(qsozf,columns=['TARGETID','LOCATION','TILEID','Z','Z_QN']) + + #arz['TILEID'] = arz['TILEID'].astype(int) + dz = 
join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF']) + #dz['Z'].name = 'Z_RR' #rename the original redrock redshifts + #dz['Z_QF'].name = 'Z' #the redshifts from the quasar file should be used instead + + z_suc = dz['Z'].mask == False #previous Z column should have become Z_fid + if args.mbit5 == 'y': + z_suc &= dz['ZWARN_fid'] & 2**5 == 0 + qsozf_new = basedir+'/'+survey+'/LSS/'+args.verspec_new+'/QSO_catalog.fits' + arz = Table(fitsio.read(qsozf_new)) + arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','Z_QN']) + arz['TILEID'] = arz['TILEID'].astype(int) + dz = join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF_new']) + #print(dz.dtype.names) + z_sucnew = dz['Z_QF_new'].mask == False + if args.mbit5 == 'y': + z_sucnew &= dz['ZWARN_new'] & 2**5 == 0 + if args.mbit510 == 'y': + z_sucnew &= dz['ZWARN_new'] & 2**5 == 0 + z_sucnew &= dz['ZWARN_new'] & 2**10 == 0 + if args.zwarn0 == 'y': + z_sucnew &= dz['ZWARN_new'] == 0 + + zmin = 0.8 + zmax = 3.5 + + + if tp == 'BGS_ANY': + z_suc = dz['ZWARN_fid']==0 + z_suc &= dz['DELTACHI2_fid']>40 + z_sucnew = dz['ZWARN_new']==0 + z_sucnew &= dz['DELTACHI2_new']>40 + zmin = 0.01 + zmax = 0.6 + + #print(len(ff[z_suc]),len(ff[z_tot])) + print("fiducial zsuccess rate for "+tp,len(dz[z_suc&z_tot])/len(dz[z_tot])) + print("new zsuccess rate for "+tp,len(dz[z_sucnew&z_new])/len(dz[z_new])) + print("fraction with zsuccess in both "+tp,len(dz[z_sucnew&z_new&z_suc])/len(dz[z_new])) + + if tp != 'QSO': + plt.hist(dz['Z_fid'][z_suc&z_tot],histtype='step',label='fiducial',range=(zmin,zmax),bins=50) + plt.hist(dz['Z_new'][z_sucnew&z_new],histtype='step',label='new',range=(zmin,zmax),bins=50) + plt.legend() + plt.xlabel('redshift') + plt.ylabel('# of good z in bin') + plt.title(tp+notqso) + plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zhistcompGuad.png') + + plt.show() + plt.plot(dz['Z_fid'][z_suc&z_tot&z_sucnew],dz['Z_new'][z_suc&z_tot&z_sucnew],'k,') + plt.xlabel('Guadalupe redshift') + plt.ylabel('new redshift') + plt.title(tp+notqso) + plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zcompGuad.png') + plt.show() + + else: + plt.hist(dz['Z'][z_suc&z_tot],histtype='step',label='fiducial',range=(zmin,zmax),bins=50) + plt.hist(dz['Z_QF_new'][z_sucnew&z_new],histtype='step',label='new',range=(zmin,zmax),bins=50) + plt.legend() + plt.xlabel('redshift') + plt.ylabel('# of good z in bin') + plt.title(tp+notqso) + fn_app = '' + if args.mbit5 == 'y': + fn_app = '_maskbit5' + if args.mbit510 == 'y': + fn_app = '_maskbits510' + if args.zwarn0 == 'y': + fn_app = '_zwarn0' + + plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zhistcompGuad'+fn_app+'.png') + plt.show() + plt.plot(dz['Z'][z_suc&z_tot&z_sucnew],dz['Z_QF_new'][z_suc&z_tot&z_sucnew],'k,') + plt.xlabel('Guadalupe redshift') + plt.ylabel('new redshift') + plt.title(tp+notqso) + plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zcompGuad'+fn_app+'.png') + plt.show() + plt.plot(dz['Z_QF_new'][z_suc&z_tot&z_sucnew],(dz['Z_QF_new'][z_suc&z_tot&z_sucnew]-dz['Z'][z_suc&z_tot&z_sucnew])/(1+dz['Z_QF_new'][z_suc&z_tot&z_sucnew]),'k,') + plt.xlabel('new redshift') + plt.ylabel('(new z-Guadalupe z)/(1+new z)') + plt.ylim(-0.02,0.02) + plt.title(tp+notqso) + plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zdiffGuad'+fn_app+'.png') + plt.show() + + 
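+        # (new z - Guadalupe z)/(1 + new z) above is a velocity residual:
+        # dz/(1+z) ~ dv/c, so the +/-0.02 y-range spans roughly +/-6000 km/s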
plt.plot(dz['Z'][z_suc&z_tot&z_sucnew],dz['Z_QF_new'][z_suc&z_tot&z_sucnew],'k,') + plt.xlabel('Guadalupe redshift') + plt.ylabel('new redshift') + plt.title(tp+notqso) + plt.xlim(1.3,1.6) + plt.ylim(1.3,1.6) + plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zcompGuadzoom'+fn_app+'.png') + plt.show() + + + diff --git a/scripts/mock_tools/compare_snapshot_dir_with_live.py b/scripts/mock_tools/compare_snapshot_dir_with_live.py new file mode 100644 index 000000000..82de710cf --- /dev/null +++ b/scripts/mock_tools/compare_snapshot_dir_with_live.py @@ -0,0 +1,22 @@ +#!/usr/bin/python +''' +Useful script for checking the integrity of a directory + +example run: + python compare_snapshot_dir_with_live.py --livedir /global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/daily/LSScats/1/ --snapdate 2021-08-26 + +Please note snapshots on cfs are only present for a week. Asking for a snapshot older than a week will cause a missing directory error. +''' +import argparse +import os + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument('--livedir', type=str, required=True, help="Directory you wish to compare with its snapshot") +parser.add_argument('--snapdate', type=str, required=True, help="Date of snapshot to compare against, format YYYY-MM-DD") + +uargs = parser.parse_args() +live = set(os.listdir(uargs.livedir)) +snapshot = set(os.listdir(uargs.livedir+'/.snapshots/'+uargs.snapdate)) + +print(f'Files present in the snapshot {uargs.snapdate} but not in the live version: {snapshot-live}') +print(f'Files present in the live version but not the snapshot {uargs.snapdate}: {live-snapshot}') diff --git a/scripts/mock_tools/getLRGmask.py b/scripts/mock_tools/getLRGmask.py new file mode 100644 index 000000000..c9faf7b6f --- /dev/null +++ b/scripts/mock_tools/getLRGmask.py @@ -0,0 +1,186 @@ +# Get LRG bitmasks for a catalog +# originally written by Rongpu Zhou +# Examples: +# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input catalog.fits --output catalog_lrgmask.npy +# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits --output $CSCRATCH/temp/randoms-1-0-lrgmask_v1.fits +# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/users/rongpu/targets/dr9.0/1.0.0/resolve/dr9_lrg_south_1.0.0_basic.fits --output $CSCRATCH/temp/dr9_lrg_south_1.0.0_lrgmask_v1.fits + +from __future__ import division, print_function +from functools import partial +import sys, os, glob, time, warnings, gc +import numpy as np +import matplotlib.pyplot as plt +from astropy.table import Table, vstack, hstack, join +import fitsio + +from astropy.io import fits +from astropy import wcs + +from multiprocessing import Pool +import argparse + + +time_start = time.time() + +#bitmask_dir = '/global/cscratch1/sd/rongpu/desi/lrg_pixel_bitmask/v1' +bitmask_dir = '/global/cfs/cdirs/desi/survey/catalogs/brickmasks/LRG/v1.1' + +n_processes = 32 + +################## +debug = False +################## + +if os.environ['NERSC_HOST'] == 'cori': + scratch = 'CSCRATCH' +elif os.environ['NERSC_HOST'] == 'perlmutter': + scratch = 'PSCRATCH' +else: + print('NERSC_HOST is not cori or permutter but is '+os.environ['NERSC_HOST']) + sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding') + + +parser = argparse.ArgumentParser() +parser.add_argument("--basedir", help="base directory for 
output, default is SCRATCH",default=scratch) +parser.add_argument("--survey", help="e.g., SV3 or main",default='SV3') +parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test') +parser.add_argument("--verspec",help="version for redshifts",default='everest') +parser.add_argument("--minr", help="minimum number for random files",default=0,type=int) +parser.add_argument("--maxr", help="maximum for random files, default is 1, but 18 are available (use parallel script for all)",default=1,type=int) + +#parser.add_argument('-i', '--input', required=True) +#parser.add_argument('-o', '--output', required=True) +args = parser.parse_args() + +lssdir = args.basedir +'/'+args.survey+'/LSS/' + +ldirspec = lssdir+args.verspec+'/' + +indirfull = ldirspec+'/LSScats/'+args.version+'/' + +tp = 'LRG' + +#if args.survey == 'main' or args.survey == 'DA02': +# tp += 'zdone' + + + +def bitmask_radec(brickid, ra, dec): + + brick_index = np.where(bricks['BRICKID']==brickid)[0][0] + + brickname = str(bricks['BRICKNAME'][brick_index]) + if bricks['PHOTSYS'][brick_index]=='N': + field = 'north' + elif bricks['PHOTSYS'][brick_index]=='S': + field = 'south' + else: + raise ValueError + # bitmask_fn = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/{}/coadd/{}/{}/legacysurvey-{}-maskbits.fits.fz'.format(field, brickname[:3], brickname, brickname) + bitmask_fn = os.path.join(bitmask_dir, '{}/coadd/{}/{}/{}-lrgmask.fits.gz'.format(field, brickname[:3], brickname, brickname)) + + bitmask_img = fitsio.read(bitmask_fn) + + header = fits.open(bitmask_fn)[1].header + w = wcs.WCS(header) + + coadd_x, coadd_y = w.wcs_world2pix(ra, dec, 0) + coadd_x, coadd_y = np.round(coadd_x).astype(int), np.round(coadd_y).astype(int) + + bitmask = bitmask_img[coadd_y, coadd_x] + + return bitmask + +def wrapper(bid_index,bidorder,bidcnts,bid_unique,cat): + + idx = bidorder[bidcnts[bid_index]:bidcnts[bid_index+1]] + brickid = bid_unique[bid_index] + + ra, dec = cat['RA'][idx], cat['DEC'][idx] + + bitmask = bitmask_radec(brickid, ra, dec) + + data = Table() + data['idx'] = idx + data['lrg_mask'] = bitmask + data['TARGETID'] = cat['TARGETID'][idx] + + return data + + +def mkfile(input_path,output_path): + try: + cat = fitsio.read(input_path, rows=None, columns=['lrg_mask']) + return 'file already has lrg_mask column' + except: + print('adding lrg_mask column') + + try: + cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC', 'BRICKID','TARGETID'])) + except ValueError: + cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC','TARGETID'])) + + print(len(cat)) + + #for col in cat.colnames: + # cat.rename_column(col, col.upper()) + + #if 'TARGET_RA' in cat.colnames: + # cat.rename_columns(['TARGET_RA', 'TARGET_DEC'], ['RA', 'DEC']) + + if 'BRICKID' not in cat.colnames: + from desiutil import brick + tmp = brick.Bricks(bricksize=0.25) + cat['BRICKID'] = tmp.brickid(cat['RA'], cat['DEC']) + + # Just some tricks to speed up things up + bid_unique, bidcnts = np.unique(cat['BRICKID'], return_counts=True) + bidcnts = np.insert(bidcnts, 0, 0) + bidcnts = np.cumsum(bidcnts) + bidorder = np.argsort(cat['BRICKID']) + + + # start multiple worker processes + with Pool(processes=n_processes) as pool: + res = pool.map(partial(wrapper,bidorder=bidorder,bidcnts=bidcnts,bid_unique=bid_unique,cat=cat), np.arange(len(bid_unique))) + #partial(func, b=second_arg), a_args + + res = vstack(res) + res.sort('idx') + res.remove_column('idx') + + cat = Table(fitsio.read(input_path)) + + if 
len(cat) != len(res): + print('mismatched lengths, somehow get brick mask removed data!!!') + + else: + res = join(cat,res,keys=['TARGETID']) + if output_path.endswith('.fits'): + res.write(output_path,overwrite=True) + else: + np.write(output_path, np.array(res['lrg_mask'])) + del cat + del res + print('Done!', time.strftime("%H:%M:%S", time.gmtime(time.time() - time_start))) + +# bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/survey-bricks.fits.gz')) +bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/randoms/survey-bricks-dr9-randoms-0.48.0.fits')) + +if debug: + rows = np.arange(int(1e3)) +else: + rows = None + +input_path = indirfull+tp+'_full_noveto.dat.fits' +output_path = input_path #we will over-write, just adding new column + +mkfile(input_path,output_path) + +for ri in range(args.minr,args.maxr): + input_path = indirfull+tp+'_'+str(ri)+'_full_noveto.ran.fits' + output_path = input_path #we will over-write, just adding new column + + mkfile(input_path,output_path) + print('adding mask column to LRGs random number '+str(ri)) + diff --git a/scripts/mock_tools/getLRGmask_tar.py b/scripts/mock_tools/getLRGmask_tar.py new file mode 100644 index 000000000..155e6ff27 --- /dev/null +++ b/scripts/mock_tools/getLRGmask_tar.py @@ -0,0 +1,161 @@ +# Get LRG bitmasks for a catalog +# originally written by Rongpu Zhou +# Examples: +# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input catalog.fits --output catalog_lrgmask.npy +# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits --output $CSCRATCH/temp/randoms-1-0-lrgmask_v1.fits +# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/users/rongpu/targets/dr9.0/1.0.0/resolve/dr9_lrg_south_1.0.0_basic.fits --output $CSCRATCH/temp/dr9_lrg_south_1.0.0_lrgmask_v1.fits + +from __future__ import division, print_function +from functools import partial +import sys, os, glob, time, warnings, gc +import numpy as np +import matplotlib.pyplot as plt +from astropy.table import Table, vstack, hstack, join +import fitsio + +from astropy.io import fits +from astropy import wcs + +from multiprocessing import Pool +import argparse + + +time_start = time.time() + +#bitmask_dir = '/global/cscratch1/sd/rongpu/desi/lrg_pixel_bitmask/v1' +bitmask_dir = '/global/cfs/cdirs/desi/survey/catalogs/brickmasks/LRG/v1' + +n_processes = 32 + +################## +debug = False +################## + +parser = argparse.ArgumentParser() +parser.add_argument("--basedir", help="base directory for output, default is CSCRATCH",default=os.environ['CSCRATCH']) +parser.add_argument("--survey", help="e.g., SV3 or main",default='main') + +#parser.add_argument('-i', '--input', required=True) +#parser.add_argument('-o', '--output', required=True) +args = parser.parse_args() + +lssdir = args.basedir +'/'+args.survey+'/LSS/' + +tp = 'LRG' + + + +def bitmask_radec(brickid, ra, dec): + + brick_index = np.where(bricks['BRICKID']==brickid)[0][0] + + brickname = str(bricks['BRICKNAME'][brick_index]) + if bricks['PHOTSYS'][brick_index]=='N': + field = 'north' + elif bricks['PHOTSYS'][brick_index]=='S': + field = 'south' + else: + raise ValueError + # bitmask_fn = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/{}/coadd/{}/{}/legacysurvey-{}-maskbits.fits.fz'.format(field, brickname[:3], brickname, brickname) + bitmask_fn = 
os.path.join(bitmask_dir, '{}/coadd/{}/{}/{}-lrgmask.fits.gz'.format(field, brickname[:3], brickname, brickname)) + + bitmask_img = fitsio.read(bitmask_fn) + + header = fits.open(bitmask_fn)[1].header + w = wcs.WCS(header) + + coadd_x, coadd_y = w.wcs_world2pix(ra, dec, 0) + coadd_x, coadd_y = np.round(coadd_x).astype(int), np.round(coadd_y).astype(int) + + bitmask = bitmask_img[coadd_y, coadd_x] + + return bitmask + +def wrapper(bid_index,bidorder,bidcnts,bid_unique,cat): + + idx = bidorder[bidcnts[bid_index]:bidcnts[bid_index+1]] + brickid = bid_unique[bid_index] + + ra, dec = cat['RA'][idx], cat['DEC'][idx] + + bitmask = bitmask_radec(brickid, ra, dec) + + data = Table() + data['idx'] = idx + data['lrg_mask'] = bitmask + data['TARGETID'] = cat['TARGETID'][idx] + + return data + + +def mkfile(input_path,output_path): + try: + cat = fitsio.read(input_path, rows=None, columns=['lrg_mask']) + return 'file already has lrg_mask column' + except: + print('adding lrg_mask column') + + try: + cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC', 'BRICKID','TARGETID'])) + except ValueError: + cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC','TARGETID'])) + + print(len(cat)) + + #for col in cat.colnames: + # cat.rename_column(col, col.upper()) + + #if 'TARGET_RA' in cat.colnames: + # cat.rename_columns(['TARGET_RA', 'TARGET_DEC'], ['RA', 'DEC']) + + if 'BRICKID' not in cat.colnames: + from desiutil import brick + tmp = brick.Bricks(bricksize=0.25) + cat['BRICKID'] = tmp.brickid(cat['RA'], cat['DEC']) + + # Just some tricks to speed up things up + bid_unique, bidcnts = np.unique(cat['BRICKID'], return_counts=True) + bidcnts = np.insert(bidcnts, 0, 0) + bidcnts = np.cumsum(bidcnts) + bidorder = np.argsort(cat['BRICKID']) + + + # start multiple worker processes + with Pool(processes=n_processes) as pool: + res = pool.map(partial(wrapper,bidorder=bidorder,bidcnts=bidcnts,bid_unique=bid_unique,cat=cat), np.arange(len(bid_unique))) + #partial(func, b=second_arg), a_args + + res = vstack(res) + res.sort('idx') + res.remove_column('idx') + + cat = Table(fitsio.read(input_path)) + + if len(cat) != len(res): + print('mismatched lengths, somehow get brick mask removed data!!!') + + else: + res = join(cat,res,keys=['TARGETID']) + if output_path.endswith('.fits'): + res.write(output_path,overwrite=True) + else: + np.write(output_path, np.array(res['lrg_mask'])) + del cat + del res + print('Done!', time.strftime("%H:%M:%S", time.gmtime(time.time() - time_start))) + +# bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/survey-bricks.fits.gz')) +bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/randoms/survey-bricks-dr9-randoms-0.48.0.fits')) + +if debug: + rows = np.arange(int(1e3)) +else: + rows = None + +if args.survey == 'main': + input_path = lssdir+tp+'targetsDR9v1.1.1.fits' + output_path = input_path #we will over-write, just adding new column + +mkfile(input_path,output_path) + + diff --git a/scripts/mock_tools/get_speccon.py b/scripts/mock_tools/get_speccon.py new file mode 100644 index 000000000..bdbea7266 --- /dev/null +++ b/scripts/mock_tools/get_speccon.py @@ -0,0 +1,211 @@ +#adapts Mike Wilson's notebook +import glob +import numpy as np +import astropy.io.fits as fits +import argparse +import fitsio +import os + +from astropy.table import Table, join, unique, vstack +from desiutil.log import get_logger +import ephem +import astropy.units as u + +from desisurvey.config import Configuration +from astropy.time import Time 
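+# pyephem supplies the Moon illumination attached to each exposure below: an
+# ephem.Observer configured for the Mayall site feeds ephem.Moon, and after
+# moon.compute(Time(mjd).datetime) the moon_phase attribute gives the
+# illuminated fraction (0-1) at that epoch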
+from astropy.table import Table + +config = Configuration() + +mayall = ephem.Observer() +mayall.lat = config.location.latitude().to(u.rad).value +mayall.lon = config.location.longitude().to(u.rad).value +mayall.elevation = config.location.elevation().to(u.m).value + + + + +parser = argparse.ArgumentParser() +parser.add_argument("--basedir", help="base directory for output, default is CSCRATCH",default=os.environ['CSCRATCH']) +parser.add_argument("--survey", help="main or sv3",default='main') +parser.add_argument("--prog", help="dark or bright",default='dark') +parser.add_argument("--verspec",help="version for redshifts",default='daily') +parser.add_argument("--test",help="if yes, test a small fraction of the exposures",default='n') + +args = parser.parse_args() + +sw = args.survey +if args.survey == 'sv3': + sw = 'SV3' + +outf = args.basedir +'/'+sw+'/LSS/'+args.verspec+'/specobscon_'+args.prog+'.fits' + +datadir = '/global/cfs/cdirs/desi/spectro/redux/'+args.verspec+'/' +exposures = fitsio.read(datadir + '/exposures-'+args.verspec+'.fits') +if args.test == 'y': + exposures = exposures[:10] +exposures = Table(exposures) +nexp = len(exposures) +#if args.test == 'y': +# nexp = 10 +exposures['MOON_ILLUM'] = np.zeros(nexp) + +moon = ephem.Moon(mayall) +for ii in range(0,nexp): + + t = Time(exposures[ii]['MJD'], format='mjd') + moon.compute(t.datetime) + + moon_illum = moon.moon_phase + exposures[ii]['MOON_ILLUM'] = moon_illum + +print('added moon illumination, median is:'+str(np.median(exposures['MOON_ILLUM']))) + + + +addcols = ['ZD','ETCTRANS', 'ETCTHRUB', 'ETCSKY', 'ACQFWHM','SLEWANGL','MOONSEP','PMIRTEMP', 'TAIRTEMP','PARALLAC','ROTOFFST','TURBRMS','WINDSPD','WINDDIR'] + +for col in addcols: + exposures[col] = np.ones(nexp)*-99 + + +for ii in range(0,nexp): + es = str(exposures[ii]['EXPID']).zfill(8) + efn = '/global/cfs/cdirs/desi/spectro/data/'+str(exposures[ii]['NIGHT'])+'/'+es+'/desi-'+es+'.fits.fz' + hh = fitsio.read_header(efn,ext=1) + if ii//100 == ii/100: + print('at exposure '+str(ii)+ ' out of '+str(nexp)) + for col in addcols: + try: + exposures[ii][col] = hh[col] + except: + pass + +for col in addcols: + selnull = exposures[col] == -99 + print('fraction null:') + print(col,str(len(exposures[selnull])/len(exposures))) + +ocol = ['MOON_ILLUM','EXPID', 'SEEING_ETC', 'AIRMASS', 'EBV', 'TRANSPARENCY_GFA', 'SEEING_GFA', 'SKY_MAG_AB_GFA', 'SKY_MAG_G_SPEC', 'SKY_MAG_R_SPEC', 'SKY_MAG_Z_SPEC', 'EFFTIME_SPEC'] +tcol = addcols + ocol +exposures = exposures[tcol] + +if args.verspec == 'daily': + dcat = fitsio.read(args.basedir +'/'+sw+'/LSS/'+args.verspec+'/datcomb_'+args.prog+'_spec_zdone.fits') +else: + dcat = fitsio.read(datadir+'/zcatalog/ztile-'+args.survey+'-'+args.prog+'-'+'cumulative.fits') +tids = np.unique(dcat['TILEID']) + +mt = Table.read('/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-specstatus.ecsv') +wd = mt['SURVEY'] == args.survey +wd &= mt['FAPRGRM'] == args.prog +wd &= np.isin(mt['TILEID'],tids) +mtd = mt[wd] + +tiles4comb = Table() +tiles4comb['TILEID'] = mtd['TILEID'].astype(int) +tiles4comb['ZDATE'] = mtd['LASTNIGHT'] + +print('numbers of tiles, should match:') +print(len(tids),len(tiles4comb)) + +coadd_fpaths = [] + +for ii in range(0,len(tiles4comb)): + ''' + Retrieve coadd paths for all tiles + ''' + + fpath = '{}/tiles/cumulative/{:d}/{:d}'.format(datadir, tiles4comb['TILEID'][ii], tiles4comb['ZDATE'][ii]) + + + # Here we grab the path for each coadd under cumulative/tileid/zdate + fpaths = sorted(glob.glob(fpath + '/' + 
'coadd-?-{:d}-thru{}.fits'.format(tiles4comb['TILEID'][ii], tiles4comb['ZDATE'][ii]))) + + + coadd_fpaths += [x for x in fpaths] + +print(coadd_fpaths[:12]) + +def process_coadd(coadd_fpath): + ''' + Retrieve the input expids for each location on a given (tileid, thru_night). + + Note: + assuming input expids may differ by location due to quality cuts. We + run round this later by processing simulatenously all locs with the same + input expids. + ''' + tileid = coadd_fpath.split('/')[-3] + thru_night = coadd_fpath.split('/')[-2] + + coadd = Table.read(coadd_fpath, hdu='EXP_FIBERMAP') + # coadd + + # expids, cnts = np.unique(coadd['EXPID'], return_counts=True) + + # print(len(expids)) + + condition_cat = coadd['TARGETID', 'LOCATION'] + condition_cat = unique(condition_cat) + condition_cat.sort('LOCATION') + + condition_cat['TILEID'] = tileid + condition_cat['THRU_NIGHT'] = thru_night + condition_cat['IN_EXPIDS'] = 'x' * 50 + + locs, cnts = np.unique(condition_cat['LOCATION'].data, return_counts=True) + + assert cnts.max() == 1 + assert np.all(locs == condition_cat['LOCATION'].data) + + for i, loc in enumerate(locs): + coadd_loc = coadd[(coadd['LOCATION'] == loc) & (coadd['FIBERSTATUS'] == 0)] + + loc_expids = '-'.join(np.unique(coadd_loc['EXPID'].data).astype(str).tolist()) + + condition_cat['IN_EXPIDS'][i] = loc_expids + + # print(i, loc_expids) + + return condition_cat + +to_process = coadd_fpaths +condition_cat = [process_coadd(x) for x in to_process] +condition_cat = vstack(condition_cat) + +unique_in_expids = np.unique(condition_cat['IN_EXPIDS'].data).tolist() + +unique_in_expids.remove('') + +update_cols = list(exposures.dtype.names) +update_cols.remove('EXPID') +update_cols.remove('EFFTIME_SPEC') + +for col in update_cols: + condition_cat[col] = -99. + +for in_expids in unique_in_expids: + expids = np.array(in_expids.split('-')).astype(np.int) + + # Get the exposure conditions for this set of expids. + in_exposures = exposures[np.isin(exposures['EXPID'].data, expids)] + + # print(expids) + # print(in_exposures) + + mean_function = lambda x: np.average(x, weights=in_exposures['EFFTIME_SPEC']) + + # Weighted mean of the condition table for this exp. set (weights are efftime_spec) + in_exposures = in_exposures.groups.aggregate(mean_function) + + # To be extra sure, we could include matches to TILEID and thru night. 
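+    # in_exposures is now a single aggregated row: an ungrouped astropy Table
+    # acts as one group, so groups.aggregate(mean_function) collapses every
+    # column to its EFFTIME_SPEC-weighted mean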
+    to_update = condition_cat['IN_EXPIDS'] == in_expids
+
+    for col in update_cols:
+        condition_cat[col].data[to_update] = in_exposures[col].data[0]
+
+    print('Processed: {}'.format(in_expids))
+
+
+condition_cat.write(outf, format='fits', overwrite=True)
diff --git a/scripts/mock_tools/getmask_type.py b/scripts/mock_tools/getmask_type.py
new file mode 100755
index 000000000..21fa05517
--- /dev/null
+++ b/scripts/mock_tools/getmask_type.py
@@ -0,0 +1,195 @@
+# Get imaging bitmasks (e.g., LRG) for a catalog, for a given tracer type
+# originally written by Rongpu Zhou
+# Examples (for the original read_pixel_bitmask.py interface this was adapted from):
+# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input catalog.fits --output catalog_lrgmask.npy
+# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits --output $CSCRATCH/temp/randoms-1-0-lrgmask_v1.fits
+# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/users/rongpu/targets/dr9.0/1.0.0/resolve/dr9_lrg_south_1.0.0_basic.fits --output $CSCRATCH/temp/dr9_lrg_south_1.0.0_lrgmask_v1.fits
+
+from __future__ import division, print_function
+from functools import partial
+import sys, os, glob, time, warnings, gc
+import numpy as np
+import matplotlib.pyplot as plt
+from astropy.table import Table, vstack, hstack, join
+import fitsio
+
+from astropy.io import fits
+from astropy import wcs
+
+from multiprocessing import Pool
+import argparse
+
+
+time_start = time.time()
+
+#bitmask_dir = '/global/cscratch1/sd/rongpu/desi/lrg_pixel_bitmask/v1'
+
+n_processes = 32
+
+##################
+debug = False
+##################
+if os.environ['NERSC_HOST'] == 'cori':
+    scratch = 'CSCRATCH'
+elif os.environ['NERSC_HOST'] == 'perlmutter':
+    scratch = 'PSCRATCH'
+else:
+    print('NERSC_HOST is not cori or perlmutter but is '+os.environ['NERSC_HOST'])
+    sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding')
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--tracer", help="tracer type to be selected")
+parser.add_argument("--basedir", help="base directory for output, default is SCRATCH",default=os.environ[scratch])
+parser.add_argument("--survey", help="e.g., SV3 or main",default='SV3')
+parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test')
+parser.add_argument("--mver", help="version of the mask",default='1')
+parser.add_argument("--verspec",help="version for redshifts",default='everest')
+parser.add_argument("--minr", help="minimum number for random files",default=0,type=int)
+parser.add_argument("--maxr", help="maximum for random files, default is 1, but 18 are available (use parallel script for all)",default=1,type=int)
+
+#parser.add_argument('-i', '--input', required=True)
+#parser.add_argument('-o', '--output', required=True)
+args = parser.parse_args()
+
+lssdir = args.basedir +'/'+args.survey+'/LSS/'
+
+ldirspec = lssdir+args.verspec+'/'
+
+indirfull = ldirspec+'/LSScats/'+args.version+'/'
+
+tp = args.tracer
+tpr = tp[:3]
+tprl = tpr.lower()
+print(tp,tpr,tprl)
+
+bitmask_dir = '/global/cfs/cdirs/desi/survey/catalogs/brickmasks/'+tpr+'/v'+args.mver
+
+
+#if args.survey == 'main' or args.survey == 'DA02':
+#    tp += 'zdone'
+
+
+
+def bitmask_radec(brickid, ra, dec):
+
+    brick_index = np.where(bricks['BRICKID']==brickid)[0][0]
+
+    brickname = str(bricks['BRICKNAME'][brick_index])
+    if bricks['PHOTSYS'][brick_index]=='N':
+        field = 'north'
+    elif bricks['PHOTSYS'][brick_index]=='S':
+        field = 'south'
+    else:
+        raise ValueError
+    # bitmask_fn = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/{}/coadd/{}/{}/legacysurvey-{}-maskbits.fits.fz'.format(field, brickname[:3], brickname, brickname)
+    bitmask_fn = os.path.join(bitmask_dir, '{}/coadd/{}/{}/{}-{}mask.fits.gz'.format(field, brickname[:3], brickname, brickname,tprl))
+
+    bitmask_img = fitsio.read(bitmask_fn)
+
+    header = fits.open(bitmask_fn)[1].header
+    w = wcs.WCS(header)
+
+    coadd_x, coadd_y = w.wcs_world2pix(ra, dec, 0)
+    coadd_x, coadd_y = np.round(coadd_x).astype(int), np.round(coadd_y).astype(int)
+
+    bitmask = bitmask_img[coadd_y, coadd_x]
+
+    return bitmask
+
+def wrapper(bid_index,bidorder,bidcnts,bid_unique,cat):
+
+    idx = bidorder[bidcnts[bid_index]:bidcnts[bid_index+1]]
+    brickid = bid_unique[bid_index]
+
+    ra, dec = cat['RA'][idx], cat['DEC'][idx]
+
+    bitmask = bitmask_radec(brickid, ra, dec)
+
+    data = Table()
+    data['idx'] = idx
+    data[tprl+'_mask'] = bitmask
+    data['TARGETID'] = cat['TARGETID'][idx]
+
+    return data
+
+
+def mkfile(input_path,output_path):
+    try:
+        cat = fitsio.read(input_path, rows=None, columns=[tprl+'_mask'])
+        return 'file already has '+tprl+'_mask column'
+    except:
+        print('adding '+tprl+'_mask column')
+
+    try:
+        cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC', 'BRICKID','TARGETID']))
+    except ValueError:
+        cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC','TARGETID']))
+
+    print(len(cat))
+
+    #for col in cat.colnames:
+    #    cat.rename_column(col, col.upper())
+
+    #if 'TARGET_RA' in cat.colnames:
+    #    cat.rename_columns(['TARGET_RA', 'TARGET_DEC'], ['RA', 'DEC'])
+
+    if 'BRICKID' not in cat.colnames:
+        from desiutil import brick
+        tmp = brick.Bricks(bricksize=0.25)
+        cat['BRICKID'] = tmp.brickid(cat['RA'], cat['DEC'])
+
+    # Just some tricks to speed things up
+    bid_unique, bidcnts = np.unique(cat['BRICKID'], return_counts=True)
+    bidcnts = np.insert(bidcnts, 0, 0)
+    bidcnts = np.cumsum(bidcnts)
+    bidorder = np.argsort(cat['BRICKID'])
+
+
+    # start multiple worker processes
+    with Pool(processes=n_processes) as pool:
+        res = pool.map(partial(wrapper,bidorder=bidorder,bidcnts=bidcnts,bid_unique=bid_unique,cat=cat), np.arange(len(bid_unique)))
+        #partial(func, b=second_arg), a_args
+
+    res = vstack(res)
+    res.sort('idx')
+    res.remove_column('idx')
+    print('done, now writing out')
+    #cat = Table(fitsio.read(input_path))
+    catf = fitsio.FITS(input_path,'rw')
+
+    if len(cat) != len(res):
+        print('mismatched lengths; somehow the brick-mask step removed data!!!')
+
+    else:
+        #res = join(cat,res,keys=['TARGETID'])
+        catf[1].insert_column(tprl+'_mask',res[tprl+'_mask'])
+        catf.close()
+    #if output_path.endswith('.fits'):
+    #    res.write(output_path,overwrite=True)
+    #else:
+    #    np.save(output_path, np.array(res[tprl+'_mask']))
+    #del cat
+    del res
+    print('Done!', time.strftime("%H:%M:%S", time.gmtime(time.time() - time_start)))
+
+# bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/survey-bricks.fits.gz'))
+bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/randoms/survey-bricks-dr9-randoms-0.48.0.fits'))
+
+if debug:
+    rows = np.arange(int(1e3))
+else:
+    rows = None
+
+input_path = indirfull+tp+'_full_noveto.dat.fits'
+output_path = input_path #we will over-write, just adding a new column
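+# Note: mkfile() adds the <tracer>_mask column to input_path in place via
+# fitsio's insert_column, which is why output_path is the same file.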
+
+mkfile(input_path,output_path)
+
+for ri in range(args.minr,args.maxr):
+    input_path = indirfull+tp+'_'+str(ri)+'_full_noveto.ran.fits'
+    output_path = input_path #we will over-write, just adding a new column
+
+    print('adding mask column to '+tp+' random number '+str(ri))
+    mkfile(input_path,output_path)
+
diff --git a/scripts/mock_tools/lss_cat_match_dr16.py b/scripts/mock_tools/lss_cat_match_dr16.py
new file mode 100755
index 000000000..6677b59d3
--- /dev/null
+++ b/scripts/mock_tools/lss_cat_match_dr16.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+
+import argparse
+import glob
+import os
+from pathlib import Path
+
+from astropy import units as u
+from astropy.coordinates import match_coordinates_sky, SkyCoord
+from astropy.table import Table, hstack
+import numpy as np
+import fitsio
+
+parser = argparse.ArgumentParser(description="Match the input catalog to DR16 LSS.")
+
+parser.add_argument("-o", "--out-dir", type=str, required=True, help="Directory to save matched catalog to.")
+parser.add_argument("--tracer",help='tracer type to match between',type=str,default='ELG')
+parser.add_argument("--version",help='LSS catalog version',type=str,default='test')
+parser.add_argument("--specrel",help='spectroscopic reduction version (daily, guadalupe or fuji)',type=str,default='daily')
+
+args = parser.parse_args()
+
+out_loc = Path(args.out_dir)
+if not os.path.isdir(out_loc):
+    os.mkdir(out_loc)
+
+
+
+if args.specrel == 'daily':
+    survey = 'main'
+elif args.specrel == 'guadalupe':
+    survey = 'DA02'
+elif args.specrel == 'fuji':
+    survey = 'SV3'
+else:
+    raise ValueError('unsupported specrel: '+args.specrel)
+ROOT = "/global/cfs/cdirs/desi/survey/catalogs/"+survey+"/LSS/"+args.specrel+"/LSScats/"+args.version+"/"
+fname = args.tracer+'_full.dat.fits'
+with fitsio.FITS(ROOT + fname) as h:
+    tab = h[1].read()
+    sel = tab['ZWARN'] != 999999 #reject the targets that were not observed
+    desi_table = tab[sel]
+print("Loaded "+fname+"... matching "+str(len(tab[sel]))+' rows')
+
+
+
+# Pull out the RA/DEC for use in matching.
+desi_ra = desi_table["RA"]
+desi_dec = desi_table["DEC"]
+
+desi_skycoords = SkyCoord(ra=desi_ra, dec=desi_dec, unit="deg")
+
+# Load DR16
+DR16_ROOT = "/global/cfs/cdirs/sdss/staging/dr16/eboss/lss/catalogs/DR16/"
+dr16_fname = "eBOSS_"+args.tracer+"_full_ALLdata-vDR16.fits"
+
+cols_eboss = ["RA", "DEC", "Z", "PLATE", "MJD", "FIBERID","IMATCH"]
+
+with fitsio.FITS(DR16_ROOT + dr16_fname) as h:
+    eboss_table = h[1].read_columns(columns=cols_eboss)
+    sel = eboss_table['IMATCH'] == 1
+    sel |= eboss_table['IMATCH'] == 2
+    eboss_table = eboss_table[sel]
+print("Loaded "+dr16_fname+"... matching "+str(len(eboss_table))+' rows')
+
+eboss_ra = np.asarray([i["RA"] for i in eboss_table])
+eboss_dec = np.asarray([i["DEC"] for i in eboss_table])
+eboss_skycoords = SkyCoord(ra=eboss_ra, dec=eboss_dec, unit="deg")
+
+# This is the line that actually matches the two table RA/DECs to each other
+print("Matching...")
+idx, sep2d, dist3d = match_coordinates_sky(desi_skycoords, eboss_skycoords)
+
+# 2d separation in arcseconds to constrain our search radius.
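+# Aside on the convention used here: match_coordinates_sky returns, for every
+# DESI row, the index of its nearest eBOSS neighbour, so eboss_table[idx] is
+# row-aligned with desi_table and sep2d[i] is the on-sky distance to that
+# nearest match.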
+d2d = np.asarray(sep2d.to(u.arcsec))
+
+# Keep everything whose match is within 1 arcsecond,
+# essentially treating any match that close as "correct"
+match_keep = d2d < 1
+_, keep_counts = np.unique(idx[match_keep], return_counts=True)
+print("Matched "+str(np.sum(match_keep))+" entries from input catalog to DR16 LSS "+args.tracer+" catalog.")
+
+# If there are any double matches we'll need to handle that
+if np.any(keep_counts > 1):
+    print("Double matches found...")
+
+# Reduces the tables to the matched entries using the indices of matches
+desi_keep = Table(desi_table[match_keep])
+eboss_keep = Table(eboss_table[idx][match_keep])
+eboss_keep.rename_column("Z", "Z_SDSS")
+eboss_keep.remove_columns(['RA','DEC'])
+joined = hstack([desi_keep, eboss_keep])
+
+# The SDSS RA/DEC were already dropped from eboss_keep above, since we keep
+# the coordinates from the DESI portion of the table.
+#del joined["RA"]
+#del joined["DEC"]
+
+# Setting the save name.
+out_name = args.tracer+"_cat_"+args.specrel+'_'+args.version+"_LSSfull_DR16_match.fits"
+
+joined.write(out_loc / out_name, format="fits", overwrite=True)
+
diff --git a/scripts/mock_tools/mkBGS_flavors.py b/scripts/mock_tools/mkBGS_flavors.py
new file mode 100644
index 000000000..f3620606c
--- /dev/null
+++ b/scripts/mock_tools/mkBGS_flavors.py
@@ -0,0 +1,127 @@
+#standard python
+import sys
+import os
+import shutil
+import unittest
+from datetime import datetime
+import json
+import numpy as np
+import fitsio
+import glob
+import argparse
+from astropy.table import Table,join,unique,vstack
+
+#from this package
+import LSS.SV3.cattools as ct
+import LSS.common_tools as common
+
+from LSS.tabulated_cosmo import TabulatedDESI
+cosmo = TabulatedDESI()
+dis_dc = cosmo.comoving_radial_distance
+
+if os.environ['NERSC_HOST'] == 'cori':
+    scratch = 'CSCRATCH'
+elif os.environ['NERSC_HOST'] == 'perlmutter':
+    scratch = 'PSCRATCH'
+else:
+    print('NERSC_HOST is not cori or perlmutter but is '+os.environ['NERSC_HOST'])
+    sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding')
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--tracer", help="tracer type to be selected; BGS_ANY or BGS_BRIGHT",default='BGS_BRIGHT')
+parser.add_argument("--survey", help="e.g., SV3, DA02, main",default='SV3')
+parser.add_argument("--verspec",help="version for redshifts",default='fuji')
+parser.add_argument("--basedir", help="base directory for output, default is CSCRATCH",default=os.environ[scratch])
+parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test')
+parser.add_argument("--clus", help="make the data clustering files; these are cut to a small subset of columns",default='n')
+parser.add_argument("--clusran", help="make the random clustering files; these are cut to a small subset of columns",default='n')
+parser.add_argument("--minr", help="minimum number for random files",default=0,type=int)
+parser.add_argument("--maxr", help="maximum for random files; default is 18 (all of them)",default=18,type=int)
+
+parser.add_argument("--mkcats", help="make the subsampled catalogs ",default='y')
+parser.add_argument("--nz", help="get n(z) ",default='y')
+
+args = parser.parse_args()
+
+dirin = args.basedir+'/'+args.survey+ '/LSS/'+args.verspec+'/LSScats/'+args.version+'/'
+dirout = dirin +'BGSsubcats/'
+
+zw = ''
+#if args.survey == 'DA02':
+#    zw = 'zdone'
+
+if not os.path.exists(dirout):
+    os.mkdir(dirout)
+    print('made '+dirout)
+
+def dl(z):   # luminosity distance from z=0 to z
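+    # dl(z) = (1+z) * comoving distance; dm(z) below is then the distance
+    # modulus, 5*log10(dl/10 pc), i.e. 5*log10(dl[Mpc]) + 25.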
+    return dis_dc(z)*(1.+z)
+
+def dm(z):
+    return 5.*np.log10(dl(z)) + 25.
+
+
+def AbsMag(mag,z):
+    return mag - dm(z)
+
+def cut_abr_ct(data,maxr=0,minr=-100,minct=-100,maxct=100,zmin=0.01,zmax=0.5):
+    selz = data['Z'] > zmin
+    selz &= data['Z'] < zmax
+    data = data[selz]
+    r_dered = 22.5 - 2.5*np.log10(data['flux_r_dered'])
+    g_dered = 22.5 - 2.5*np.log10(data['flux_g_dered'])
+
+    abr = r_dered - dm(data['Z'])
+    abg = g_dered - dm(data['Z'])
+    ct = g_dered-r_dered-0.14*(data['Z']-0.1)/0.05 #rough tilt based on peak of red g-r
+    sel = abr > minr
+    sel &= abr < maxr
+    sel &= ct > minct
+    sel &= ct < maxct
+    return data[sel]
+
+ctc = 0.7 #rough red/blue cut
+abl = [-21.5,-20.5,-19.5]
+P0 = 7000
+dz = 0.01
+zmin = 0.1
+zmax = 0.5
+
+regl = ['_N','_S']
+for reg in regl:
+    if args.mkcats == 'y':
+        dat = fitsio.read(dirin+args.tracer+zw+reg+'_clustering.dat.fits')
+        for ab in abl:
+            dato = cut_abr_ct(dat,maxr=ab)
+            outf = dirout+args.tracer+zw+str(ab)+reg+'_clustering.dat.fits'
+            common.write_LSS(dato,outf)
+            dato = cut_abr_ct(dat,maxr=ab,maxct=ctc)
+            outf = dirout+args.tracer+zw+str(ab)+'blue'+reg+'_clustering.dat.fits'
+            common.write_LSS(dato,outf)
+            dato = cut_abr_ct(dat,maxr=ab,minct=ctc)
+            outf = dirout+args.tracer+zw+str(ab)+'red'+reg+'_clustering.dat.fits'
+            common.write_LSS(dato,outf)
+
+        for rann in range(args.minr,args.maxr):
+            dat = fitsio.read(dirin+args.tracer+zw+reg+'_'+str(rann)+'_clustering.ran.fits')
+            for ab in abl:
+                dato = cut_abr_ct(dat,maxr=ab)
+                outf = dirout+args.tracer+zw+str(ab)+reg+'_'+str(rann)+'_clustering.ran.fits'
+                common.write_LSS(dato,outf)
+                dato = cut_abr_ct(dat,maxr=ab,maxct=ctc)
+                outf = dirout+args.tracer+zw+str(ab)+'blue'+reg+'_'+str(rann)+'_clustering.ran.fits'
+                common.write_LSS(dato,outf)
+                dato = cut_abr_ct(dat,maxr=ab,minct=ctc)
+                outf = dirout+args.tracer+zw+str(ab)+'red'+reg+'_'+str(rann)+'_clustering.ran.fits'
+                common.write_LSS(dato,outf)
+    if args.nz == 'y':
+        for ab in abl:
+            for cl in ['','blue','red']:
+                fb = dirout+args.tracer+zw+str(ab)+cl+reg
+                fcr = dirin+args.tracer+zw+reg+'_0_clustering.ran.fits'
+                fcd = fb+'_clustering.dat.fits'
+                fout = fb+'_nz.txt'
+                common.mknz(fcd,fcr,fout,bs=dz,zmin=zmin,zmax=zmax)
+                common.addnbar(fb,bs=dz,zmin=zmin,zmax=zmax,P0=P0)
+
diff --git a/scripts/mock_tools/mkBGS_flavors_kEE.py b/scripts/mock_tools/mkBGS_flavors_kEE.py
new file mode 100644
index 000000000..bc0e2dc4a
--- /dev/null
+++ b/scripts/mock_tools/mkBGS_flavors_kEE.py
@@ -0,0 +1,125 @@
+#standard python
+import sys
+import os
+import shutil
+import unittest
+from datetime import datetime
+import json
+import numpy as np
+import fitsio
+import glob
+import argparse
+from astropy.table import Table,join,unique,vstack
+
+#from kcorr package, needs to be added to path
+# ke_code_root = '/global/homes/a/ajross/desicode/DESI_ke'
+# sys.path.append(ke_code_root)
+# os.environ['CODE_ROOT'] = ke_code_root
+# from smith_kcorr import GAMA_KCorrection
+# from rest_gmr import smith_rest_gmr
+# from tmr_ecorr import tmr_ecorr, tmr_q
+
+#from this package
+import LSS.SV3.cattools as ct
+import LSS.common_tools as common
+
+from LSS.tabulated_cosmo import TabulatedDESI
+cosmo = TabulatedDESI()
+dis_dc = cosmo.comoving_radial_distance
+
+if os.environ['NERSC_HOST'] == 'cori':
+    scratch = os.environ['CSCRATCH']
+elif os.environ['NERSC_HOST'] == 'perlmutter':
+    scratch = os.environ['PSCRATCH']
+else:
+    print('NERSC_HOST is not cori or perlmutter but is '+os.environ['NERSC_HOST'])
+    sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding')
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--tracer", help="tracer type to be selected; BGS_ANY or BGS_BRIGHT",default='BGS_BRIGHT')
+parser.add_argument("--survey", help="e.g., SV3, DA02, main",default='SV3')
+parser.add_argument("--verspec",help="version for redshifts",default='fuji')
+parser.add_argument("--basedir", help="base directory for output, default is (C/P)SCRATCH",default=scratch)
+parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test')
+parser.add_argument("--minr", help="minimum number for random files",default=0,type=int)
+parser.add_argument("--maxr", help="maximum for random files; default is 18 (all of them)",default=18,type=int)
+
+parser.add_argument("--mkcats", help="make the subsampled catalogs ",default='y')
+parser.add_argument("--nz", help="get n(z) ",default='y')
+
+args = parser.parse_args()
+
+dirin = args.basedir+'/'+args.survey+ '/LSS/'+args.verspec+'/LSScats/'+args.version+'/'
+dirout = dirin +'BGSsubcats/'
+
+zw = ''
+#if args.survey == 'DA02':
+#    zw = 'zdone'
+
+if not os.path.exists(dirout):
+    os.mkdir(dirout)
+    print('made '+dirout)
+
+
+
+def cut_abr_ct(data,maxr=0,minr=-100,minct=-100,maxct=100,zmin=0.01,zmax=0.5):
+    abr = data['ABSMAG_R']
+    ct = data['REST_GMR_0P1']
+    sel = abr > minr
+    sel &= abr < maxr
+    sel &= ct > minct
+    sel &= ct < maxct
+    return data[sel]
+
+ctc = 0.75 #rough red/blue cut
+abl = [-21.5,-20.5,-19.5]
+P0 = 7000
+dz = 0.01
+zmin = 0.01
+if args.survey == 'DA02':
+    zmin = 0.1
+zmax = 0.5
+
+regl = ['_N','_S']
+for reg in regl:
+    if args.mkcats == 'y':
+        dat = Table(fitsio.read(dirin+args.tracer+zw+reg+'_clustering.dat.fits'))
+        #selz = dat['Z'] > zmin
+        #selz &= data['Z'] < zmax
+        #data = data[selz]
+
+        for ab in abl:
+            dato = cut_abr_ct(dat,maxr=ab)
+            outf = dirout+args.tracer+zw+str(ab)+'ke'+reg+'_clustering.dat.fits'
+            common.write_LSS(dato,outf)
+            dato = cut_abr_ct(dat,maxr=ab,maxct=ctc)
+            outf = dirout+args.tracer+zw+str(ab)+'keblue'+reg+'_clustering.dat.fits'
+            common.write_LSS(dato,outf)
+            dato = cut_abr_ct(dat,maxr=ab,minct=ctc)
+            outf = dirout+args.tracer+zw+str(ab)+'kered'+reg+'_clustering.dat.fits'
+            common.write_LSS(dato,outf)
+
+        for rann in range(args.minr,args.maxr):
+            dat = fitsio.read(dirin+args.tracer+zw+reg+'_'+str(rann)+'_clustering.ran.fits')
+            for ab in abl:
+                dato = cut_abr_ct(dat,maxr=ab)
+                outf = dirout+args.tracer+zw+str(ab)+'ke'+reg+'_'+str(rann)+'_clustering.ran.fits'
+                common.write_LSS(dato,outf)
+                dato = cut_abr_ct(dat,maxr=ab,maxct=ctc)
+                outf = dirout+args.tracer+zw+str(ab)+'keblue'+reg+'_'+str(rann)+'_clustering.ran.fits'
+                common.write_LSS(dato,outf)
+                dato = cut_abr_ct(dat,maxr=ab,minct=ctc)
+                outf = dirout+args.tracer+zw+str(ab)+'kered'+reg+'_'+str(rann)+'_clustering.ran.fits'
+                common.write_LSS(dato,outf)
+    if args.nz == 'y':
+        for ab in abl:
+            for cl in ['ke','keblue','kered']:
+                fb = dirout+args.tracer+zw+str(ab)+cl+reg
+                fcr = dirin+args.tracer+zw+reg+'_0_clustering.ran.fits'
+                fcd = fb+'_clustering.dat.fits'
+                fout = fb+'_nz.txt'
+                common.mknz(fcd,fcr,fout,bs=dz,zmin=zmin,zmax=zmax)
+                common.addnbar(fb,bs=dz,zmin=zmin,zmax=zmax,P0=P0)
+
diff --git a/scripts/mock_tools/mkCat_tar4ang.py b/scripts/mock_tools/mkCat_tar4ang.py
new file mode 100644
index 000000000..f0080be7b
--- /dev/null
+++ b/scripts/mock_tools/mkCat_tar4ang.py
@@ -0,0 +1,111 @@
+'''
+one executable to create catalogs for given target type meant for angular clustering
+'''
+
+
+
+#standard python
+import sys
+import os
+import shutil
+import unittest
+from datetime import datetime
+import json
+import numpy as np
+import fitsio
+import glob
+import argparse
+from astropy.table import Table,join,unique,vstack
+from matplotlib import pyplot as plt
+
+#sys.path.append('../py')
+
+#from this package
+import LSS.imaging.select_samples as ss
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--type", help="tracer type to be selected")
+parser.add_argument("--tarver", help="version of targeting",default='0.57.0')
+parser.add_argument("--survey", help="e.g., sv1 or main",default='sv3')
+parser.add_argument("--basedir", help="base directory for output, default is CSCRATCH",default=os.environ['CSCRATCH'])
+parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test')
+
+args = parser.parse_args()
+
+type = args.type
+tarver = args.tarver
+version = args.version
+basedir = args.basedir
+survey = args.survey
+
+if survey == 'main':
+    tp = 'DESI_TARGET'
+    sw = ''
+if survey == 'sv1':
+    tp = 'SV1_DESI_TARGET'
+    sw = 'sv1'
+if survey == 'sv3':
+    tp = 'SV3_DESI_TARGET'
+    sw = 'sv3'
+
+outdir = basedir+'/tarcat/v'+version+'/tv'+tarver+'/'
+if not os.path.exists( basedir+'/tarcat'):
+    os.mkdir(basedir+'/tarcat')
+    print('created '+basedir+'/tarcat')
+
+if not os.path.exists( basedir+'/tarcat/v'+version):
+    os.mkdir(basedir+'/tarcat/v'+version)
+    print('created '+basedir+'/tarcat/v'+version)
+
+if not os.path.exists(outdir):
+    os.mkdir(outdir)
+    print('created '+outdir)
+
+dirsweeps = '/global/project/projectdirs/cosmo/data/legacysurvey/dr9/south/sweep/9.0/'
+dirsweepn = '/global/project/projectdirs/cosmo/data/legacysurvey/dr9/north/sweep/9.0/'
+targroot = '/project/projectdirs/desi/target/catalogs/dr9/'+tarver+'/targets/'+survey+'/resolve/'
+ranroot = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-'
+nran = 10
+
+sfs = glob.glob(dirsweeps+'sweep*')
+sfn = glob.glob(dirsweepn+'sweep*')
+
+
+
+elgandlrgbits = [1,5,6,7,8,9,11,12,13] #these get used to veto imaging area; combination of bits applied to ELGs and LRGs in DR8 targeting
+
+mkbsamp = True #make the base sample
+domaskd = True #mask data based on mask bits above
+domaskr = True #mask randoms
+print('type being used for bright/dark '+type[:3])
+
+#columns to select from target sample
+keys = ['RA', 'DEC', 'BRICKID', 'BRICKNAME','MORPHTYPE','DCHISQ','FLUX_G', 'FLUX_R', 'FLUX_Z','FLUX_W1','FLUX_W2','MW_TRANSMISSION_G', 'MW_TRANSMISSION_R', 'MW_TRANSMISSION_Z', 'MW_TRANSMISSION_W1', 'MW_TRANSMISSION_W2','FLUX_IVAR_G', 'FLUX_IVAR_R', 'FLUX_IVAR_Z','NOBS_G', 'NOBS_R', 'NOBS_Z','PSFDEPTH_G', 'PSFDEPTH_R', 'PSFDEPTH_Z', 'GALDEPTH_G', 'GALDEPTH_R',\
+    'GALDEPTH_Z','FIBERFLUX_G', 'FIBERFLUX_R', 'FIBERFLUX_Z', 'FIBERTOTFLUX_G', 'FIBERTOTFLUX_R', 'FIBERTOTFLUX_Z',\
+    'MASKBITS', 'EBV', 'PHOTSYS','TARGETID',tp,'SHAPE_R']
+
+
+if mkbsamp: #concatenate target files for given type, with column selection hardcoded
+    prog = 'dark'
+    if type[:3] == 'BGS':
+        prog = 'bright'
+    ss.gather_targets(type,targroot,outdir,tarver,survey,prog,keys=keys)
+
+if domaskd:
+    dd = fitsio.read(outdir+type+sw +'targetsDR9v'+tarver.strip('.')+'.fits' )
+    dd = ss.mask(dd,elgandlrgbits)
+    outf = outdir+type+sw +'targetsDR9v'+tarver.strip('.')+'_masked.fits'
+    fitsio.write(outf,dd,clobber=True)
+    print('wrote to '+outf)
+
+if domaskr:
+    for ii in range(0,nran):
+        rr = fitsio.read(ranroot+str(ii)+'.fits',columns=['RA','DEC','BRICKID','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z','MASKBITS'])
+        #need to restrict columns on line above otherwise run out of memory
+        rr = ss.mask(rr,elgandlrgbits)
+        outf = outdir+'randomsDR9v'+tarver.strip('.')+'_'+str(ii)+'_masked.fits'
+        fitsio.write(outf,rr,clobber=True)
+        print('wrote to '+outf)
+
+
diff --git a/scripts/mock_tools/mkemlin.py b/scripts/mock_tools/mkemlin.py
new file mode 100644
index 000000000..294ccf1c7
--- /dev/null
+++ b/scripts/mock_tools/mkemlin.py
@@ -0,0 +1,100 @@
+#standard python
+import sys
+import os
+import shutil
+import unittest
+from datetime import datetime
+import json
+import numpy as np
+import healpy as hp
+import fitsio
+import glob
+import argparse
+from astropy.table import Table,join,unique,vstack
+from matplotlib import pyplot as plt
+from desitarget.io import read_targets_in_tiles
+from desitarget.mtl import inflate_ledger
+from desimodel.footprint import is_point_in_desi
+import desimodel.footprint as foot
+from desitarget import targetmask
+
+#import logging
+#logging.getLogger().setLevel(logging.ERROR)
+
+
+#sys.path.append('../py') #this requires running from LSS/bin, *something* must allow linking without this but is not present in code yet
+
+#from this package
+#try:
+import LSS.main.cattools as ct
+from LSS.globals import main
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--prog", help="dark or bright is supported",default='dark')
+
+args = parser.parse_args()
+print(args)
+
+specrel = 'daily'
+prog = args.prog
+progu = prog.upper()
+
+
+mainp = main(prog)
+
+mt = mainp.mtld
+tiles = mainp.tiles
+
+wd = mt['SURVEY'] == 'main'
+wd &= mt['ZDONE'] == 'true'
+wd &= mt['FAPRGRM'] == prog
+mtd = mt[wd]
+print('found '+str(len(mtd))+' '+prog+' time main survey tiles with zdone true for '+specrel+' version of reduced spectra')
+
+
+tiles4comb = Table()
+tiles4comb['TILEID'] = mtd['TILEID']
+tiles4comb['ZDATE'] = mtd['ARCHIVEDATE']
+tiles4comb['THRUDATE'] = mtd['LASTNIGHT']
+
+tiles.keep_columns(['TILEID','RA','DEC'])
+#print(tiles.dtype.names)
+
+tiles4comb = join(tiles4comb,tiles,keys=['TILEID'])
+
+print('length of tiles4comb after join (should match the count above): '+str(len(tiles4comb)))
+
+
+outdir = '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/daily/emtiles/'
+guadtiles = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/DA02/LSS/guadalupe/datcomb_'+prog+'_spec_zdone.fits',columns=['TILEID'])
+guadtiles = np.unique(guadtiles['TILEID'])
+gtids = np.isin(tiles4comb['TILEID'],guadtiles)
+tiles4em = tiles4comb[~gtids]
+ndone = 0
+
+def mkEMtile(ii):
+    if ii >= len(tiles4em):
+        print('out of range!')
+    else:
+        tile,zdate,tdate = tiles4em['TILEID'][ii],tiles4em['ZDATE'][ii],tiles4em['THRUDATE'][ii]
+        outf = outdir+'emline-'+str(tile)+'.fits'
+        if not os.path.isfile(outf):
+            tdate = str(tdate)
+            ct.combEMdata_daily(tile,zdate,tdate,outf=outf)
+            print('wrote '+outf)
+
+if __name__ == '__main__':
+
+    from multiprocessing import Pool
+    N = 64
+    if os.environ['NERSC_HOST'] == 'perlmutter':
+        N = 128
+        print('using 128 cpus')
+    for n in range(0,len(tiles4em),N):
+        inds = list(range(n, min(n+N, len(tiles4em))))
+        with Pool(N) as p:
+            p.map(mkEMtile,inds)
+        print(n,len(tiles4em))
+
diff --git a/scripts/mock_tools/mknzplots.py b/scripts/mock_tools/mknzplots.py
new file mode 100644
index 000000000..8421cbbb9
--- /dev/null
+++ b/scripts/mock_tools/mknzplots.py
@@ -0,0 +1,66 @@
+import sys,os
+import argparse
+
+import numpy as np
+from matplotlib import pyplot as plt
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--survey", help="current choices are SV3,DA02,or main",default='SV3')
+parser.add_argument("--version", help="catalog version",default='test')
+parser.add_argument("--verspec",help="version for redshifts",default='everest')
+
+
+args = parser.parse_args()
+print(args)
+catdir='/global/cfs/cdirs/desi/survey/catalogs/'
+indir = catdir +args.survey+'/LSS/' +args.verspec+'/LSScats/'+args.version+'/'
+
+dirout = indir+'/plots/'
+
+if not os.path.exists(dirout):
+    os.mkdir(dirout)
+    print('made '+dirout)
+
+
+types = ['ELG','ELG_LOP','LRG','ELG_LOPnotqso','QSO','BGS_ANY','BGS_BRIGHT']
+if args.survey == 'SV3':
+    types = ['ELG','ELG_HIP','LRG','LRG_main','ELG_HIPnotqso','QSO','BGS_ANY','BGS_BRIGHT']
+
+
+
+for tp in types:
+    wzm = ''
+    if args.survey != 'SV3':
+        wzm = 'zdone'
+
+    regl = ['_N','_S']
+    cl = ['-r','-b']
+    ll = ['BASS/MzLS','DECaLS']
+    p = False
+    for reg,c,l in zip(regl,cl,ll):
+        fn = indir+tp+wzm+reg+'_nz.dat'
+        if os.path.exists(fn):
+            p = True
+            zdat = np.loadtxt(fn).transpose()
+            plt.plot(zdat[0],zdat[3],c,label=l)
+
+        else:
+            print('did not find '+fn)
+    if p:
+        if tp[:3] == 'ELG':
+            plt.ylim(0,.0013)
+            print(tp)
+        if tp[:3] == 'BGS':
+            plt.ylim(0,.05)
+            plt.xlim(0,.6)
+            print(tp)
+
+        plt.legend()
+        plt.xlabel('z (redshift)')
+        plt.ylabel(r'$n(z)~(h/\mathrm{Mpc})^3$')
+        plt.title(args.survey+' '+tp)
+
+
+        plt.savefig(dirout+'nz'+args.survey+tp+'.png')
+        plt.clf()
+
\ No newline at end of file
diff --git a/scripts/mock_tools/perfiber_success_stats.py b/scripts/mock_tools/perfiber_success_stats.py
new file mode 100644
index 000000000..c6d5b2703
--- /dev/null
+++ b/scripts/mock_tools/perfiber_success_stats.py
@@ -0,0 +1,175 @@
+import numpy as np
+#!pip install astropy
+#!pip install fitsio
+from scipy import stats
+from scipy.stats import norm
+import fitsio
+import glob
+import os
+import sys
+import matplotlib.pyplot as plt
+import statistics
+import argparse
+import astropy
+from astropy.table import Table,join
+from astropy.time import Time
+from astropy.io import fits
+
+import LSS.common_tools as common
+
+
+parser = argparse.ArgumentParser()
+
+basedir='/global/cfs/cdirs/desi/survey/catalogs'
+parser.add_argument("--tracer", help="tracer type to be selected",default='all')
+parser.add_argument("--basedir", help="base directory for input/output",default=basedir)
+parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='DA02')
+parser.add_argument("--verspec",help="version for redshifts",default='guadalupe')
+parser.add_argument("--mkfiles",help="whether or not to make the files",default='n')
+#parser.add_argument("--tracer",help="tracer type (e.g., LRG)",default='LRG')
+
+args = parser.parse_args()
+basedir = args.basedir
+survey = args.survey
+specver = args.verspec
+#tp = args.tracer
+
+
+
+#ff = fitsio.read(filepathLF)
+#hdul = fits.open(filepathLF)
+#ff2 = fitsio.read(filepathBGS)
+#hdul = fits.open(filepathBGS)
+
+if args.tracer == 'all':
+    tracers = ['QSO','LRG','ELG','BGS_ANY']
+else:
+    tracers = [args.tracer]
+
+if args.mkfiles == 'y':
+    for tp in tracers:
+        if survey == 'DA02':
+            if tp == 'LRG':
+                bit = 1 #for selecting LRG
+            if tp == 'ELG':
+                bit = 2
+            if tp == 'QSO':
+                bit = 4
+            if tp == 'BGS_ANY':
+                zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_zdone.fits'
+                dz = Table(fitsio.read(zf))
+
+                desitarg = 'BGS_TARGET'
+                wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0)
+            else:
+                zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_zdone.fits'
+                dz = Table(fitsio.read(zf))
+                desitarg = 'DESI_TARGET'
+                wtype = ((dz[desitarg] & bit) > 0)
+                if tp == 'ELG':
+                    wtype &= ((dz[desitarg] & 4) == 0) #remove QSO
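+            # Toy example of the bit logic above: an object with
+            # DESI_TARGET = 6 = 2|4 passes both the ELG (bit value 2) and
+            # QSO (bit value 4) selections; since 6 & 4 != 0, the cut above
+            # removes such objects from the ELG sample.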
+            print(len(dz[wtype]))
+            #dz = dz[wtype&wg]
+            dz = dz[wtype]
+
+            dz = common.cut_specdat(dz)
+            from LSS.globals import main
+            pars = main(tp,args.verspec)
+
+
+
+        elif survey == 'SV3':
+            #sys.exit('not written for SV3 yet')
+            if tp != 'BGS_ANY':
+                zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_Alltiles.fits'
+                dz = Table(fitsio.read(zf))
+                desitarg = 'SV3_DESI_TARGET'
+                if tp == 'LRG':
+                    bit = 1 #for selecting LRG
+                if tp == 'ELG':
+                    bit = 2
+                if tp == 'QSO':
+                    bit = 4
+                wtype = ((dz[desitarg] & bit) > 0)
+                if tp == 'ELG':
+                    wtype &= ((dz[desitarg] & 4) == 0) #remove QSO
+            else:
+                zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_Alltiles.fits'
+                dz = Table(fitsio.read(zf))
+                desitarg = 'SV3_BGS_TARGET'
+                wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0)
+
+            print(len(dz[wtype]))
+            #dz = dz[wtype&wg]
+            dz = dz[wtype]
+            wz = dz['COADD_FIBERSTATUS'] == 0
+            dz = dz[wz]
+
+        else:
+            zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_'+tp+'_tarspecwdup_zdone.fits'
+            dz = Table(fitsio.read(zf))
+            if tp == 'ELG':
+                wtype = ((dz['DESI_TARGET'] & 4) == 0) #remove QSO
+                dz = dz[wtype]
+            dz = common.cut_specdat(dz)
+            from LSS.globals import main
+            pars = main(tp,args.verspec)
+
+
+        z_tot = dz['ZWARN'] != 999999
+        z_tot &= dz['ZWARN']*0 == 0
+
+
+        if tp == 'LRG':
+            z_suc = dz['ZWARN']==0
+            z_suc &= dz['DELTACHI2']>15
+            z_suc &= dz['Z']<1.5
+
+        if tp == 'ELG':
+            o2f = fitsio.read(pars.elgzf,columns=['TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR'])
+            dz = join(dz,o2f,keys=['TARGETID','TILEID','LOCATION'])
+            o2c = np.log10(dz['OII_FLUX'] * np.sqrt(dz['OII_FLUX_IVAR']))+0.2*np.log10(dz['DELTACHI2'])
+            z_suc = o2c > 0.9
+
+        if tp == 'QSO':
+            qsozf = pars.qsozf
+            if specver == 'guadalupe':
+                qsozf = '/global/cfs/cdirs/desi/users/edmondc/QSO_catalog/guadalupe/QSO_cat_guadalupe_cumulative.fits'
+            arz = Table(fitsio.read(qsozf))
+            arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','Z_QN'])
+            arz['TILEID'] = arz['TILEID'].astype(int)
+
+            #arz = fitsio.read(qsozf,columns=['TARGETID','LOCATION','TILEID','Z','Z_QN'])
+
+            #arz['TILEID'] = arz['TILEID'].astype(int)
+            dz = join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF'])
+            #dz['Z'].name = 'Z_RR' #rename the original redrock redshifts
+            #dz['Z_QF'].name = 'Z' #the redshifts from the quasar file should be used instead
+
+            z_suc = ~dz['Z_QF'].mask
+
+
+        if tp == 'BGS_ANY':
+            z_suc = dz['ZWARN']==0
+            z_suc &= dz['DELTACHI2']>40
+
+        #print(len(ff[z_suc]),len(ff[z_tot]))
+        print("zsuccess rate for "+tp,len(dz[z_suc&z_tot])/len(dz[z_tot]))
+        fibl,n_tot = np.unique(dz[z_tot]['FIBER'],return_counts=True)
+        fiblg,n_g = np.unique(dz[z_suc&z_tot]['FIBER'],return_counts=True)
+        fib_test = np.isin(fibl,fiblg)
+        z_tot &= np.isin(dz['FIBER'],fibl[fib_test])
+        fibl,n_tot = np.unique(dz[z_tot]['FIBER'],return_counts=True)
+
+        if np.array_equal(fibl,fiblg):
+            gfrac = n_g/n_tot
+        else:
+            sys.exit('need to put something in for mismatch fiber lists')
+
+        fn = basedir+'/'+survey+'/LSS/'+specver+"/"+tp+'_zsuccess.txt'
+        fo = open(fn,'w')
+        for ii in range(len(fibl)):
+            fo.write(str(fibl[ii])+' '+str(n_g[ii]/n_tot[ii])+' '+str(n_g[ii])+' '+str(n_tot[ii])+'\n')
+        fo.close()
+
+
diff --git a/scripts/mock_tools/pkrun.py b/scripts/mock_tools/pkrun.py
new file mode 100644
index 000000000..669b8ab0c
--- /dev/null
+++ b/scripts/mock_tools/pkrun.py
@@ -0,0 +1,278 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# To run: srun -n 64 python pkrun.py --tracer ELG...
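+# Hedged usage sketches (flags per the argparse options defined below):
+#   srun -n 64 python pkrun.py --tracer LRG --survey DA02 --verspec guadalupe
+#   srun -n 64 python pkrun.py --tracer ELG LRG --survey DA02   # cross-correlation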
+
+import os
+import argparse
+import logging
+
+import numpy as np
+from astropy.table import Table, vstack
+from matplotlib import pyplot as plt
+
+from pypower import CatalogFFTPower, PowerSpectrumStatistics, CatalogSmoothWindow, PowerSpectrumSmoothWindow, PowerSpectrumOddWideAngleMatrix, PowerSpectrumSmoothWindowMatrix, utils, setup_logging
+from LSS.tabulated_cosmo import TabulatedDESI
+
+from xirunpc import read_clustering_positions_weights, concatenate_data_randoms, compute_angular_weights, catalog_dir, get_regions, get_zlims, get_scratch_dir
+
+
+os.environ['OMP_NUM_THREADS'] = os.environ['NUMEXPR_MAX_THREADS'] = '1'
+logger = logging.getLogger('pkrun')
+
+
+def barrier_idle(mpicomm, tag=0, sleep=0.01):
+    """
+    MPI barrier function that avoids idle processes spinning at 100% CPU.
+    See: https://goo.gl/NofOO9.
+    """
+    import time
+    size = mpicomm.size
+    if size == 1: return
+    rank = mpicomm.rank
+    mask = 1
+    while mask < size:
+        dst = (rank + mask) % size
+        src = (rank - mask + size) % size
+        req = mpicomm.isend(None, dst, tag)
+        while not mpicomm.Iprobe(src, tag):
+            time.sleep(sleep)
+        mpicomm.recv(None, src, tag)
+        req.Wait()
+        mask <<= 1
+
+
+def compute_power_spectrum(edges, distance, dtype='f8', wang=None, weight_type='default', tracer='ELG', tracer2=None, rec_type=None, ells=(0, 2, 4), boxsize=5000., nmesh=1024, dowin=False, option=None, mpicomm=None, mpiroot=0, **kwargs):
+
+    autocorr = tracer2 is None
+    catalog_kwargs = kwargs.copy()
+    catalog_kwargs['weight_type'] = weight_type
+    catalog_kwargs['concatenate'] = True
+    with_shifted = rec_type is not None
+
+    if 'angular' in weight_type and wang is None:
+        #wang = compute_angular_weights(nthreads=1, dtype=dtype, weight_type=weight_type, tracer=tracer, tracer2=tracer2, mpicomm=mpicomm, mpiroot=mpiroot, **kwargs)
+        # Does not run faster, why?
+ # Because the number of cores is ncores // mpicomm.size + nthreads = 64 + color = mpicomm.rank % nthreads == 0 + subcomm = mpicomm.Split(color, 0) + if color: + wang = compute_angular_weights(nthreads=nthreads, dtype=dtype, weight_type=weight_type, tracer=tracer, tracer2=tracer2, mpicomm=subcomm, mpiroot=0, **kwargs) + barrier_idle(mpicomm) + wang = mpicomm.bcast(wang, root=0) + exit() + + data_positions1, data_weights1, data_positions2, data_weights2 = None, None, None, None + randoms_positions1, randoms_weights1, randoms_positions2, randoms_weights2 = None, None, None, None + shifted_positions1, shifted_weights1, shifted_positions2, shifted_weights2 = None, None, None, None + + if mpicomm is None or mpicomm.rank == mpiroot: + + data, randoms = read_clustering_positions_weights(distance, name=['data', 'randoms'], rec_type=rec_type, tracer=tracer, option=option, **catalog_kwargs) + if with_shifted: + shifted = randoms # above returned shifted randoms + randoms = read_clustering_positions_weights(distance, name='randoms', rec_type=False, tracer=tracer, option=option, **catalog_kwargs) + (data_positions1, data_weights1), (randoms_positions1, randoms_weights1) = concatenate_data_randoms(data, randoms, **catalog_kwargs) + if with_shifted: + shifted_positions1, shifted_weights1 = concatenate_data_randoms(data, shifted, **catalog_kwargs)[1] + + if not autocorr: + data, randoms = read_clustering_positions_weights(distance, name=['data', 'randoms'], rec_type=rec_type, tracer=tracer2, option=option, **catalog_kwargs) + if with_shifted: + shifted = randoms + randoms = read_clustering_positions_weights(distance, name='randoms', rec_type=False, tracer=tracer2, option=option, **catalog_kwargs) + (data_positions2, data_weights2), (randoms_positions2, randoms_weights2) = concatenate_data_randoms(data, randoms, **catalog_kwargs) + if with_shifted: + shifted_positions2, shifted_weights2 = concatenate_data_randoms(data, shifted, **catalog_kwargs)[1] + + kwargs = {} + kwargs.update(wang or {}) + + result = CatalogFFTPower(data_positions1=data_positions1, data_weights1=data_weights1, + data_positions2=data_positions2, data_weights2=data_weights2, + randoms_positions1=randoms_positions1, randoms_weights1=randoms_weights1, + randoms_positions2=randoms_positions2, randoms_weights2=randoms_weights2, + shifted_positions1=shifted_positions1, shifted_weights1=shifted_weights1, + shifted_positions2=shifted_positions2, shifted_weights2=shifted_weights2, + edges=edges, ells=ells, boxsize=boxsize, nmesh=nmesh, resampler='tsc', interlacing=3, + position_type='rdd', dtype=dtype, direct_limits=(0., 1.), direct_limit_type='degree', # direct_limits, (0, 1) degree + **kwargs, mpicomm=mpicomm, mpiroot=mpiroot).poles + wawm = None + if dowin: + windows = [] + boxsizes = [scale * boxsize for scale in [20., 5., 1.]] + edges = {'step': 2. 
* np.pi / boxsizes[0]} + for boxsize in boxsizes: + windows.append(CatalogSmoothWindow(randoms_positions1=randoms_positions1, randoms_weights1=randoms_weights1, + power_ref=result, edges=edges, boxsize=boxsize, position_type='rdd', + mpicomm=mpicomm, mpiroot=mpiroot).poles) + window = PowerSpectrumSmoothWindow.concatenate_x(*windows, frac_nyq=0.9) + if mpicomm.rank == mpiroot: + # Let us compute the wide-angle and window function matrix + kout = result.k # output k-bins + ellsout = [0, 2, 4] # output multipoles + ellsin = [0, 2, 4] # input (theory) multipoles + wa_orders = 1 # wide-angle order + sep = np.geomspace(1e-4, 4e3, 1024*16) # configuration space separation for FFTlog + kin_rebin = 4 # rebin input theory to save memory + kin_lim = (0, 2e1) # pre-cut input (theory) ks to save some memory + # Input projections for window function matrix: + # theory multipoles at wa_order = 0, and wide-angle terms at wa_order = 1 + projsin = ellsin + PowerSpectrumOddWideAngleMatrix.propose_out(ellsin, wa_orders=wa_orders) + # Window matrix + wm = PowerSpectrumSmoothWindowMatrix(kout, projsin=projsin, projsout=ellsout, window=window, sep=sep, kin_rebin=kin_rebin, kin_lim=kin_lim) + # We resum over theory odd-wide angle + wawm = wm.copy() + wawm.resum_input_odd_wide_angle() + + return result, wang, wawm + + +def get_edges(): + return {'min':0., 'step':0.001} + + +def power_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax=np.inf, rec_type=False, weight_type='default', bin_type='lin', out_dir='.'): + if tracer2: tracer += '_' + tracer2 + if rec_type: tracer += '_' + rec_type + if region: tracer += '_' + region + root = '{}_{}_{}_{}_{}'.format(tracer, zmin, zmax, weight_type, bin_type) + if file_type == 'npy': + return os.path.join(out_dir, 'pkpoles_{}.npy'.format(root)) + return os.path.join(out_dir, '{}_{}.txt'.format(file_type, root)) + + +def window_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax=np.inf, rec_type=False, weight_type='default', bin_type='lin', out_dir='.'): + if tracer2: tracer += '_' + tracer2 + if rec_type: tracer += '_' + rec_type + if region: tracer += '_' + region + root = '{}_{}_{}_{}_{}'.format(tracer, zmin, zmax, weight_type, bin_type) + if file_type == 'npy': + return os.path.join(out_dir, 'window_smooth_{}.npy'.format(root)) + return os.path.join(out_dir, '{}_{}.txt'.format(file_type, root)) + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('--tracer', help='tracer(s) to be selected - 2 for cross-correlation', type=str, nargs='+', default=['ELG']) + parser.add_argument('--basedir', help='where to find catalogs', type=str, default='/global/cfs/cdirs/desi/survey/catalogs/') + parser.add_argument('--survey', help='e.g., SV3 or main', type=str, choices=['SV3', 'DA02', 'main'], default='SV3') + parser.add_argument('--verspec', help='version for redshifts', type=str, default='guadalupe') + parser.add_argument('--version', help='catalog version', type=str, default='test') + parser.add_argument('--ran_sw', help='extra string in random name', type=str, default='') + parser.add_argument('--region', help='regions; by default, run on N, S; pass NS to run on concatenated N + S', type=str, nargs='*', choices=['N', 'S', 'NS','NGC','SGC'], default=None) + parser.add_argument('--zlim', help='z-limits, or options for z-limits, e.g. 
"highz", "lowz", "fullonly"', type=str, nargs='*', default=None) + parser.add_argument('--weight_type', help='types of weights to use; use "default_angular_bitwise" for PIP with angular upweighting; "default" just uses WEIGHT column', type=str, default='default') + parser.add_argument('--boxsize', help='box size', type=float, default=8000.) + parser.add_argument('--nmesh', help='mesh size', type=int, default=1024) + parser.add_argument('--nran', help='number of random files to combine together (1-18 available)', type=int, default=4) + parser.add_argument('--outdir', help='base directory for output (default: SCRATCH)', type=str, default=None) + parser.add_argument('--calc_win', help='also calculate window?; use "y" for yes', default='n') + parser.add_argument('--vis', help='show plot of each pk?', action='store_true', default=False) + parser.add_argument('--rebinning', help='whether to rebin the pk or just keep the original .npy file', default='n') + + #only relevant for reconstruction + parser.add_argument('--rec_type', help='reconstruction algorithm + reconstruction convention', choices=['IFTrecsym', 'IFTreciso', 'MGrecsym', 'MGreciso'], type=str, default=None) + + setup_logging() + args = parser.parse_args() + if args.calc_win == 'n': + args.calc_win = False + if args.calc_win == 'y': + args.calc_win = True + + if args.rebinning == 'n': + args.rebinning = False + if args.rebinning == 'y': + args.rebinning = True + + from pypower import mpi + mpicomm = mpi.COMM_WORLD + mpiroot = 0 + + if os.path.normpath(args.basedir) == os.path.normpath('/global/cfs/cdirs/desi/survey/catalogs/'): + cat_dir = catalog_dir(base_dir=args.basedir, survey=args.survey, verspec=args.verspec, version=args.version) + elif os.path.normpath(args.basedir) == os.path.normpath('/global/project/projectdirs/desi/users/acarnero/mtl_mock000_univ1/'): + cat_dir = args.basedir + args.region = [''] + else: + cat_dir = args.basedir + if mpicomm is None or mpicomm.rank == mpiroot: + logger.info('Catalog directory is {}.'.format(cat_dir)) + + if args.outdir is None: + out_dir = os.path.join(get_scratch_dir(), args.survey) + else: + out_dir = args.outdir + if mpicomm is None or mpicomm.rank == mpiroot: + logger.info('Output directory is {}.'.format(out_dir)) + + tracer, tracer2 = args.tracer[0], None + if len(args.tracer) > 1: + tracer2 = args.tracer[1] + if len(args.tracer) > 2: + raise ValueError('Provide <= 2 tracers!') + if tracer2 == tracer: + tracer2 = None # otherwise counting of self-pairs + catalog_kwargs = dict(tracer=tracer, tracer2=tracer2, survey=args.survey, cat_dir=cat_dir, rec_type=args.rec_type,ran_sw=args.ran_sw) # survey required for zdone + distance = TabulatedDESI().comoving_radial_distance + + regions = args.region + if regions is None: + regions = get_regions(args.survey, rec=bool(args.rec_type)) + + if args.zlim is None: + zlims = get_zlims(tracer, tracer2=tracer2) + elif not args.zlim[0].replace('.', '').isdigit(): + option = args.zlim[0] + zlims = get_zlims(tracer, tracer2=tracer2, option=option) + else: + zlims = [float(zlim) for zlim in args.zlim] + zlims = list(zip(zlims[:-1], zlims[1:])) + ([(zlims[0], zlims[-1])] if len(zlims) > 2 else []) # len(zlims) == 2 == single redshift range + + bin_type = 'lin' + rebinning_factors = [1, 5, 10] + if mpicomm.rank == mpiroot: + logger.info('Computing power spectrum multipoles in regions {} in redshift ranges {}.'.format(regions, zlims)) + + for zmin, zmax in zlims: + base_file_kwargs = dict(tracer=tracer, tracer2=tracer2, zmin=zmin, zmax=zmax, 
rec_type=args.rec_type, weight_type=args.weight_type, bin_type=bin_type, out_dir=os.path.join(out_dir, 'pk'))
+        for region in regions:
+            if mpicomm.rank == mpiroot:
+                logger.info('Computing power spectrum in region {} in redshift range {}.'.format(region, (zmin, zmax)))
+            edges = get_edges()
+            wang = None
+            result, wang, window = compute_power_spectrum(edges=edges, distance=distance, nrandoms=args.nran, region=region, zlim=(zmin, zmax), weight_type=args.weight_type, boxsize=args.boxsize, nmesh=args.nmesh, wang=wang, dowin=args.calc_win, mpicomm=mpicomm, mpiroot=mpiroot, **catalog_kwargs)
+            fn = power_fn(file_type='npy', region=region, **base_file_kwargs)
+            result.save(fn)
+            if window is not None:
+                fn = window_fn(file_type='npy', region=region, **base_file_kwargs)
+                window.save(fn)
+
+        all_regions = regions.copy()
+        if mpicomm.rank == mpiroot:
+            if 'N' in regions and 'S' in regions: # let's combine
+                result = sum([PowerSpectrumStatistics.load(power_fn(file_type='npy', region=region, **base_file_kwargs)) for region in ['N', 'S']])
+                result.save(power_fn(file_type='npy', region='NScomb', **base_file_kwargs))
+                all_regions.append('NScomb')
+            if args.rebinning:
+                for region in all_regions:
+                    txt_kwargs = base_file_kwargs.copy()
+                    txt_kwargs.update(region=region)
+                    result = PowerSpectrumStatistics.load(power_fn(file_type='npy', **txt_kwargs))
+                    for factor in rebinning_factors:
+                        #result = PowerSpectrumStatistics.load(fn)
+                        rebinned = result[:(result.shape[0]//factor)*factor:factor]
+                        txt_kwargs.update(bin_type=bin_type+str(factor))
+                        fn_txt = power_fn(file_type='pkpoles', **txt_kwargs)
+                        rebinned.save_txt(fn_txt)
+
+                        if args.vis:
+                            k, poles = rebinned(return_k=True, complex=False)
+                            for pole in poles: plt.plot(k, k*pole)
+                            tracers = tracer
+                            if tracer2 is not None: tracers += ' x ' + tracer2
+                            plt.title('{} {:.2f} < z < {:.2f} in {}'.format(tracers, zmin, zmax, region))
+                            plt.show()
diff --git a/scripts/mock_tools/qso_cat_match_dr16q.py b/scripts/mock_tools/qso_cat_match_dr16q.py
new file mode 100644
index 000000000..9b155b0ca
--- /dev/null
+++ b/scripts/mock_tools/qso_cat_match_dr16q.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+
+import argparse
+import glob
+import os
+from pathlib import Path
+
+from astropy import units as u
+from astropy.coordinates import match_coordinates_sky, SkyCoord
+from astropy.table import Table, hstack
+import numpy as np
+import fitsio
+
+parser = argparse.ArgumentParser(description="Match the input catalog to DR16Q.")
+
+parser.add_argument("-o", "--out-dir", type=str, required=True, help="Directory to save matched catalog to.")
+group = parser.add_mutually_exclusive_group(required=True)
+group.add_argument("-f", "--fuji", action="store_true", help="Match against Fuji catalog.")
+group.add_argument("-g", "--guadalupe", action="store_true", help="Match against Guadalupe catalog.")
+group.add_argument("-d", "--daily", action="store_true", help="Match against daily catalog.")
+group.add_argument("-a", "--all", action="store_true", help="Match against combined catalogs.")
+
+args = parser.parse_args()
+
+out_loc = Path(args.out_dir)
+if not os.path.isdir(out_loc):
+    os.mkdir(out_loc)
+
+# Load each of the selected releases individually
+releases = []
+if args.guadalupe or args.all:
+    releases.append("guadalupe")
+if args.fuji or args.all:
+    releases.append("fuji")
+if args.daily or args.all:
+    releases.append("daily")
+
+desi_tables = {}
+
+for r in releases:
+    if r == 'daily':
+        ROOT = "/global/cfs/cdirs/desi/survey/catalogs/main/LSS/daily/"
+        fname = "QSO_catalog.fits"
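+        # daily uses the tile-based LSS QSO catalog; the healpix-based
+        # catalogs read below are available for fuji and guadalupe.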
+    else:
+        ROOT = f"/global/cfs/cdirs/desi/users/edmondc/QSO_catalog/{r}/"
+        fname = f"QSO_cat_{r}_healpix.fits"
+
+    with fitsio.FITS(ROOT + fname) as h:
+        desi_tables[r] = h[1].read()
+        print(f"Loaded {fname}...")
+
+# Combine the selected releases into a single table
+desi_table = np.concatenate([desi_tables[k] for k in releases])
+
+# Pull out the RA/DEC for use in matching.
+desi_ra = np.asarray([i["TARGET_RA"] for i in desi_table])
+desi_dec = np.asarray([i["TARGET_DEC"] for i in desi_table])
+
+desi_skycoords = SkyCoord(ra=desi_ra, dec=desi_dec, unit="deg")
+
+# Load DR16Q
+DR16Q_ROOT = "/global/cfs/cdirs/sdss/staging/dr16/eboss/qso/DR16Q/"
+dr16q_fname = "DR16Q_v4.fits"
+
+cols_eboss = ["RA", "DEC", "Z", "PLATE", "MJD", "FIBERID"]
+
+with fitsio.FITS(DR16Q_ROOT + dr16q_fname) as h:
+    eboss_table = h[1].read_columns(columns=cols_eboss)
+    print(f"Loaded {dr16q_fname}...")
+
+eboss_ra = np.asarray([i["RA"] for i in eboss_table])
+eboss_dec = np.asarray([i["DEC"] for i in eboss_table])
+eboss_skycoords = SkyCoord(ra=eboss_ra, dec=eboss_dec, unit="deg")
+
+# This is the line that actually matches the two table RA/DECs to each other
+print("Matching...")
+idx, sep2d, dist3d = match_coordinates_sky(desi_skycoords, eboss_skycoords)
+
+# 2d separation in arcseconds to constrain our search radius.
+d2d = np.asarray(sep2d.to(u.arcsec))
+
+# Keep everything whose match is within 1 arcsecond,
+# essentially treating any match that close as "correct"
+match_keep = d2d < 1
+_, keep_counts = np.unique(idx[match_keep], return_counts=True)
+print(f"Matched {np.sum(match_keep)} entries from input catalog to DR16Q.")
+
+# If there are any double matches we'll need to handle that
+if np.any(keep_counts > 1):
+    print("Double matches found...")
+
+# Reduces the tables to the matched entries using the indices of matches
+desi_keep = Table(desi_table[match_keep])
+eboss_keep = Table(eboss_table[idx][match_keep])
+eboss_keep.rename_column("Z", "Z_SDSS")
+joined = hstack([desi_keep, eboss_keep])
+
+# Drop the SDSS RA/DEC from the joined table, since we already have these from
+# the DESI portion of the table.
+del joined["RA"]
+del joined["DEC"]
+
+# Setting the save name.
+out_name = "QSO_cat_fujilupe_healpix_DR16Q_match.fits"
+if args.fuji:
+    out_name = "QSO_cat_fuji_healpix_DR16Q_match.fits"
+elif args.guadalupe:
+    out_name = "QSO_cat_guadalupe_healpix_DR16Q_match.fits"
+elif args.daily:
+    out_name = "QSO_cat_daily_tile_DR16Q_match.fits"
+
+joined.write(out_loc / out_name, format="fits", overwrite=True)
+
diff --git a/scripts/mock_tools/readwrite_pixel_bitmask.py b/scripts/mock_tools/readwrite_pixel_bitmask.py
new file mode 100644
index 000000000..2943c43f8
--- /dev/null
+++ b/scripts/mock_tools/readwrite_pixel_bitmask.py
@@ -0,0 +1,145 @@
+# Get bitmask values from pixel-level per-brick masks for a catalog
+# Examples:
+# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --tracer lrg --input catalog.fits --output catalog_lrgmask_v1.1.npy
+# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --tracer lrg --input /global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits --output $CSCRATCH/temp/randoms-1-0-lrgmask_v1.1.fits
+# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --tracer lrg --input /global/cfs/cdirs/desi/users/rongpu/targets/dr9.0/1.0.0/resolve/dr9_lrg_south_1.0.0_basic.fits --output $CSCRATCH/temp/dr9_lrg_south_1.0.0_lrgmask_v1.1.fits
+
+from __future__ import division, print_function
+import sys, os, glob, time, warnings, gc
+import numpy as np
+import matplotlib.pyplot as plt
+from astropy.table import Table, vstack, hstack, join
+import fitsio
+
+from astropy.io import fits
+from astropy import wcs
+
+from multiprocessing import Pool
+import argparse
+
+
+time_start = time.time()
+
+n_processes = 32
+
+parser = argparse.ArgumentParser()
+parser.add_argument('-t', '--tracer', required=True)
+parser.add_argument('-i', '--input', required=True)
+#parser.add_argument('-o', '--output', required=True)
+parser.add_argument('-v', '--version', default='none', required=False)
+parser.add_argument('-rv', '--tarver', default='targetsDR9v1.1.1', required=False)
+parser.add_argument('--ran', default=False, required=False, type=lambda s: str(s).lower() in ('true', '1', 'y', 'yes'))
+args = parser.parse_args()
+
+
+input_path = '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/'+args.input+args.tarver+'.fits'
+output_path = '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/'+args.input+args.tarver+'_'+args.tracer+'imask.fits'
+if args.ran:
+    input_path = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-'+str(args.input)+'.fits'
+    output_path = '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/randoms-1-'+str(args.input)+args.tracer+'imask.fits'
+
+tracer = args.tracer.lower()
+version = args.version
+
+version_dict = {'lrg': 'v1.1', 'elg': 'v1'}
+if version=='none':
+    version = version_dict[tracer]
+
+bitmask_dir = '/global/cfs/cdirs/desi/survey/catalogs/brickmasks/{}/{}'.format(tracer.upper(), version)
+
+# input_path = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits'
+# output_path = '/global/cscratch1/sd/rongpu/temp/randoms-1-0-lrgmask_v1.fits'
+
+if os.path.isfile(output_path):
+    raise ValueError(output_path+' already exists!')
+
+
+def bitmask_radec(brickid, ra, dec):
+
+    brick_index = np.where(bricks['BRICKID']==brickid)[0][0]
+
+    brickname = str(bricks['BRICKNAME'][brick_index])
+    if bricks['PHOTSYS'][brick_index]=='N':
+        field = 'north'
+    elif bricks['PHOTSYS'][brick_index]=='S':
+        field = 'south'
+    else:
+        # raise ValueError
+        # Outside DR9 footprint; assign mask bit 7
+        bitmask = np.full(len(ra), 2**7, dtype=np.uint8)
+        return bitmask
+
+    # bitmask_fn = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/{}/coadd/{}/{}/legacysurvey-{}-maskbits.fits.fz'.format(field, brickname[:3], brickname, brickname)
+    bitmask_fn = os.path.join(bitmask_dir, '{}/coadd/{}/{}/{}-{}mask.fits.gz'.format(field, brickname[:3], brickname, brickname, tracer))
+
+    bitmask_img = fitsio.read(bitmask_fn)
+
+    header = fits.open(bitmask_fn)[1].header
+    w = wcs.WCS(header)
+
+    coadd_x, coadd_y = w.wcs_world2pix(ra, dec, 0)
+    coadd_x, coadd_y = np.round(coadd_x).astype(int), np.round(coadd_y).astype(int)
+
+    bitmask = bitmask_img[coadd_y, coadd_x]
+
+    return bitmask
+
+
+def wrapper(bid_index):
+
+    idx = bidorder[bidcnts[bid_index]:bidcnts[bid_index+1]]
+    brickid = bid_unique[bid_index]
+
+    ra, dec = cat['RA'][idx], cat['DEC'][idx]
+    tid = cat['TARGETID'][idx]
+    bitmask = bitmask_radec(brickid, ra, dec)
+
+    data = Table()
+    data['idx'] = idx
+    data['{}_mask'.format(tracer)] = bitmask
+    data['TARGETID'] = tid
+
+    return data
+
+
+# bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/survey-bricks.fits.gz'))
+bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/randoms/survey-bricks-dr9-randoms-0.48.0.fits'))
+
+try:
+    cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC', 'BRICKID', 'TARGETID']))
+except ValueError:
+    cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC', 'TARGETID']))
+
+print(len(cat))
+
+for col in cat.colnames:
+    cat.rename_column(col, col.upper())
+
+if 'TARGET_RA' in cat.colnames:
+    cat.rename_columns(['TARGET_RA', 'TARGET_DEC'], ['RA', 'DEC'])
+
+if 'BRICKID' not in cat.colnames:
+    from desiutil import brick
+    tmp = brick.Bricks(bricksize=0.25)
+    cat['BRICKID'] = tmp.brickid(cat['RA'], cat['DEC'])
+
+# Just some tricks to speed things up
+bid_unique, bidcnts = np.unique(cat['BRICKID'], return_counts=True)
+bidcnts = np.insert(bidcnts, 0, 0)
+bidcnts = np.cumsum(bidcnts)
+bidorder = np.argsort(cat['BRICKID'])
+
+# start multiple worker processes
+with Pool(processes=n_processes) as pool:
+    res = pool.map(wrapper, np.arange(len(bid_unique)))
+
+res = vstack(res)
+res.sort('idx')
+res.remove_column('idx')
+
+if output_path.endswith('.fits'):
+    res.write(output_path)
+else:
+    np.save(output_path, np.array(res['{}_mask'.format(tracer)]))
+
+print('Done!', time.strftime("%H:%M:%S", time.gmtime(time.time() - time_start)))
diff --git a/scripts/mock_tools/recon.py b/scripts/mock_tools/recon.py
new file mode 100644
index 000000000..bf0ec8307
--- /dev/null
+++ b/scripts/mock_tools/recon.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+import os
+import argparse
+import logging
+
+import numpy as np
+from astropy.table import Table, vstack
+
+from pyrecon import MultiGridReconstruction, IterativeFFTReconstruction, IterativeFFTParticleReconstruction, utils, setup_logging
+from LSS.tabulated_cosmo import TabulatedDESI
+
+from xirunpc import get_clustering_positions_weights, catalog_dir, catalog_fn, get_regions, get_zlims, get_scratch_dir
+
+
+logger = logging.getLogger('recon')
+
+
+def run_reconstruction(Reconstruction, distance, data_fn, randoms_fn, data_rec_fn, randoms_rec_fn, f=0.8, bias=1.2, boxsize=None, nmesh=None, cellsize=7, smoothing_radius=15, nthreads=8, convention='reciso', dtype='f4', **kwargs):
+
+    if np.ndim(randoms_fn) == 0: randoms_fn = [randoms_fn]
+    if np.ndim(randoms_rec_fn) == 0: randoms_rec_fn = [randoms_rec_fn]
+
+    logger.info('Loading {}.'.format(data_fn))
+    data = Table.read(data_fn)
+    (ra, dec,
dist), data_weights, mask = get_clustering_positions_weights(data, distance, name='data', return_mask=True, **kwargs) + data = data[mask] + data_positions = utils.sky_to_cartesian(dist, ra, dec, dtype=dtype) + recon = Reconstruction(f=f, bias=bias, boxsize=boxsize, nmesh=nmesh, cellsize=cellsize, los='local', positions=data_positions, nthreads=nthreads, fft_engine='fftw', dtype=dtype) + + recon.assign_data(data_positions, data_weights) + for fn in randoms_fn: + logger.info('Loading {}.'.format(fn)) + (ra, dec, dist), randoms_weights = get_clustering_positions_weights(Table.read(fn), distance, name='randoms', **kwargs) + randoms_positions = utils.sky_to_cartesian(dist, ra, dec, dtype=dtype) + recon.assign_randoms(randoms_positions, randoms_weights) + + recon.set_density_contrast(smoothing_radius=smoothing_radius) + recon.run() + + field = 'disp+rsd' + if type(recon) is IterativeFFTParticleReconstruction: + data_positions_rec = recon.read_shifted_positions('data', field=field) + else: + data_positions_rec = recon.read_shifted_positions(data_positions, field=field) + + distance_to_redshift = utils.DistanceToRedshift(distance) + catalog = Table(data) + dist, ra, dec = utils.cartesian_to_sky(data_positions_rec) + catalog['RA'], catalog['DEC'], catalog['Z'] = ra, dec, distance_to_redshift(dist) + logger.info('Saving {}.'.format(data_rec_fn)) + utils.mkdir(os.path.dirname(data_rec_fn)) + catalog.write(data_rec_fn, format='fits', overwrite=True) + + field = 'disp+rsd' if convention == 'recsym' else 'disp' + for fn, rec_fn in zip(randoms_fn, randoms_rec_fn): + catalog = Table.read(fn) + (ra, dec, dist), randoms_weights, mask = get_clustering_positions_weights(catalog, distance, name='randoms', return_mask=True, **kwargs) + catalog = catalog[mask] + randoms_positions = utils.sky_to_cartesian(dist, ra, dec, dtype=dtype) + dist, ra, dec = utils.cartesian_to_sky(recon.read_shifted_positions(randoms_positions, field=field)) + catalog['RA'], catalog['DEC'], catalog['Z'] = ra, dec, distance_to_redshift(dist) + logger.info('Saving {}.'.format(rec_fn)) + utils.mkdir(os.path.dirname(rec_fn)) + catalog.write(rec_fn, format='fits', overwrite=True) + + +def run_realspace_reconstruction(Reconstruction, distance, data_fn, randoms_fn, data_rec_fn, f=0.8, bias=1.2, boxsize=None, nmesh=None, cellsize=7, smoothing_radius=15, nthreads=8, dtype='f4', **kwargs): + + convention = 'RSD' + + if np.ndim(randoms_fn) == 0: randoms_fn = [randoms_fn] + #if np.ndim(randoms_rec_fn) == 0: randoms_rec_fn = [randoms_rec_fn] + + logger.info('Loading {}.'.format(data_fn)) + data = Table.read(data_fn) + (ra, dec, dist), data_weights, mask = get_clustering_positions_weights(data, distance, name='data', return_mask=True, **kwargs) + data = data[mask] + data_positions = utils.sky_to_cartesian(dist, ra, dec, dtype=dtype) + recon = Reconstruction(f=f, bias=bias, boxsize=boxsize, nmesh=nmesh, cellsize=cellsize, los='local', positions=data_positions, nthreads=nthreads, fft_engine='fftw', dtype=dtype) + + recon.assign_data(data_positions, data_weights) + for fn in randoms_fn: + logger.info('Loading {}.'.format(fn)) + (ra, dec, dist), randoms_weights = get_clustering_positions_weights(Table.read(fn), distance, name='randoms', **kwargs) + randoms_positions = utils.sky_to_cartesian(dist, ra, dec, dtype=dtype) + recon.assign_randoms(randoms_positions, randoms_weights) + + recon.set_density_contrast(smoothing_radius=smoothing_radius) + recon.run() + + field = 'rsd' + if type(recon) is IterativeFFTParticleReconstruction: + data_positions_rec = 
recon.read_shifted_positions('data', field=field)
+    else:
+        data_positions_rec = recon.read_shifted_positions(data_positions, field=field)
+
+    distance_to_redshift = utils.DistanceToRedshift(distance)
+    catalog = Table(data)
+    dist, ra, dec = utils.cartesian_to_sky(data_positions_rec)
+    catalog['RA'], catalog['DEC'], catalog['Z'] = ra, dec, distance_to_redshift(dist)
+    logger.info('Saving {}.'.format(data_rec_fn))
+    utils.mkdir(os.path.dirname(data_rec_fn))
+    catalog.write(data_rec_fn, format='fits', overwrite=True)
+
+
+def get_f_bias(tracer='ELG'):
+    if tracer.startswith('ELG') or tracer.startswith('QSO'):
+        return 0.9, 1.3
+    if tracer.startswith('LRG'):
+        return 0.8, 2.
+    if tracer.startswith('BGS'):
+        return 0.67, 1.5
+
+    return 0.8, 1.2
+
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--tracer', help='tracer to be selected', type=str, default='ELG')
+    parser.add_argument('--indir', help='where to find catalogs', type=str, default='/global/cfs/cdirs/desi/survey/catalogs/')
+    parser.add_argument('--survey', help='e.g., SV3 or main', type=str, choices=['SV3', 'DA02', 'main'], default='DA02')
+    parser.add_argument('--verspec', help='version for redshifts', type=str, default='guadalupe')
+    parser.add_argument('--version', help='catalog version', type=str, default='test')
+    parser.add_argument('--region', help='regions; by default, run on all regions', type=str, nargs='*', choices=['NGC','SGC','N', 'S', 'DN', 'DS', ''], default=None)
+    parser.add_argument('--zlim', help='z-limits, or options for z-limits, e.g. "highz", "lowz"', type=str, nargs='*', default=None)
+    parser.add_argument('--weight_type', help='types of weights to use; "default" just uses WEIGHT column', type=str, default='default')
+    parser.add_argument('--nran', help='number of random files to combine together (1-18 available)', type=int, default=5)
+    parser.add_argument('--nthreads', help='number of threads', type=int, default=64)
+    parser.add_argument('--outdir', help='base directory for output (default: SCRATCH)', type=str, default=None)
+    parser.add_argument('--algorithm', help='reconstruction algorithm', type=str, choices=['MG', 'IFT', 'IFTP'], default='MG')
+    parser.add_argument('--convention', help='reconstruction convention', type=str, choices=['reciso', 'recsym'], default='reciso')
+    parser.add_argument('--f', help='growth rate', type=float, default=None)
+    parser.add_argument('--bias', help='bias', type=float, default=None)
+    parser.add_argument('--boxsize', help='box size', type=float, default=None)
+    parser.add_argument('--nmesh', help='mesh size', type=int, default=None)
+    parser.add_argument('--cellsize', help='cell size', type=float, default=7)
+    parser.add_argument('--smoothing_radius', help='smoothing radius', type=float, default=15)
+    parser.add_argument('--prepare_blinding', help='use this flag to create a real-space catalog that can be used as input for RSD blinding', action='store_true', default=False)
+
+    setup_logging()
+    args = parser.parse_args()
+
+    Reconstruction = {'MG': MultiGridReconstruction, 'IFT': IterativeFFTReconstruction, 'IFTP': IterativeFFTParticleReconstruction}[args.algorithm]
+
+    if os.path.normpath(args.indir) == os.path.normpath('/global/cfs/cdirs/desi/survey/catalogs/'):
+        cat_dir = catalog_dir(base_dir=args.indir, survey=args.survey, verspec=args.verspec, version=args.version)
+    elif os.path.normpath(args.indir) == os.path.normpath('/global/project/projectdirs/desi/users/acarnero/mtl_mock000_univ1/'):
+        cat_dir = args.indir
+        
args.region = [''] + else: + cat_dir = args.indir + logger.info('Input directory is {}.'.format(cat_dir)) + + if args.outdir is None: + out_dir = os.path.join(get_scratch_dir(), args.survey) + else: + out_dir = args.outdir + logger.info('Output directory is {}.'.format(out_dir)) + + distance = TabulatedDESI().comoving_radial_distance + + f, bias = get_f_bias(args.tracer) + if args.f is not None: f = args.f + if args.bias is not None: bias = args.bias + + regions = args.region + if regions is None: + regions = get_regions(args.survey, rec=True) + + if args.zlim is None: + zlims = get_zlims(args.tracer) + elif not args.zlim[0].replace('.', '').isdigit(): + zlims = get_zlims(args.tracer, option=args.zlim[0]) + else: + zlims = [float(zlim) for zlim in args.zlim] + zlims = [(zlims[0], zlims[-1])] + + for zmin, zmax in zlims: + for region in regions: + logger.info('Running reconstruction in region {} in redshift range {} with f, bias = {}.'.format(region, (zmin, zmax), (f, bias))) + catalog_kwargs = dict(tracer=args.tracer, region=region, ctype='clustering', nrandoms=args.nran, survey=args.survey) + data_fn = catalog_fn(**catalog_kwargs, cat_dir=cat_dir, name='data') + randoms_fn = catalog_fn(**catalog_kwargs, cat_dir=cat_dir, name='randoms') + data_rec_fn = catalog_fn(**catalog_kwargs, cat_dir=out_dir, rec_type=args.algorithm+args.convention, name='data') + randoms_rec_fn = catalog_fn(**catalog_kwargs, cat_dir=out_dir, rec_type=args.algorithm+args.convention, name='randoms') + data_realspacerec_fn = catalog_fn(**catalog_kwargs, cat_dir=out_dir, rec_type=args.algorithm+'rsd', name='data') + if args.prepare_blinding: + run_realspace_reconstruction(Reconstruction, distance, data_fn, randoms_fn, data_realspacerec_fn, f=f, bias=bias, boxsize=args.boxsize, nmesh=args.nmesh, cellsize=args.cellsize, smoothing_radius=args.smoothing_radius, nthreads=args.nthreads, dtype='f4', zlim=(zmin, zmax), weight_type=args.weight_type) + else: + run_reconstruction(Reconstruction, distance, data_fn, randoms_fn, data_rec_fn, randoms_rec_fn, f=f, bias=bias, boxsize=args.boxsize, nmesh=args.nmesh, cellsize=args.cellsize, smoothing_radius=args.smoothing_radius, nthreads=args.nthreads, convention=args.convention, dtype='f4', zlim=(zmin, zmax), weight_type=args.weight_type) diff --git a/scripts/mock_tools/summary_numbers.py b/scripts/mock_tools/summary_numbers.py new file mode 100644 index 000000000..3cfd9ae23 --- /dev/null +++ b/scripts/mock_tools/summary_numbers.py @@ -0,0 +1,51 @@ +import matplotlib.pyplot as plt +import numpy as np +import os +import sys +import argparse + +import fitsio +from astropy.table import join,Table +import healpy as hp + +from LSS.imaging import densvar + +parser = argparse.ArgumentParser() +parser.add_argument("--version", help="catalog version",default='EDAbeta') +parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='SV3') +parser.add_argument("--tracers", help="all runs all for given survey",default='all') +parser.add_argument("--verspec",help="version for redshifts",default='fuji') +args = parser.parse_args() + + +indir = '/global/cfs/cdirs/desi/survey/catalogs/'+args.survey+'/LSS/'+args.verspec+'/LSScats/'+args.version+'/' +zcol = 'Z' +nran = 18 + +tps = [args.tracers] +if args.tracers == 'all': + tps = ['QSO','LRG','BGS_BRIGHT','ELG_LOPnotqso'] + +zdw = ''#'zdone' + +regl = ['_N','_S'] + +if args.survey == 'SV3' and args.tracers == 'all': + tps = ['QSO','LRG','BGS_ANY','ELGnotqso'] + +tot = 0 +for tp in tps: + + for nr in range(0,nran): + rffh = 
fitsio.read_header(indir+tp+zdw+'_'+str(nr)+'_full.ran.fits',ext=1) + print(tp+' area is '+str(rffh['NAXIS2']/2500)+' deg2, using random '+str(nr)) + + tot_tp = 0 + for reg in regl: + dtf = fitsio.read_header(indir+tp+zdw+reg+'_clustering.dat.fits',ext=1) + ncat = dtf['NAXIS2'] + print('number for '+tp+' in '+reg +' is '+str(ncat)) + tot_tp += ncat + print('number for '+tp+' is '+str(tot_tp)) + tot += tot_tp +print('total number for '+args.survey +' is '+str(tot)) \ No newline at end of file diff --git a/scripts/mock_tools/xiruncz.py b/scripts/mock_tools/xiruncz.py new file mode 100644 index 000000000..e3467d588 --- /dev/null +++ b/scripts/mock_tools/xiruncz.py @@ -0,0 +1,193 @@ +#make sure to type these two commands: +#export OMP_NUM_THREADS=64 +#module load gsl +#python xiruncz.py --type ELG_HIP +import subprocess +import sys +import argparse +import os +#sys.path.append('../py') +#import LSS.mkCat_singletile.xitools as xt +#import LSS.SV3.xitools as xt + + +parser = argparse.ArgumentParser() +parser.add_argument("--type", help="tracer type to be selected") +parser.add_argument("--basedir", help="base directory for output, default is desi catalog directory",default='/global/cfs/cdirs/desi/survey/catalogs') +parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test') +parser.add_argument("--verspec",help="version for redshifts",default='everest') +parser.add_argument("--survey",help="e.g., SV3 or main",default='SV3') +parser.add_argument("--nran",help="number of random files to combine together (1-18 available)",default=10) + +args = parser.parse_args() + +type = args.type +basedir = args.basedir +version = args.version +specrel = args.verspec +survey = args.survey +nran = int(args.nran) + +if survey == 'SV3': + import LSS.SV3.xitools as xt +if survey == 'main': + import LSS.main.xitools as xt + +lssdir = basedir+'/'+survey+'/LSS/'+specrel+'/LSScats/' +#dirout = svdir+'LSScats/'+version+'/' + +zmask = [''] +minn = 0 + +subt = None +if type == 'LRGAlltiles' or type == 'LRGAlltiles_main': + zl = [0.32,0.6,0.8,1.05,1.3] + #minn = 2 + #zmin=0.32 + #zmax=1.05 + +if type == 'LRG': + zl = [0.4,0.6,0.8,1.1] +# minn = 5 + #zmin=0.32 + #zmax=1.05 + + +if type == 'LRG_OPT': + subt = type + zmin=0.6 + zmax=1. + type = 'LRG' + +if type == 'LRG_IR': + subt = type + zmin=0.6 + zmax=1. + type = 'LRG' + + +if type[:3] == 'ELG':# or type == 'ELG_HIP': + #minn = 5 + zl = [0.8,1.1,1.5] + #zmask = ['','_zmask'] + + #zmin = 0.8 + #zmax = 1.6 + +#if type == 'ELG_HIP': +# zmin = 0.8 +# zmax = 1.6 +if type == 'ELG_HIP16': + minn = 5 + zl = [1,1.6] + type = 'ELG_HIP' + +if type == 'ELG16': + minn = 5 + zl = [1,1.6] + type = 'ELG' + + +if type == 'ELGlz': + zmin = 0.6 + zmax = 0.8 + type = 'ELG' + +if type == 'ELGmz': + zmin = 0.8 + zmax = 1.1 + type = 'ELG' + +if type == 'ELGhz': + zmin = 1.1 + zmax = 1.6 + type = 'ELG' + +if type == 'ELGmhz': + zmin = 0.6 + zmax = 1.497 + type = 'ELG' + +if type == 'ELGhz497': + zmin = 1.1 + zmax = 1.497 + type = 'ELG' + +if type == 'QSO': + zl = [0.8,1.1,1.5,2.1] + #zmin = 1. 
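+    # zl entries are bin edges; the loop at the bottom of this script consumes
+    # them pairwise, with a final pass over the full range zl[0] to zl[-1]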
+    #zmax = 2.1
+
+if type == 'QSOhiz':
+    zmin = 1.6
+    zmax = 2.1
+    type = 'QSO'
+
+if type == 'QSOlya':
+    #zmin = 2.1
+    #zmax = 3.5
+    zl = [2.1,3.5]
+    type = 'QSO'
+
+
+if type == 'QSO_RF_4PASS':
+    subt = type
+    zmin = 1.6
+    zmax = 2.1
+    type = 'QSO'
+
+if type == 'ELG_FDR_GFIB':
+    subt = type
+    zmin = 1.1
+    zmax = 1.6
+    type = 'ELG'
+
+if type[:3] == 'BGS':
+    #minn = 2
+    zl = [0.1,0.3,0.5]
+    #zmin = 0.1
+    #zmax = 0.5
+
+if type == 'BGS_hiz':
+    zmin = 0.3
+    zmax = 0.5
+    type = 'BGS_ANY'
+
+ranwt1=False
+
+regl = ['_N','_S']
+
+if survey == 'main':
+    regl = ['_DN','_DS','_N','_S']
+
+for i in range(0,len(zl)):
+    if i == len(zl)-1:
+        zmin=zl[0]
+        zmax=zl[-1]
+    else:
+        zmin = zl[i]
+        zmax = zl[i+1]
+    print(zmin,zmax)
+    for zma in zmask:
+        for reg in regl:
+            xt.prep4czxi(type,zmin,zmax,nran=nran,indir=lssdir,ver=version,minn=minn,reg=zma+reg,outdir=os.environ['CSCRATCH']+'/cz/',ranwt1=ranwt1,subt=subt)
+            subprocess.run(['chmod','+x','czpc.sh'])
+            subprocess.run('./czpc.sh')
+            fa = ''
+            if ranwt1:
+                fa = 'ranwt1'
+            if subt is not None:
+                fa += subt
+            xt.calcxi_dataCZ(type,zmin,zmax,minn=minn,reg=zma+reg,ver=version,fa=fa)
+
+
+        xt.prep4czxi(type,zmin,zmax,nran=nran,indir=lssdir,ver=version,minn=minn,reg=zma,outdir=os.environ['CSCRATCH']+'/cz/',ranwt1=ranwt1,subt=subt)
+        subprocess.run(['chmod','+x','czpc.sh'])
+        subprocess.run('./czpc.sh')
+        fa = ''
+        if ranwt1:
+            fa = 'ranwt1'
+        if subt is not None:
+            fa += subt
+        xt.calcxi_dataCZ(type,zmin,zmax,minn=minn,ver=version,fa=fa,reg=zma)
+
diff --git a/scripts/mock_tools/xirunpc.py b/scripts/mock_tools/xirunpc.py
new file mode 100644
index 000000000..548c0e60a
--- /dev/null
+++ b/scripts/mock_tools/xirunpc.py
@@ -0,0 +1,672 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+import os
+import argparse
+import logging
+
+import numpy as np
+from astropy.table import Table, vstack
+from matplotlib import pyplot as plt
+
+from pycorr import TwoPointCorrelationFunction, TwoPointEstimator, KMeansSubsampler, utils, setup_logging
+from LSS.tabulated_cosmo import TabulatedDESI
+
+
+logger = logging.getLogger('xirunpc')
+
+
+def get_scratch_dir():
+    if os.environ['NERSC_HOST'] == 'cori':
+        scratch_dir = os.environ['CSCRATCH']
+        os.environ['OMP_NUM_THREADS'] = '64'
+    elif os.environ['NERSC_HOST'] == 'perlmutter':
+        scratch_dir = os.environ['PSCRATCH']
+        os.environ['OMP_NUM_THREADS'] = '128'
+    else:
+        msg = 'NERSC_HOST is not cori or perlmutter but is {};\n'.format(os.environ['NERSC_HOST'])
+        msg += 'NERSC_HOST not known (code only works on NERSC), not proceeding'
+        raise ValueError(msg)
+    return scratch_dir
+
+
+def get_zlims(tracer, tracer2=None, option=None):
+
+    if tracer2 is not None:
+        zlims1 = get_zlims(tracer, option=option)
+        zlims2 = get_zlims(tracer2, option=option)
+        return [zlim for zlim in zlims1 if zlim in zlims2]
+
+    if tracer.startswith('LRG'):
+        zlims = [0.4, 0.6, 0.8, 1.1]
+
+    if tracer.startswith('ELG'):# or type == 'ELG_HIP':
+        zlims = [0.8, 1.1, 1.6]
+        if option:
+            if option == 'safez':
+                zlims = [0.9, 1.48]
+            if 'extended' in option:
+                logger.warning('extended is no longer a meaningful option')
+                #zlims = [0.8, 1.1, 1.6]
+            if 'smallshells' in option:
+                zlims = [0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6]
+
+    if tracer.startswith('QSO'):
+        zlims = [0.8, 1.1, 1.6, 2.1, 3.5]
+        if option == 'highz':
+            zlims = [2.1, 3.5]
+        if option == 'lowz':
+            zlims = [0.8, 2.1]
+
+    if tracer.startswith('BGS'):
+        zlims = [0.1, 0.3, 0.5]
+        if option == 'lowz':
+            zlims = [0.1, 0.3]
+        if option == 'highz':
+            zlims = [0.3, 0.5]
+
+    if option == 'fullonly':
+        zlims 
= [zlims[0], zlims[-1]] + + return zlims + + +def get_regions(survey, rec=False): + regions = ['N', 'S']#, ''] + #if survey in ['main', 'DA02']: + # regions = ['DN', 'DS', 'N', 'S'] + # if rec: regions = ['DN', 'N'] + return regions + + +def select_region(ra, dec, region): + mask_ra = (ra > 100 - dec) + mask_ra &= (ra < 280 + dec) + if region == 'DN': + mask = dec < 32.375 + mask &= mask_ra + elif region == 'DS': + mask = dec > -25 + mask &= ~mask_ra + else: + raise ValueError('Input region must be one of ["DN", "DS"].') + return mask + + +def catalog_dir(survey='main', verspec='guadalupe', version='test', base_dir='/global/cfs/cdirs/desi/survey/catalogs'): + return os.path.join(base_dir, survey, 'LSS', verspec, 'LSScats', version) + + + +def catalog_fn(tracer='ELG', region='', ctype='clustering', name='data', ran_sw='',rec_type=False, nrandoms=4, cat_dir=None, survey='main', **kwargs): + if cat_dir is None: + cat_dir = catalog_dir(survey=survey, **kwargs) + #if survey in ['main', 'DA02']: + # tracer += 'zdone' + if 'edav1' in cat_dir: + cat_dir += ctype + + if ctype == 'full': + region = '' + dat_or_ran = name[:3] + if name == 'randoms' and tracer == 'LRG_main' and ctype == 'full': + tracer = 'LRG' + if region: region = '_' + region + if rec_type: + dat_or_ran = '{}.{}'.format(rec_type, dat_or_ran) + if name == 'data': + return os.path.join(cat_dir, '{}{}_{}.{}.fits'.format(tracer, region, ctype, dat_or_ran)) + return [os.path.join(cat_dir, '{}{}{}_{:d}_{}.{}.fits'.format(tracer, ran_sw, region, iran, ctype, dat_or_ran)) for iran in range(nrandoms)] + + +def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim=None, weight_type='default', name='data', return_mask=False, option=None): + + if maglim is None: + mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) + if maglim is not None: + mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) & (catalog['ABSMAG_R'] >= maglim[0]) & (catalog['ABSMAG_R'] < maglim[1]) + + if option: + if 'elgzmask' in option: + zmask = ((catalog['Z'] >= 1.49) & (catalog['Z'] < 1.52)) + mask &= ~zmask + logger.info('Using {:d} rows for {}.'.format(mask.sum(), name)) + positions = [catalog['RA'][mask], catalog['DEC'][mask], distance(catalog['Z'][mask])] + weights = np.ones_like(positions[0]) + + if 'completeness_only' in weight_type and 'bitwise' in weight_type: + raise ValueError('inconsistent choices were put into weight_type') + + if name == 'data': + if 'zfail' in weight_type: + weights *= catalog['WEIGHT_ZFAIL'][mask] + if 'default' in weight_type and 'bitwise' not in weight_type: + weights *= catalog['WEIGHT'][mask] + if 'RF' in weight_type: + weights *= catalog['WEIGHT_RF'][mask]*catalog['WEIGHT_COMP'][mask] + if 'completeness_only' in weight_type: + weights = catalog['WEIGHT_COMP'][mask] + if 'EB' in weight_type: + weights *= catalog['WEIGHT_SYSEB'][mask]*catalog['WEIGHT_COMP'][mask] + if 'FKP' in weight_type: + weights *= catalog['WEIGHT_FKP'][mask] + if 'bitwise' in weight_type: + if catalog['BITWEIGHTS'].ndim == 2: weights = list(catalog['BITWEIGHTS'][mask].T) + [weights] + else: weights = [catalog['BITWEIGHTS'][mask]] + [weights] + + if name == 'randoms': + if 'default' in weight_type: + weights *= catalog['WEIGHT'][mask] + if 'RF' in weight_type: + weights *= catalog['WEIGHT_RF'][mask]*catalog['WEIGHT_COMP'][mask] + if 'zfail' in weight_type: + weights *= catalog['WEIGHT_ZFAIL'][mask] + if 'completeness_only' in weight_type: + weights = catalog['WEIGHT_COMP'][mask] + if 'EB' in weight_type: + weights *= 
catalog['WEIGHT_SYSEB'][mask]*catalog['WEIGHT_COMP'][mask] + if 'FKP' in weight_type: + weights *= catalog['WEIGHT_FKP'][mask] + + if return_mask: + return positions, weights, mask + return positions, weights + + +def _concatenate(arrays): + if isinstance(arrays[0], (tuple, list)): # e.g., list of bitwise weights for first catalog + array = [np.concatenate([arr[iarr] for arr in arrays], axis=0) for iarr in range(len(arrays[0]))] + else: + array = np.concatenate(arrays, axis=0) # e.g. individual weights for first catalog + return array + + +def read_clustering_positions_weights(distance, zlim =(0., np.inf), maglim =None,weight_type='default', name='data', concatenate=False, option=None, region=None, **kwargs): + + if 'GC' in region: + region = [region] + + def read_positions_weights(name): + positions, weights = [], [] + for reg in region: + cat_fns = catalog_fn(ctype='clustering', name=name, region=reg, **kwargs) + logger.info('Loading {}.'.format(cat_fns)) + isscalar = not isinstance(cat_fns, (tuple, list)) + if isscalar: + cat_fns = [cat_fns] + positions_weights = [get_clustering_positions_weights(Table.read(cat_fn), distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns] + if isscalar: + positions.append(positions_weights[0][0]) + weights.append(positions_weights[0][1]) + else: + p, w = [tmp[0] for tmp in positions_weights], [tmp[1] for tmp in positions_weights] + if concatenate: + p, w = _concatenate(p), _concatenate(w) + positions.append(p) + weights.append(w) + return positions, weights + + if isinstance(name, (tuple, list)): + return [read_positions_weights(n) for n in name] + return read_positions_weights(name) + + +def get_full_positions_weights(catalog, name='data', weight_type='default', fibered=False, region='', return_mask=False): + + mask = np.ones(len(catalog), dtype='?') + if region in ['DS', 'DN']: + mask &= select_region(catalog['RA'], catalog['DEC'], region) + elif region: + mask &= catalog['PHOTSYS'] == region.strip('_') + + if fibered: mask &= catalog['LOCATION_ASSIGNED'] + positions = [catalog['RA'][mask], catalog['DEC'][mask], catalog['DEC'][mask]] + if name == 'data' and fibered and 'bitwise' in weight_type: + if catalog['BITWEIGHTS'].ndim == 2: weights = list(catalog['BITWEIGHTS'][mask].T) + else: weights = [catalog['BITWEIGHTS'][mask]] + else: weights = np.ones_like(positions[0]) + if return_mask: + return positions, weights, mask + return positions, weights + + +def read_full_positions_weights(name='data', weight_type='default', fibered=False, region='', **kwargs): + + def read_positions_weights(name): + positions, weights = [], [] + for reg in region: + cat_fn = catalog_fn(ctype='full', name=name, **kwargs) + logger.info('Loading {}.'.format(cat_fn)) + if isinstance(cat_fn, (tuple, list)): + catalog = vstack([Table.read(fn) for fn in cat_fn]) + else: + catalog = Table.read(cat_fn) + p, w = get_full_positions_weights(catalog, name=name, weight_type=weight_type, fibered=fibered, region=reg) + positions.append(p) + weights.append(w) + return positions, weights + + if isinstance(name, (tuple, list)): + return [read_positions_weights(n) for n in name] + return read_positions_weights(name) + + +def normalize_data_randoms_weights(data_weights, randoms_weights, weight_attrs=None): + # Renormalize randoms / data for each input catalogs + # data_weights should be a list (for each N/S catalogs) of weights + import inspect + from pycorr.twopoint_counter import _format_weights, get_inverse_probability_weight + if 
weight_attrs is None: weight_attrs = {} + weight_attrs = {k: v for k, v in weight_attrs.items() if k in inspect.getargspec(get_inverse_probability_weight).args} + wsums, weights = {}, {} + for name, catalog_weights in zip(['data', 'randoms'], [data_weights, randoms_weights]): + wsums[name], weights[name] = [], [] + for w in catalog_weights: + w, nbits = _format_weights(w, copy=True) # this will sort bitwise weights first, then single individual weight + iip = get_inverse_probability_weight(w[:nbits], **weight_attrs) if nbits else 1. + iip = iip * w[nbits] + wsums[name].append(iip.sum()) + weights[name].append(w) + wsum_data, wsum_randoms = sum(wsums['data']), sum(wsums['randoms']) + for icat, w in enumerate(weights['randoms']): + factor = wsums['data'][icat] / wsums['randoms'][icat] * wsum_randoms / wsum_data + w[-1] *= factor + logger.info('Rescaling randoms weights of catalog {:d} by {:.4f}.'.format(icat, factor)) + return weights['data'], weights['randoms'] + + +def concatenate_data_randoms(data, randoms=None, **kwargs): + + if randoms is None: + positions, weights = data + return _concatenate(positions), _concatenate(weights) + + positions, weights = {}, {} + for name in ['data', 'randoms']: + positions[name], weights[name] = locals()[name] + for name in positions: + concatenated = not isinstance(positions[name][0][0], (tuple, list)) # first catalog, unconcatenated [RA, DEC, distance] (False) or concatenated RA (True)? + if concatenated: + positions[name] = _concatenate(positions[name]) + else: + positions[name] = [_concatenate([p[i] for p in positions[name]]) for i in range(len(positions['randoms'][0]))] + data_weights, randoms_weights = [], [] + if concatenated: + wd, wr = normalize_data_randoms_weights(weights['data'], weights['randoms'], weight_attrs=kwargs.get('weight_attrs', None)) + weights['data'], weights['randoms'] = _concatenate(wd), _concatenate(wr) + else: + for i in range(len(weights['randoms'][0])): + wd, wr = normalize_data_randoms_weights(weights['data'], [w[i] for w in weights['randoms']], weight_attrs=kwargs.get('weight_attrs', None)) + data_weights.append(_concatenate(wd)) + randoms_weights.append(_concatenate(wr)) + weights['data'] = data_weights[0] + for wd in data_weights[1:]: + for w0, w in zip(weights['data'], wd): assert np.all(w == w0) + weights['randoms'] = randoms_weights + return [(positions[name], weights[name]) for name in ['data', 'randoms']] + + +def compute_angular_weights(nthreads=8, dtype='f8', tracer='ELG', tracer2=None, mpicomm=None, mpiroot=None, **kwargs): + + autocorr = tracer2 is None + catalog_kwargs = kwargs + + fibered_data_positions1, fibered_data_weights1, fibered_data_positions2, fibered_data_weights2 = None, None, None, None + parent_data_positions1, parent_data_weights1, parent_data_positions2, parent_data_weights2 = None, None, None, None + parent_randoms_positions1, parent_randoms_weights1, parent_randoms_positions2, parent_randoms_weights2 = None, None, None, None + + if mpicomm is None or mpicomm.rank == mpiroot: + + fibered_data = read_full_positions_weights(name='data', fibered=True, tracer=tracer, **catalog_kwargs) + parent_data, parent_randoms = read_full_positions_weights(name=['data', 'randoms'], fibered=False, tracer=tracer, **catalog_kwargs) + fibered_data_positions1, fibered_data_weights1 = concatenate_data_randoms(fibered_data) + (parent_data_positions1, parent_data_weights1), (parent_randoms_positions1, parent_randoms_weights1) = concatenate_data_randoms(parent_data, parent_randoms, **catalog_kwargs) + if not autocorr: + 
fibered_data = read_full_positions_weights(name='data', fibered=True, tracer=tracer2, **catalog_kwargs) + parent_data, parent_randoms = read_full_positions_weights(name=['data', 'randoms'], fibered=False, tracer=tracer2, **catalog_kwargs) + fibered_data_positions2, fibered_data_weights2 = concatenate_data_randoms(fibered_data) + (parent_data_positions2, parent_data_weights2), (parent_randoms_positions2, parent_randoms_weights2) = concatenate_data_randoms(parent_data, parent_randoms, **catalog_kwargs) + + tedges = np.logspace(-4., 0.5, 41) + # First D1D2_parent/D1D2_PIP angular weight + wangD1D2 = TwoPointCorrelationFunction('theta', tedges, data_positions1=fibered_data_positions1, data_weights1=fibered_data_weights1, + data_positions2=fibered_data_positions2, data_weights2=fibered_data_weights2, + randoms_positions1=parent_data_positions1, randoms_weights1=parent_data_weights1, + randoms_positions2=parent_data_positions2, randoms_weights2=parent_data_weights2, + estimator='weight', engine='corrfunc', position_type='rdd', nthreads=nthreads, + dtype=dtype, mpicomm=mpicomm, mpiroot=mpiroot) + + # First D1R2_parent/D1R2_IIP angular weight + # Input bitwise weights are automatically turned into IIP + if autocorr: + parent_randoms_positions2, parent_randoms_weights2 = parent_randoms_positions1, parent_randoms_weights1 + wangD1R2 = TwoPointCorrelationFunction('theta', tedges, data_positions1=fibered_data_positions1, data_weights1=fibered_data_weights1, + data_positions2=parent_randoms_positions2, data_weights2=parent_randoms_weights2, + randoms_positions1=parent_data_positions1, randoms_weights1=parent_data_weights1, + randoms_positions2=parent_randoms_positions2, randoms_weights2=parent_randoms_weights2, + estimator='weight', engine='corrfunc', position_type='rdd', nthreads=nthreads, + dtype=dtype, mpicomm=mpicomm, mpiroot=mpiroot) + wangR1D2 = None + if not autocorr: + wangR1D2 = TwoPointCorrelationFunction('theta', tedges, data_positions1=parent_randoms_positions1, data_weights1=parent_randoms_weights1, + data_positions2=fibered_data_positions2, data_weights2=fibered_data_weights2, + randoms_positions1=parent_randoms_positions1, randoms_weights1=parent_randoms_weights1, + randoms_positions2=parent_data_positions2, randoms_weights2=parent_data_weights2, + estimator='weight', engine='corrfunc', position_type='rdd', nthreads=nthreads, + dtype=dtype, mpicomm=mpicomm, mpiroot=mpiroot) + + wang = {} + wang['D1D2_twopoint_weights'] = wangD1D2 + wang['D1R2_twopoint_weights'] = wangD1R2 + wang['R1D2_twopoint_weights'] = wangR1D2 + + return wang + + +def compute_correlation_function(corr_type, edges, distance, nthreads=8, dtype='f8', wang=None, split_randoms_above=30., weight_type='default', tracer='ELG', tracer2=None, rec_type=None, njack=120, option=None, mpicomm=None, mpiroot=None, **kwargs): + + autocorr = tracer2 is None + catalog_kwargs = kwargs.copy() + catalog_kwargs['weight_type'] = weight_type + with_shifted = rec_type is not None + + if 'angular' in weight_type and wang is None: + + wang = compute_angular_weights(nthreads=nthreads, dtype=dtype, weight_type=weight_type, tracer=tracer, tracer2=tracer2, mpicomm=mpicomm, mpiroot=mpiroot, **kwargs) + + data_positions1, data_weights1, data_samples1, data_positions2, data_weights2, data_samples2 = None, None, None, None, None, None + randoms_positions1, randoms_weights1, randoms_samples1, randoms_positions2, randoms_weights2, randoms_samples2 = None, None, None, None, None, None + shifted_positions1, shifted_weights1, shifted_samples1, 
shifted_positions2, shifted_weights2, shifted_samples2 = None, None, None, None, None, None + jack_positions = None + + if mpicomm is None or mpicomm.rank == mpiroot: + + data, randoms = read_clustering_positions_weights(distance, name=['data', 'randoms'], rec_type=rec_type, tracer=tracer, option=option, **catalog_kwargs) + if with_shifted: + shifted = randoms # above returned shifted randoms + randoms = read_clustering_positions_weights(distance, name='randoms', rec_type=False, tracer=tracer, option=option, **catalog_kwargs) + (data_positions1, data_weights1), (randoms_positions1, randoms_weights1) = concatenate_data_randoms(data, randoms, **catalog_kwargs) + if with_shifted: + shifted_positions1, shifted_weights1 = concatenate_data_randoms(data, shifted, **catalog_kwargs)[1] + jack_positions = data_positions1 + + if not autocorr: + data, randoms = read_clustering_positions_weights(distance, name=['data', 'randoms'], rec_type=rec_type, tracer=tracer2, option=option, **catalog_kwargs) + if with_shifted: + shifted = randoms + randoms = read_clustering_positions_weights(distance, name='randoms', rec_type=False, tracer=tracer2, option=option, **catalog_kwargs) + (data_positions2, data_weights2), (randoms_positions2, randoms_weights2) = concatenate_data_randoms(data, randoms, **catalog_kwargs) + if with_shifted: + shifted_positions2, shifted_weights2 = concatenate_data_randoms(data, shifted, **catalog_kwargs)[1] + jack_positions = [np.concatenate([p1, p2], axis=0) for p1, p2 in zip(jack_positions, data_positions2)] + + if njack >= 2: + subsampler = KMeansSubsampler('angular', positions=jack_positions, nsamples=njack, nside=512, random_state=42, position_type='rdd', + dtype=dtype, mpicomm=mpicomm, mpiroot=mpiroot) + + if mpicomm is None or mpicomm.rank == mpiroot: + data_samples1 = subsampler.label(data_positions1) + randoms_samples1 = [subsampler.label(p) for p in randoms_positions1] + if with_shifted: + shifted_samples1 = [subsampler.label(p) for p in shifted_positions1] + if not autocorr: + data_samples2 = subsampler.label(data_positions2) + randoms_samples2 = [subsampler.label(p) for p in randoms_positions2] + if with_shifted: + shifted_samples2 = [subsampler.label(p) for p in shifted_positions2] + + kwargs = {} + kwargs.update(wang or {}) + randoms_kwargs = dict(randoms_positions1=randoms_positions1, randoms_weights1=randoms_weights1, randoms_samples1=randoms_samples1, + randoms_positions2=randoms_positions2, randoms_weights2=randoms_weights2, randoms_samples2=randoms_samples2, + shifted_positions1=shifted_positions1, shifted_weights1=shifted_weights1, shifted_samples1=shifted_samples1, + shifted_positions2=shifted_positions2, shifted_weights2=shifted_weights2, shifted_samples2=shifted_samples2) + + zedges = np.array(list(zip(edges[0][:-1], edges[0][1:]))) + mask = zedges[:,0] >= split_randoms_above + zedges = [zedges[~mask], zedges[mask]] + split_edges, split_randoms = [], [] + for ii, zedge in enumerate(zedges): + if zedge.size: + split_edges.append([np.append(zedge[:,0], zedge[-1,-1])] + list(edges[1:])) + split_randoms.append(ii > 0) + + results = [] + if mpicomm is None: + nran = len(randoms_positions1) + else: + nran = mpicomm.bcast(len(randoms_positions1) if mpicomm.rank == mpiroot else None, root=mpiroot) + for i_split_randoms, edges in zip(split_randoms, split_edges): + result = 0 + D1D2 = None + for iran in range(nran if i_split_randoms else 1): + tmp_randoms_kwargs = {} + if i_split_randoms: + # On scales above split_randoms_above, sum correlation function over multiple randoms 
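+                # DD pairs are counted once and recycled via the D1D2 argument of
+                # TwoPointCorrelationFunction below; the DR/RR terms are accumulated
+                # one random file at a time and combined via `result += tmp`, so RR
+                # over all concatenated randoms is never counted on these large scales.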
+ for name, arrays in randoms_kwargs.items(): + if arrays is None: + continue + else: + tmp_randoms_kwargs[name] = arrays[iran] + else: + # On scales below split_randoms_above, concatenate randoms + for name, arrays in randoms_kwargs.items(): + if arrays is None: + continue + elif isinstance(arrays[0], (tuple, list)): # e.g., list of bitwise weights + array = [np.concatenate([arr[iarr] for arr in arrays], axis=0) for iarr in range(len(arrays[0]))] + else: + array = np.concatenate(arrays, axis=0) + tmp_randoms_kwargs[name] = array + tmp = TwoPointCorrelationFunction(corr_type, edges, data_positions1=data_positions1, data_weights1=data_weights1, data_samples1=data_samples1, + data_positions2=data_positions2, data_weights2=data_weights2, data_samples2=data_samples2, + engine='corrfunc', position_type='rdd', nthreads=nthreads, dtype=dtype, **tmp_randoms_kwargs, **kwargs, + D1D2=D1D2, mpicomm=mpicomm, mpiroot=mpiroot) + D1D2 = tmp.D1D2 + result += tmp + results.append(result) + return results[0].concatenate_x(*results), wang + + +def get_edges(corr_type='smu', bin_type='lin'): + + if bin_type == 'log': + sedges = np.geomspace(0.01, 100., 49) + elif bin_type == 'lin': + sedges = np.linspace(0., 200, 201) + else: + raise ValueError('bin_type must be one of ["log", "lin"]') + if corr_type == 'smu': + edges = (sedges, np.linspace(-1., 1., 201)) #s is input edges and mu evenly spaced between -1 and 1 + elif corr_type == 'rppi': + if bin_type == 'lin': + edges = (sedges, np.linspace(-200., 200, 401)) #transverse and radial separations are coded to be the same here + else: + edges = (sedges, np.linspace(0., 40., 41)) + elif corr_type == 'theta': + edges = np.linspace(0., 4., 101) + else: + raise ValueError('corr_type must be one of ["smu", "rppi", "theta"]') + return edges + + +def corr_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax=np.inf, rec_type=False, weight_type='default', bin_type='lin', njack=0, nrandoms=8, split_randoms_above=10, out_dir='.', option=None, wang=None): + if tracer2: tracer += '_' + tracer2 + if rec_type: tracer += '_' + rec_type + if region: tracer += '_' + region + if option: + zmax = str(zmax) + option + split = '_split{:.0f}'.format(split_randoms_above) if split_randoms_above < np.inf else '' + wang = '{}_'.format(wang) if wang is not None else '' + root = '{}{}_{}_{}_{}_{}_njack{:d}_nran{:d}{}'.format(wang, tracer, zmin, zmax, weight_type, bin_type, njack, nrandoms, split) + if file_type == 'npy': + return os.path.join(out_dir, 'allcounts_{}.npy'.format(root)) + return os.path.join(out_dir, '{}_{}.txt'.format(file_type, root)) + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('--tracer', help='tracer(s) to be selected - 2 for cross-correlation', type=str, nargs='+', default=['ELG']) + parser.add_argument('--basedir', help='where to find catalogs', type=str, default='/global/cfs/cdirs/desi/survey/catalogs/') + parser.add_argument('--survey', help='e.g., SV3, DA02, etc.', type=str, default='SV3') + parser.add_argument('--verspec', help='version for redshifts', type=str, default='guadalupe') + parser.add_argument('--version', help='catalog version', type=str, default='test') + parser.add_argument('--region', help='regions; by default, run on N, S; pass NS to run on concatenated N + S', type=str, nargs='*', choices=['N', 'S', 'NS','NGC','SGC'], default=None) + parser.add_argument('--zlim', help='z-limits, or options for z-limits, e.g. 
"highz", "lowz", "fullonly"', type=str, nargs='*', default=None) + parser.add_argument('--maglim', help='absolute r-band magnitude limits', type=str, nargs='*', default=None) + parser.add_argument('--corr_type', help='correlation type', type=str, nargs='*', choices=['smu', 'rppi', 'theta'], default=['smu', 'rppi']) + parser.add_argument('--weight_type', help='types of weights to use; use "default_angular_bitwise" for PIP with angular upweighting; "default" just uses WEIGHT column', type=str, default='default') + parser.add_argument('--bin_type', help='binning type', type=str, choices=['log', 'lin'], default='lin') + parser.add_argument('--nran', help='number of random files to combine together (1-18 available)', type=int, default=4) + parser.add_argument('--split_ran_above', help='separation scale above which RR are summed over each random file;\ + typically, most efficient for xi < 1, i.e. sep > 10 Mpc/h;\ + see https://arxiv.org/pdf/1905.01133.pdf', type=float, default=20) + parser.add_argument('--njack', help='number of jack-knife subsamples; 0 for no jack-knife error estimates', type=int, default=60) + parser.add_argument('--nthreads', help='number of threads', type=int, default=64) + parser.add_argument('--outdir', help='base directory for output (default: SCRATCH)', type=str, default=None) + #parser.add_argument('--mpi', help='whether to use MPI', action='store_true', default=False) + parser.add_argument('--vis', help='show plot of each xi?', action='store_true', default=False) + parser.add_argument('--rebinning', help='whether to rebin the xi or just keep the original .npy file', default='y') + + #only relevant for reconstruction + parser.add_argument('--rec_type', help='reconstruction algorithm + reconstruction convention', choices=['IFTPrecsym', 'IFTPreciso','IFTrecsym', 'IFTreciso', 'MGrecsym', 'MGreciso'], type=str, default=None) + + setup_logging() + args = parser.parse_args() + + if args.rebinning == 'n': + args.rebinning = False + if args.rebinning == 'y': + args.rebinning = True + + mpicomm, mpiroot = None, None + if True:#args.mpi: + from pycorr import mpi + mpicomm = mpi.COMM_WORLD + mpiroot = 0 + + if os.path.normpath(args.basedir) == os.path.normpath('/global/cfs/cdirs/desi/survey/catalogs/'): + cat_dir = catalog_dir(base_dir=args.basedir, survey=args.survey, verspec=args.verspec, version=args.version) + elif os.path.normpath(args.basedir) == os.path.normpath('/global/project/projectdirs/desi/users/acarnero/mtl_mock000_univ1/'): + cat_dir = args.basedir + args.region = [''] + else: + cat_dir = args.basedir + if mpicomm is None or mpicomm.rank == mpiroot: + logger.info('Catalog directory is {}.'.format(cat_dir)) + + if args.outdir is None: + out_dir = os.path.join(get_scratch_dir(), args.survey) + else: + out_dir = args.outdir + if mpicomm is None or mpicomm.rank == mpiroot: + logger.info('Output directory is {}.'.format(out_dir)) + + tracer, tracer2 = args.tracer[0], None + if len(args.tracer) > 1: + tracer2 = args.tracer[1] + if len(args.tracer) > 2: + raise ValueError('Provide <= 2 tracers!') + if tracer2 == tracer: + tracer2 = None # otherwise counting of self-pairs + catalog_kwargs = dict(tracer=tracer, tracer2=tracer2, survey=args.survey, cat_dir=cat_dir, rec_type=args.rec_type) # survey required for zdone + distance = TabulatedDESI().comoving_radial_distance + + regions = args.region + if regions is None: + regions = get_regions(args.survey, rec=bool(args.rec_type)) + + option = None + if args.zlim is None: + zlims = get_zlims(tracer, tracer2=tracer2) + elif not 
args.zlim[0].replace('.', '').isdigit(): + option = args.zlim[0] + zlims = get_zlims(tracer, tracer2=tracer2, option=option) + else: + zlims = [float(zlim) for zlim in args.zlim] + + + if args.maglim is not None: + magmin = float(args.maglim[0]) + magmax = float(args.maglim[1]) + maglims = (magmin,magmax) + else: + maglims = None + + zlims = list(zip(zlims[:-1], zlims[1:])) + ([(zlims[0], zlims[-1])] if len(zlims) > 2 else []) # len(zlims) == 2 == single redshift range + rebinning_factors = [1, 4, 5, 10] if 'lin' in args.bin_type else [1, 2, 4] + pi_rebinning_factors = [1, 4, 5, 10] if 'log' in args.bin_type else [1] + if mpicomm is None or mpicomm.rank == mpiroot: + logger.info('Computing correlation functions {} in regions {} in redshift ranges {}.'.format(args.corr_type, regions, zlims)) + + for zmin, zmax in zlims: + base_file_kwargs = dict(tracer=tracer, tracer2=tracer2, zmin=zmin, zmax=zmax, rec_type=args.rec_type, weight_type=args.weight_type, bin_type=args.bin_type, njack=args.njack, nrandoms=args.nran, split_randoms_above=args.split_ran_above, option=option) + for region in regions: + wang = None + for corr_type in args.corr_type: + if mpicomm is None or mpicomm.rank == mpiroot: + logger.info('Computing correlation function {} in region {} in redshift range {}.'.format(corr_type, region, (zmin, zmax))) + edges = get_edges(corr_type=corr_type, bin_type=args.bin_type) + result, wang = compute_correlation_function(corr_type, edges=edges, distance=distance, nrandoms=args.nran, split_randoms_above=args.split_ran_above, nthreads=args.nthreads, region=region, zlim=(zmin, zmax), maglim=maglims, weight_type=args.weight_type, njack=args.njack, wang=wang, mpicomm=mpicomm, mpiroot=mpiroot, option=option, **catalog_kwargs) + # Save pair counts + if mpicomm is None or mpicomm.rank == mpiroot: + result.save(corr_fn(file_type='npy', region=region, out_dir=os.path.join(out_dir, corr_type), **base_file_kwargs)) + if mpicomm is None or mpicomm.rank == mpiroot: + if wang is not None: + for name in wang: + if wang[name] is not None: + wang[name].save(corr_fn(file_type='npy', region=region, out_dir=os.path.join(out_dir, 'wang'), **base_file_kwargs, wang=name)) + + # Save combination and .txt files + for corr_type in args.corr_type: + all_regions = regions.copy() + if mpicomm is None or mpicomm.rank == mpiroot: + if 'N' in regions and 'S' in regions: # let's combine + result = sum([TwoPointCorrelationFunction.load( + corr_fn(file_type='npy', region=region, out_dir=os.path.join(out_dir, corr_type), **base_file_kwargs)).normalize() for region in ['N', 'S']]) + result.save(corr_fn(file_type='npy', region='NScomb', out_dir=os.path.join(out_dir, corr_type), **base_file_kwargs)) + all_regions.append('NScomb') + if args.rebinning: + for region in all_regions: + txt_kwargs = base_file_kwargs.copy() + txt_kwargs.update(region=region, out_dir=os.path.join(out_dir, corr_type)) + result = TwoPointCorrelationFunction.load(corr_fn(file_type='npy', **txt_kwargs)) + for factor in rebinning_factors: + #result = TwoPointEstimator.load(fn) + rebinned = result[:(result.shape[0] // factor) * factor:factor] + txt_kwargs.update(bin_type=args.bin_type+str(factor)) + if corr_type == 'smu': + fn_txt = corr_fn(file_type='xismu', **txt_kwargs) + rebinned.save_txt(fn_txt) + fn_txt = corr_fn(file_type='xipoles', **txt_kwargs) + rebinned.save_txt(fn_txt, ells=(0, 2, 4)) + fn_txt = corr_fn(file_type='xiwedges', **txt_kwargs) + rebinned.save_txt(fn_txt, wedges=(-1., -2./3, -1./3, 0., 1./3, 2./3, 1.)) + elif corr_type == 'rppi': + 
fn_txt = corr_fn(file_type='wp', **txt_kwargs)
+                        rebinned.save_txt(fn_txt, pimax=40.)
+                        for pifac in pi_rebinning_factors:
+                            rebinned = result[:(result.shape[0]//factor)*factor:factor,:(result.shape[1]//pifac)*pifac:pifac]
+                            txt_kwargs.update(bin_type=args.bin_type+str(factor)+'_'+str(pifac))
+                            fn_txt = corr_fn(file_type='xirppi', **txt_kwargs)
+                            rebinned.save_txt(fn_txt)
+                    elif corr_type == 'theta':
+                        fn_txt = corr_fn(file_type='theta', **txt_kwargs)
+                        rebinned.save_txt(fn_txt)
+
+                    if args.vis:
+                        if corr_type == 'smu':
+                            sep, xis = rebinned(ells=(0, 2, 4), return_sep=True, return_std=False)
+                        elif corr_type == 'rppi':
+                            sep, xis = rebinned(pimax=40, return_sep=True, return_std=False)
+                        else:
+                            sep, xis = rebinned(return_sep=True, return_std=False)
+                        if args.bin_type == 'log':
+                            for xi in xis: plt.loglog(sep, xi)
+                        if args.bin_type == 'lin':
+                            for xi in xis: plt.plot(sep, sep**2 * xi)
+                        tracers = tracer
+                        if tracer2 is not None: tracers += ' x ' + tracer2
+                        plt.title('{} {:.2f} < z < {:.2f} in {}'.format(tracers, zmin, zmax, region))
+                        plt.show()
diff --git a/scripts/xirunpc.py b/scripts/xirunpc.py
index dd0e173f5..39aca790d 100644
--- a/scripts/xirunpc.py
+++ b/scripts/xirunpc.py
@@ -455,6 +455,8 @@ def compute_correlation_function(corr_type, edges, distance, nthreads=8, dtype='
             jack_positions = [np.concatenate([p1, p2], axis=0) for p1, p2 in zip(jack_positions, data_positions2)]
 
     if njack >= 2:
+        print('jack_positions')
+        print(jack_positions)
         subsampler = KMeansSubsampler('angular', positions=jack_positions, nsamples=njack, nside=512, random_state=42, position_type='rdd',
                                       dtype=dtype, mpicomm=mpicomm, mpiroot=mpiroot)
 

From 851dde04b2c894aa51f5ac572913aaa5e61fcb8f Mon Sep 17 00:00:00 2001
From: jalasker
Date: Fri, 20 Oct 2023 13:47:39 -0700
Subject: [PATCH 005/297] Y1 reproduction test working pre-reprocessing. Fixed
 by ordering fiberassign based on MTLTIME rather than RUNDATE.

---
 py/LSS/SV3/altmtltools.py | 473 ++++++++++++++++++++------------------
 py/LSS/SV3/fatools.py     |  38 +--
 2 files changed, 270 insertions(+), 241 deletions(-)

diff --git a/py/LSS/SV3/altmtltools.py b/py/LSS/SV3/altmtltools.py
index ed328ab92..ea2e3edb7 100644
--- a/py/LSS/SV3/altmtltools.py
+++ b/py/LSS/SV3/altmtltools.py
@@ -1,5 +1,6 @@
 from desiutil.iers import freeze_iers
 freeze_iers()
+import collections.abc
 from time import time
 import healpy as hp
 import pickle
@@ -403,161 +404,141 @@ def checkMTLChanged(MTLFile1, MTLFile2):
 
 def makeTileTrackerFN(dirName, survey, obscon):
     return dirName + '/{0}survey-{1}obscon-TileTracker.ecsv'.format(survey, obscon.upper())
+def makeTileTracker(altmtldir, survey = 'main', obscon = 'DARK', startDate = None,
+                    endDate = None, overwrite = True):
+    """Create an action file that orders all AMTL actions in the order
+    in which the real survey performed them.
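+
+    A minimal usage sketch (the directory path and dates here are hypothetical
+    example values):
+
+        makeTileTracker('/pscratch/u/user/simName/Univ000/', survey='main',
+                        obscon='DARK', startDate=20210514, endDate=20220613)
+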
-def makeTileTracker(altMTLDir, survey = 'main', obscon = 'dark', retroactive = False, - overwrite = False, startDate = None, endDate = None): - - # JL altMTLDir includes the UnivNNN - log.info('generating tile tracker file') - outputFN = makeTileTrackerFN(altMTLDir,survey, obscon) - if os.path.isfile(outputFN) and (not overwrite): - log.warning('Output File {0} already exists'.format(outputFN)) - log.warning('returning to AMTL initialization') - return 0 - if (startDate is None) or (startDate == ''): - startDate = 19990101 - if (endDate is None) or (endDate == ''): - endDate = 21991231 - startDate = int(startDate) - endDate = int(endDate) - surveyOpsTrunkDir = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/' - origMTLDoneTiles = Table.read(surveyOpsTrunkDir + '/mtl/mtl-done-tiles.ecsv') - #amtlTileFN = origMTLDir + '/mtl-done-tiles.ecsv' - origMTLDoneOverrides = Table.read(surveyOpsTrunkDir + '/mtl/mtl-done-overrides.ecsv') - #amtlOverrideFN = origMTLDir + '/mtl-done-overrides.ecsv' - origMTLTilesSpecStatus = Table.read(surveyOpsTrunkDir + '/ops/tiles-specstatus.ecsv') - ''' - if os.path.isfile(amtlTileFN): - altMTLDoneTiles = Table.read(amtlTileFN) - else: - altMTLDoneTiles = Table() - if os.path.isfile(amtlOverrideFN): - altMTLDoneOverrides = Table.read(amtlOverrideFN) - else: - altMTLDoneOverrides = Table() - ''' - TrimmedTiles = origMTLTilesSpecStatus[np.char.lower(origMTLTilesSpecStatus['SURVEY']) == survey.lower()] - TrimmedTiles = TrimmedTiles[np.char.lower(TrimmedTiles['FAPRGRM']) == obscon.lower()] - - TrimmedTiles.sort(keys = ['ARCHIVEDATE', 'LASTNIGHT']) - #TrimmedTiles.sort(keys = ['TIMESTAMP', 'LASTNIGHT']) - TrimmedTileIDs = TrimmedTiles['TILEID'] - #origMTLDoneTiles.sort(keys = ['ARCHIVEDATE', 'ZDATE']) - origMTLDoneTiles.sort(keys = ['TIMESTAMP', 'ZDATE']) - TILEID, ARCHIVEDATE, ZDATE, FADATE,ALTFADATE, FAMTLDATE, ALTARCHIVEDATE, ORIGMTLDATE, ORIGMTLTIMESTAMP, REPROCFLAG, OVERRIDEFLAG, OBSCONS, SURVEYS = [],[],[],[],[],[],[],[],[],[],[],[],[] - for omtlDoneTile in origMTLDoneTiles: - #TILEID TIMESTAMP VERSION PROGRAM ZDATE ARCHIVEDATE - thisTileID = omtlDoneTile['TILEID'] - if not (thisTileID in TrimmedTileIDs): - continue - thists = str(thisTileID).zfill(6) - FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+thists[:3]+'/fiberassign-'+thists+'.fits.gz' - fhtOrig = fitsio.read_header(FAOrigName) - thisVersion = omtlDoneTile['VERSION'] - thisProgram = omtlDoneTile['PROGRAM'] - thisZDate = omtlDoneTile['ZDATE'] - thisArchiveDate = omtlDoneTile['ARCHIVEDATE'] - thisOrigMTLTimestamp = omtlDoneTile['TIMESTAMP'] - thisOrigMTLDate = thisOrigMTLTimestamp.split('T')[0].replace('-', '') - if thisArchiveDate < startDate: - thisAltArchiveDate = thisArchiveDate - elif thisArchiveDate > endDate: - continue - else: - thisAltArchiveDate = None + Parameters + ---------- + altmtldir : :class:`str` + Path to the directory for a single realization of alternate MTL + ledgers. e.g. /pscratch/u/user/simName/Univ000/ + obscon : :class:`str`, optional, defaults to "dark" + A string matching ONE obscondition in the desitarget bitmask yaml + file (i.e. in `desitarget.targetmask.obsconditions`), e.g. "DARK" + Governs how priorities are set when merging targets. + survey : :class:`str`, optional, defaults to "main" + Used to look up the correct ledger, in combination with `obscon`. + Options are ``'main'`` and ``'svX``' (where X is 1, 2, 3 etc.) + for the main survey and different iterations of SV, respectively. 
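+    startDate : :class:`int` or :class:`str`, optional
+        First night (YYYYMMDD) whose actions still need to be processed;
+        actions from before `startDate` are recorded with ``DONEFLAG = True``.
+    endDate : :class:`int` or :class:`str`, optional
+        Last night (YYYYMMDD) to include; actions from after `endDate` are
+        left out of the tracker entirely.
+    overwrite : :class:`bool`, optional, defaults to ``True``
+        If ``True``, overwrite an existing tile tracker file when writing.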
+ + Returns + ------- + + [Nothing] + Notes + ----- + - Writes a tiletracker file to {altmtldir}/{survey.lower()}survey-{obscon.upper()}obscon-TileTracker.ecsv + """ - thisReprocFlag = thisTileID in TILEID - if thisReprocFlag: - thisFAMTLTime = None - thisFADate = None - thisAltFADate = None - else: - thisFAMTLTime = fhtOrig['RUNDATE'] + TileTrackerFN = makeTileTrackerFN(altmtldir, survey, obscon) - thisFADate = thisFAMTLTime.split('T')[0].replace('-', '') - if thisArchiveDate < startDate: - thisAltFADate = thisFADate - else: - thisAltArchiveDate = None - thisAltFADate = None - - #thisOverrideFlag = thisTileID in OverrideTileID - TILEID.append(thisTileID) - ARCHIVEDATE.append(thisArchiveDate) - ZDATE.append(thisZDate) - FADATE.append(thisFADate) - FAMTLDATE.append(thisFAMTLTime) - ALTFADATE.append(thisAltFADate) - ALTARCHIVEDATE.append(thisAltArchiveDate) - ORIGMTLDATE.append(thisOrigMTLDate) - ORIGMTLTIMESTAMP.append(thisOrigMTLTimestamp) - REPROCFLAG.append(thisReprocFlag) - OVERRIDEFLAG.append(None) - OBSCONS.append(obscon) - SURVEYS.append(survey) + if (survey.lower() == 'main') or (survey.lower() == 'y1'): - - - TilesToProcessNearlyInOrder = [TILEID, ARCHIVEDATE, ZDATE, ALTFADATE, FADATE, FAMTLDATE, ALTARCHIVEDATE, ORIGMTLDATE, ORIGMTLTIMESTAMP, REPROCFLAG, OVERRIDEFLAG, OBSCONS, SURVEYS] - - if survey.lower() == 'sv3': - firstSurveyDate = 20210404 - elif survey.lower() == 'main': - firstSurveyDate = 20210513 + surveyForTSS = 'main' + if survey.lower() == 'y1': + TileFN = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/tiles-{0}.fits'.format(obscon.upper()) + else: + TileFN = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-main.ecsv' + elif survey.lower() == 'sv3': + surveyForTSS = 'sv3' + TileFN = '/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/tiles-{0}.fits'.format(obscon.upper()) else: - log.warning('SURVEY SHOULD BE EITHER `sv3` OR `main`, BUT WAS GIVEN AS {0}'.format(survey.lower())) - firstSurveyDate = 20200101 - - t = Table(TilesToProcessNearlyInOrder, - names=('TILEID', 'ARCHIVEDATE', 'ZDATE', 'ALTFADATE', 'FADATE', 'FAMTLTIME', 'ALTARCHIVEDATE', 'ORIGMTLDATE', 'ORIGMTLTIMESTAMP', 'REPROCFLAG', 'OVERRIDEFLAG', 'OBSCON', 'SURVEY'), - meta={'Name': 'AltMTLTileTracker', 'StartDate': startDate, 'EndDate': endDate, 'Today': max(int(startDate), firstSurveyDate )}) - t.sort(['ORIGMTLTIMESTAMP','ZDATE']) - #t.sort(['ORIGMTLTIMESTAMP','FAMTLTIME']) - t.write(outputFN, format='ascii.ecsv') - return 1 + raise ValueError('only valid values for `survey` are `main` and `sv3.` {0} was provided'.format(survey)) + FABaseDir = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + Tiles = Table.read(TileFN) -def tiles_to_be_processed_alt(altmtldir, obscon = 'dark', survey = 'main', today = None, mode = 'fa'): + TSSFN = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-specstatus.ecsv' + TSS = Table.read(TSSFN) + MTLDTFN = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/mtl-done-tiles.ecsv' - TileTrackerFN = makeTileTrackerFN(altmtldir, survey, obscon) - TileTracker = Table.read(TileTrackerFN, format = 'ascii.ecsv') - - if mode.lower() == 'fa': - dateKey = 'FADATE' - log.info('len(TileTracker) pre removal of Nones = {0}'.format(len(TileTracker))) - TileTracker = TileTracker[TileTracker[dateKey] != None] - log.info('len(TileTracker) post removal of Nones = {0}'.format(len(TileTracker))) - - elif mode.lower() == 'update': - dateKey = 'ORIGMTLDATE' - else: - raise ValueError('mode must be either `fa` or `update`. 
You provided {0}'.format(mode)) - + MTLDT = Table.read(MTLDTFN) - if not (today is None): - log.info('dateKey = {0}'.format(dateKey)) - log.info('today = {0}'.format(today)) - log.info('TileTracker.shape = {0}'.format(len(TileTracker))) - log.info('some example dates = {0}'.format(TileTracker[dateKey][0:4])) - TileTracker = TileTracker[TileTracker[dateKey].astype(int) == int(today)] + #tiles-specstatus file filtered to only matching obscon and surveySURVEY FAPRGRM + TSS_Sel = TSS[(TSS['SURVEY'] == surveyForTSS) & (TSS['FAPRGRM'] == obscon.lower())] + + TilesSel = np.unique(TSS_Sel['TILEID']) + + TileIDs = [] + TypeOfActions = [] + TimesOfActions = [] + doneFlag = [] + archiveDates = [] + + for tileid in TilesSel: + print('tileid = {0}'.format(tileid)) + + ts = str(tileid).zfill(6) + + thisTileMTLDT = MTLDT[MTLDT['TILEID'] == tileid] + + if len(thisTileMTLDT) > 1: + thisTileMTLDT.sort('TIMESTAMP') + elif len(thisTileMTLDT) == 0: + continue + else: + log.info(len(thisTileMTLDT)) + log.info(thisTileMTLDT['ARCHIVEDATE']) + log.info(thisTileMTLDT['ARCHIVEDATE'][0]) + log.info(type(thisTileMTLDT['ARCHIVEDATE'][0])) + if thisTileMTLDT['ARCHIVEDATE'][0] > int(endDate): + continue + reprocFlag = False + thisFAFN = FABaseDir + f'/{ts[0:3]}/fiberassign-{ts}.fits' + + thisfhtOrig = fitsio.read_header(thisFAFN) + thisfadate = thisfhtOrig['MTLTIME'] + thisfadate = desitarget.mtl.add_to_iso_date(thisfadate, 1) + thisfanite = int(''.join(thisfadate.split('T')[0].split('-'))) + if thisfanite > endDate: + continue + + TileIDs.append(tileid) + TypeOfActions.append('fa') + TimesOfActions.append(thisfadate) + archiveDates.append(thisfanite) + if thisfanite < startDate: + doneFlag.append(True) + else: + doneFlag.append(False) + + for update in thisTileMTLDT: + + + thisupdateTimestamp = update['TIMESTAMP'] + thisupdateNite = int(''.join(thisupdateTimestamp.split('T')[0].split('-'))) + if (thisupdateNite > endDate): + continue + + TileIDs.append(tileid) + if reprocFlag: + TypeOfActions.append('reproc') + else: + TypeOfActions.append('update') + TimesOfActions.append(thisupdateTimestamp) + if (thisupdateNite < startDate): + doneFlag.append(True) + else: + doneFlag.append(False) + archiveDates.append(update['ARCHIVEDATE']) + reprocFlag = True + ActionList = [TileIDs, TypeOfActions, TimesOfActions, doneFlag, archiveDates] + t = Table(ActionList, + names=('TILEID', 'ACTIONTYPE', 'ACTIONTIME', 'DONEFLAG', 'ARCHIVEDATE'), + meta={'Name': 'AltMTLTileTracker', 'StartDate': startDate, 'EndDate': endDate, 'amtldir':altmtldir}) + t.sort(['ACTIONTIME', 'ACTIONTYPE', 'TILEID']) + t.write(TileTrackerFN, format='ascii.ecsv', overwrite = overwrite) - indices = np.where( ((TileTracker['OBSCON'] == obscon.upper()) | (TileTracker['OBSCON'] == obscon.lower())) & (TileTracker['SURVEY'] == survey.upper()) | (TileTracker['SURVEY'] == survey.lower()) ) - log.info('indices = {0}'.format(indices)) - returnTiles = TileTracker[indices] - #returnTiles = returnTiles[np.where((returnTiles['SURVEY'] == survey.upper()) | (returnTiles['SURVEY'] == survey.lower()))] - if mode.lower() == 'update': - returnTiles = returnTiles[np.where(returnTiles['ALTARCHIVEDATE'] == None)] - if mode.lower() == 'fa': - returnTiles = returnTiles[np.where(returnTiles['ALTFADATE'] == None)] - return returnTiles def trimToMTL(notMTL, MTL, debug = False, verbose = False): # JL trims a target file, which possesses all of the information in an MTL, down @@ -640,12 +621,16 @@ def initializeAlternateMTLs(initMTL, outputMTL, nAlt = 2, genSubset = None, seed else: 
log.debug('startdate') log.debug(startDate) - initialentries = allentries[allentries["TIMESTAMP"] < startDate] + initialentries = allentries[allentries["TIMESTAMP"] <= startDate] subpriorsInit = initialentries["SUBPRIORITY"] origmtltilefn = os.path.join(origmtldir, get_mtl_tile_file_name(secondary=False)) altmtltilefn = os.path.join(altmtldir, get_mtl_tile_file_name(secondary=False)) startDateShort = int(startDate.split('T')[0].replace('-', '')) + if ('T' in endDate) & ('-' in endDate): + endDateShort = int(endDate.split('T')[0].replace('-', '')) + else: + endDateShort = int(endDate) if verbose or debug: log.info('generate subset? {0}'.format(genSubset)) @@ -711,8 +696,10 @@ def initializeAlternateMTLs(initMTL, outputMTL, nAlt = 2, genSubset = None, seed thisTileTrackerFN = makeTileTrackerFN(finalDir.format(n), survey, obscon) log.info('path to tiletracker = {0}'.format(thisTileTrackerFN)) if not os.path.isfile(thisTileTrackerFN): - makeTileTracker(outputMTLDir, survey = survey, obscon = obscon, retroactive = False, - overwrite = False, startDate = startDate, endDate = endDate) + makeTileTracker(finalDir.format(n), survey = survey, obscon = obscon,overwrite = False, + startDate = startDateShort, endDate = endDateShort) + #makeTileTracker(outputMTLDir, survey = survey, obscon = obscon,overwrite = False, + #startDate = startDateShort, endDate = endDateShort) subpriors = initialentries['SUBPRIORITY'] if (not reproducing) and shuffleSubpriorities: @@ -907,9 +894,10 @@ def initializeAlternateMTLs(initMTL, outputMTL, nAlt = 2, genSubset = None, seed #JL - reset TARGET_STATES based on new target bits. This step isn't necessary for AMTL function but makes debugging using target states vastly easier. initialentries['TARGET_STATE'][ELGNewHIP & np.invert(QSOs)] = np.broadcast_to(np.array(['ELG_HIP|UNOBS']), np.sum(ELGNewHIP & np.invert(QSOs) ) ) - - io.write_mtl(outputMTLDir, initialentries, survey=survey, obscon=obscon, extra=meta, nsidefile=meta['FILENSID'], hpxlist = [meta['FILEHPX']]) - log.info('wrote MTLs') + retval = io.write_mtl(outputMTLDir, initialentries, survey=survey, obscon=obscon, extra=meta, nsidefile=meta['FILENSID'], hpxlist = [meta['FILEHPX']]) + if debug or verbose: + log.info('(nowrite = False) ntargs, fn = {0}'.format(retval)) + log.info('wrote MTLs to {0}'.format(outputMTLDir)) if saveBackup and (not usetmp): if not os.path.exists(str(outputMTLDir) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/'): os.makedirs(str(outputMTLDir) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/') @@ -928,10 +916,12 @@ def initializeAlternateMTLs(initMTL, outputMTL, nAlt = 2, genSubset = None, seed if debug: log.info('tempdir contents before copying') log.info(glob.glob(outputMTLDir + '/*' )) + log.info(glob.glob(outputMTLDir + '/main/dark/*' )) copyfile(str(outputMTLDir) +'/' + str(survey).lower() + '/' + str(obscon).lower() + '/' + str(fn), str(finalDir.format(n)) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/' + str(fn)) if debug: log.info('tempdir contents after copying') log.info(glob.glob(outputMTLDir + '/*' )) + log.info(glob.glob(outputMTLDir + '/main/dark/*' )) if saveBackup and not os.path.exists(str(outputMTLDir) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/' + str(fn)): #JL Potentially move the saveBackup copying to an afterburner @@ -995,12 +985,16 @@ def quickRestartFxn(ndirs = 1, altmtlbasedir = None, survey = 'sv3', obscon = 'd for fn in restartMTLs: copyfile(fn, altmtldirRestart +'/' + survey + '/' + obscon + '/' + 
fn.split('/')[-1]) -def do_fiberassignment(altmtldir, survey = 'sv3', obscon = 'dark', today = None, +def do_fiberassignment(altmtldir, FATiles, survey = 'sv3', obscon = 'dark', verbose = False, debug = False, getosubp = False, redoFA = False, mock = False): - FATiles = tiles_to_be_processed_alt(altmtldir, obscon = obscon, survey = survey, today = today, mode = 'fa') + #FATiles = tiles_to_be_processed_alt(altmtldir, obscon = obscon, survey = survey, today = today, mode = 'fa') if len(FATiles): try: log.info('FATiles[0] = {0}'.format(FATiles[0])) + if isinstance(FATiles[0], (collections.abc.Sequence, np.ndarray)): + pass + else: + FATiles = [FATiles] except: log.info('cannot access element 0 of FATiles') log.info('FATiles = {0}'.format(FATiles)) @@ -1013,11 +1007,11 @@ def do_fiberassignment(altmtldir, survey = 'sv3', obscon = 'dark', today = None, fadates = [] - if len(FATiles): - log.info('len FATiles = {0}'.format(len(FATiles))) - pass - else: - return OrigFAs, AltFAs, AltFAs2, TSs, fadates, FATiles + #if len(FATiles): + # log.info('len FATiles = {0}'.format(len(FATiles))) + # pass + #else: + # return OrigFAs, AltFAs, AltFAs2, TSs, fadates, FATiles for t in FATiles: log.info('t = {0}'.format(t)) #JL This loop takes each of the original fiberassignments for each of the tiles on $date @@ -1042,8 +1036,8 @@ def do_fiberassignment(altmtldir, survey = 'sv3', obscon = 'dark', today = None, log.info('fbadirbase = {0}'.format(fbadirbase)) log.info('ts = {0}'.format(ts)) - log.info('t[reprocflag] (should be false if here)= {0}'.format(t['REPROCFLAG'])) - assert(not bool(t['REPROCFLAG'])) + ##log.info('t[reprocflag] (should be false if here)= {0}'.format(t['REPROCFLAG'])) + ##assert(not bool(t['REPROCFLAG'])) #if str(ts) == str(3414).zfill(6): # raise ValueError('Not only do I create the backup here but I also need to fix the reproc flag') @@ -1090,6 +1084,7 @@ def do_fiberassignment(altmtldir, survey = 'sv3', obscon = 'dark', today = None, if getosubp and verbose: log.info('checking contents of fiberassign directory before calling get_fba_from_newmtl') log.info(glob.glob(fbadir + '/*' )) + #get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock, targver = targver)#, targets = targets) get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock, targver = targver)#, targets = targets) command_run = (['bash', fbadir + 'fa-' + ts + '.sh']) if verbose: @@ -1107,7 +1102,7 @@ def do_fiberassignment(altmtldir, survey = 'sv3', obscon = 'dark', today = None, return OrigFAs, AltFAs, AltFAs2, TSs, fadates, FATiles -def make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, survey = 'sv3', obscon = 'dark', changeFiberOpt = None, verbose = False, debug = False, getosubp = False, redoFA = False, today = None): +def make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, survey = 'sv3', obscon = 'dark', changeFiberOpt = None, verbose = False, debug = False, getosubp = False, redoFA = False): A2RMap = {} R2AMap = {} if verbose: @@ -1145,7 +1140,7 @@ def make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, sur log.info('dumping out fiber map to pickle file') with open(FAMapName, 'wb') as handle: pickle.dump((A2RMap, R2AMap), handle, protocol=pickle.HIGHEST_PROTOCOL) - thisUTCDate = get_utc_date(survey=survey) + #thisUTCDate = get_utc_date(survey=survey) if verbose: 
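The isinstance guard above exists because do_fiberassignment can now be handed either a single tracker action or a list of them. A small sketch of the same normalization, with plain tuples standing in for tracker rows (as_action_list is a hypothetical name, not part of the patch):

import collections.abc
import numpy as np

def as_action_list(actions):
    if len(actions) == 0:
        return []
    if isinstance(actions[0], (collections.abc.Sequence, np.ndarray)):
        return list(actions)   # already a collection of records
    return [actions]           # a single record: wrap it

one = (1000, 'fa')                          # a single action "row"
many = [(1000, 'fa'), (1001, 'update')]
assert as_action_list(one) == [(1000, 'fa')]
assert as_action_list(many) == many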
log.info('---') log.info('unique keys in R2AMap = {0:d}'.format(np.unique(R2AMap.keys()).shape[0])) @@ -1154,32 +1149,43 @@ def make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, sur log.info('---') log.info('unique keys in A2RMap = {0:d}'.format(np.unique(A2RMap.keys()).shape[0])) log.info('---') - retval = write_amtl_tile_tracker(altmtldir, [t], thisUTCDate, today, obscon = obscon, survey = survey, mode = 'fa') + #retval = write_amtl_tile_tracker(altmtldir, [t], obscon = obscon, survey = survey, mode = 'fa') + retval = write_amtl_tile_tracker(altmtldir, [t], obscon = obscon, survey = survey) log.info('write_amtl_tile_tracker retval = {0}'.format(retval)) return A2RMap, R2AMap -def update_alt_ledger(altmtldir,althpdirname, altmtltilefn, survey = 'sv3', obscon = 'dark', today = None, +def update_alt_ledger(altmtldir,althpdirname, altmtltilefn, actions, survey = 'sv3', obscon = 'dark', today = None, getosubp = False, zcatdir = None, mock = False, numobs_from_ledger = True, targets = None, verbose = False, debug = False): if verbose or debug: log.info('today = {0}'.format(today)) log.info('obscon = {0}'.format(obscon)) log.info('survey = {0}'.format(survey)) - UpdateTiles = tiles_to_be_processed_alt(altmtldir, obscon = obscon, survey = survey, today = today, mode = 'update') - log.info('updatetiles = {0}'.format(UpdateTiles)) + #UpdateTiles = tiles_to_be_processed_alt(altmtldir, obscon = obscon, survey = survey, today = today, mode = 'update') + #log.info('updatetiles = {0}'.format(UpdateTiles)) # ADM grab the zcat directory (in case we're relying on $ZCAT_DIR). zcatdir = get_zcat_dir(zcatdir) # ADM And contruct the associated ZTILE filename. ztilefn = os.path.join(zcatdir, get_ztile_file_name()) - if len(UpdateTiles): - pass - else: - return althpdirname, altmtltilefn, ztilefn, None - for t in UpdateTiles: - if t['REPROCFLAG']: - raise ValueError('Make sure backup is made and reprocessing logic is correct before beginning reprocessing.') + #if len(UpdateTiles): + # pass + #else: + # return althpdirname, altmtltilefn, ztilefn, None + #isinstance(FATiles[0], (collections.abc.Sequence, np.ndarray)) + if not isinstance(actions['TILEID'], (collections.abc.Sequence, np.ndarray)): + actions = [actions] + log.info('actions = {0}'.format(actions)) + for t in actions: + log.info('t = {0}'.format(t)) + if t['ACTIONTYPE'].lower() == 'reproc': + raise ValueError('Reprocessing should be handled elsewhere.') + #raise ValueError('Make sure backup is made and reprocessing logic is correct before beginning reprocessing.') ts = str(t['TILEID']).zfill(6) - fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + t['FADATE'] + '/' + FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz' + fhtOrig = fitsio.read_header(FAOrigName) + fadate = fhtOrig['RUNDATE'] + fadate = ''.join(fadate.split('T')[0].split('-')) + fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/' log.info('t = {0}'.format(t)) log.info('fbadirbase = {0}'.format(fbadirbase)) log.info('ts = {0}'.format(ts)) @@ -1240,12 +1246,12 @@ def update_alt_ledger(altmtldir,althpdirname, altmtltilefn, survey = 'sv3', obs assert(didUpdateHappen) if verbose or debug: log.info('if main, should sleep 1 second') - thisUTCDate = get_utc_date(survey=survey) + #thisUTCDate = get_utc_date(survey=survey) if survey == "main": sleep(1) if verbose or debug: log.info('has slept one second') - t["ALTARCHIVEDATE"] = thisUTCDate + #t["ALTARCHIVEDATE"] = thisUTCDate if verbose or debug: 
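The RUNDATE-based path logic above can be read as one small helper: the alternate fiberassign directory is now keyed on the night parsed from the original fiberassign header, rather than on a stored FADATE column. A sketch under that assumption (the function name is illustrative):

import fitsio

def fba_dir_for_tile(altmtldir, survey, tileid,
                     trunk='/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'):
    ts = str(tileid).zfill(6)
    hdr = fitsio.read_header(trunk + ts[:3] + '/fiberassign-' + ts + '.fits.gz')
    fadate = ''.join(hdr['RUNDATE'].split('T')[0].split('-'))  # e.g. '20210612'
    return altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/'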
log.info('now writing to amtl_tile_tracker') #io.write_mtl_tile_file(altmtltilefn,dateTiles) @@ -1253,14 +1259,15 @@ def update_alt_ledger(altmtldir,althpdirname, altmtltilefn, survey = 'sv3', obs log.info('changes are being registered') log.info('altmtldir = {0}'.format(altmtldir)) log.info('t = {0}'.format(t)) - log.info('thisUTCDate = {0}'.format(thisUTCDate)) + #log.info('thisUTCDate = {0}'.format(thisUTCDate)) log.info('today = {0}'.format(today)) - retval = write_amtl_tile_tracker(altmtldir, [t], thisUTCDate, today, obscon = obscon, survey = survey, mode = 'update') + #retval = write_amtl_tile_tracker(altmtldir, [t], obscon = obscon, survey = survey, mode = 'update') + retval = write_amtl_tile_tracker(altmtldir, [t], obscon = obscon, survey = survey) log.info('write_amtl_tile_tracker retval = {0}'.format(retval)) if verbose or debug: log.info('has written to amtl_tile_tracker') - return althpdirname, altmtltilefn, ztilefn, UpdateTiles + return althpdirname, altmtltilefn, ztilefn, actions #@profile def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, altmtlbasedir=None, ndirs = 3, numobs_from_ledger=True, @@ -1354,9 +1361,9 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, # ADM first grab all of the relevant files. # ADM grab the MTL directory (in case we're relying on $MTL_DIR). - mtldir = get_mtl_dir(mtldir) + ##mtldir = get_mtl_dir(mtldir) # ADM construct the full path to the mtl tile file. - mtltilefn = os.path.join(mtldir, get_mtl_tile_file_name(secondary=secondary)) + ##mtltilefn = os.path.join(mtldir, get_mtl_tile_file_name(secondary=secondary)) # ADM construct the relevant sub-directory for this survey and # ADM set of observing conditions.. form = get_mtl_ledger_format() @@ -1384,6 +1391,7 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, if quickRestart: + raise NotImplementedError('There is no way the quick restart will work properly post refactor.') quickRestartFxn(ndirs = ndirs, altmtlbasedir = altmtlbasedir, survey = survey, obscon = obscon, multiproc = multiproc, nproc = nproc) ### JL - this loop is through all realizations serially or (usually) one realization parallelized @@ -1399,48 +1407,55 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, altMTLTileTrackerFN = makeTileTrackerFN(altmtldir, survey = survey, obscon = obscon) altMTLTileTracker = Table.read(altMTLTileTrackerFN) - today = altMTLTileTracker.meta['Today'] - endDate = altMTLTileTracker.meta['EndDate'] - if not (singletile is None): - tiles = tiles[tiles['TILEID'] == singletile] + #today = altMTLTileTracker.meta['Today'] + #endDate = altMTLTileTracker.meta['EndDate'] + + actionList = altMTLTileTracker[np.invert(altMTLTileTracker['DONEFLAG'])] + + actionList.sort(['ACTIONTIME']) + #if not (singletile is None): + # tiles = tiles[tiles['TILEID'] == singletile] - if testDoubleDate: - raise NotImplementedError('this block needs to be moved for new organization of tiletracker.') - log.info('Testing Rosette with Doubled Date only') - cond1 = ((tiles['TILEID'] >= 298) & (tiles['TILEID'] <= 324)) - cond2 = ((tiles['TILEID'] >= 475) & (tiles['TILEID'] <= 477)) - log.info(tiles[tiles['TILEID' ] == 314]) - log.info(tiles[tiles['TILEID' ] == 315]) - tiles = tiles[cond1 | cond2 ] + #if testDoubleDate: + # raise NotImplementedError('this block needs to be moved for new organization of tiletracker.') + # log.info('Testing Rosette with Doubled Date only') + # cond1 = ((tiles['TILEID'] >= 298) & (tiles['TILEID'] <= 324)) + # cond2 = 
((tiles['TILEID'] >= 475) & (tiles['TILEID'] <= 477)) + # log.info(tiles[tiles['TILEID' ] == 314]) + # log.info(tiles[tiles['TILEID' ] == 315]) + # tiles = tiles[cond1 | cond2 ] #for ots,famtlt,reprocFlag in datepairs: - while int(today) <= int(endDate): - log.info('----------') - log.info('----------') - log.info('----------') - log.info('today = {0}'.format(today)) - log.info('----------') - log.info('----------') - log.info('----------') - log.info('----------') - - OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles = do_fiberassignment(altmtldir, survey = survey, obscon = obscon, today = today,verbose = verbose, debug = debug, getosubp = getosubp, redoFA = redoFA, mock = mock) - if len(OrigFAs): - A2RMap, R2AMap = make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, changeFiberOpt = changeFiberOpt, verbose = verbose, debug = debug, survey = survey , obscon = obscon, getosubp = getosubp, redoFA = redoFA, today = today) - - althpdirname, altmtltilefn, ztilefn, tiles = update_alt_ledger(altmtldir,althpdirname, altmtltilefn, survey = survey, obscon = obscon, today = today,getosubp = getosubp, zcatdir = zcatdir, mock = mock, numobs_from_ledger = numobs_from_ledger, targets = targets, verbose = verbose, debug = debug) - retval = write_amtl_tile_tracker(altmtldir, None, None, today, obscon = obscon, survey = survey, mode = 'endofday') - log.info('write_amtl_tile_tracker retval = {0}'.format(retval)) - - today = nextDate(today) - log.info('----------') - log.info('----------') - log.info('----------') - log.info('moving to next day: {0}'.format(today)) - log.info('----------') - log.info('----------') - log.info('----------') + #while int(today) <= int(endDate): + for action in actionList: + + if action['ACTIONTYPE'] == 'fa': + + OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles = do_fiberassignment(altmtldir, [action], survey = survey, obscon = obscon ,verbose = verbose, debug = debug, getosubp = getosubp, redoFA = redoFA, mock = mock) + assert(len(OrigFAs)) + A2RMap, R2AMap = make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, changeFiberOpt = changeFiberOpt, verbose = verbose, debug = debug, survey = survey , obscon = obscon, getosubp = getosubp, redoFA = redoFA ) + elif action['ACTIONTYPE'] == 'update': + althpdirname, altmtltilefn, ztilefn, tiles = update_alt_ledger(altmtldir,althpdirname, altmtltilefn, action, survey = survey, obscon = obscon ,getosubp = getosubp, zcatdir = zcatdir, mock = mock, numobs_from_ledger = numobs_from_ledger, targets = targets, verbose = verbose, debug = debug) + elif action['ACTIONTYPE'] == 'reproc': + raise NotImplementedError('make backup, then remove this line and continue') + #returns timedict + retval = reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK") + if debug or verbose: + log.info(f'retval = {retval}') + else: + raise ValueError('actiontype must be `fa`, `update`, or `reproc`.') + #retval = write_amtl_tile_tracker(altmtldir, None, None, today, obscon = obscon, survey = survey, mode = 'endofday') + #log.info('write_amtl_tile_tracker retval = {0}'.format(retval)) + + #today = nextDate(today) + #log.info('----------') + #log.info('----------') + #log.info('----------') + #log.info('moving to next day: {0}'.format(today)) + #log.info('----------') + #log.info('----------') + #log.info('----------') return althpdirname, altmtltilefn, ztilefn, tiles @@ -2075,7 +2090,7 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"): return timedict -def write_amtl_tile_tracker(dirname, 
tiles, timestamp, today, obscon = 'dark', survey = 'main', mode = 'fa'): +def write_amtl_tile_tracker(dirname, tiles, obscon = 'dark', survey = 'main'): """Write AMTL Processing times into TileTrackers Parameters @@ -2105,27 +2120,29 @@ def write_amtl_tile_tracker(dirname, tiles, timestamp, today, obscon = 'dark', if os.path.isfile(TileTrackerFN): TileTracker = Table.read(TileTrackerFN, format = 'ascii.ecsv') - if mode.lower() == 'update': - dateKey = 'ALTARCHIVEDATE' - elif mode.lower() == 'fa': - dateKey = 'ALTFADATE' - elif mode.lower() == 'endofday': - TileTracker.meta['Today'] = today - TileTracker.write(TileTrackerFN, format = 'ascii.ecsv', overwrite = True) - return 'only wrote today in metadata' + #if mode.lower() == 'update': + # dateKey = 'ALTARCHIVEDATE' + #elif mode.lower() == 'fa': + # dateKey = 'ALTFADATE' + #elif mode.lower() == 'endofday': + # TileTracker.meta['Today'] = today + # TileTracker.write(TileTrackerFN, format = 'ascii.ecsv', overwrite = True) + # return 'only wrote today in metadata' for t in tiles: + log.info('t = {0}'.format(t)) tileid = t['TILEID'] - reprocFlag = t['REPROCFLAG'] - cond = (TileTracker['TILEID'] == tileid) & (TileTracker['REPROCFLAG'] == reprocFlag) + #reprocFlag = t['REPROCFLAG'] + actionType = t['ACTIONTYPE'] + cond = (TileTracker['TILEID'] == tileid) & (TileTracker['ACTIONTYPE'] == actionType) log.info('for tile {0}, number of matching tiles = {1}'.format(tileid, np.sum(cond))) #debugTrap = np.copy(TileTracker[dateKey]) - TileTracker[dateKey][cond] = timestamp + TileTracker['DONEFLAG'][cond] = True - assert(not (np.all(TileTracker[dateKey] is None))) + assert(not (np.all(np.invert(TileTracker['DONEFLAG'])))) - if mode == 'update': - todaysTiles = TileTracker[TileTracker['ORIGMTLDATE'] == today] - #if np.sum(todaysTiles['ALTARCHIVEDATE'] == None) == 0: + #if mode == 'update': + # todaysTiles = TileTracker[TileTracker['ORIGMTLDATE'] == today] + # #if np.sum(todaysTiles['ALTARCHIVEDATE'] == None) == 0: TileTracker.write(TileTrackerFN, format = 'ascii.ecsv', overwrite = True) - return 'wrote more than just today in metadata' \ No newline at end of file + return 'done' \ No newline at end of file diff --git a/py/LSS/SV3/fatools.py b/py/LSS/SV3/fatools.py index eefa6d4af..f29325767 100644 --- a/py/LSS/SV3/fatools.py +++ b/py/LSS/SV3/fatools.py @@ -30,11 +30,11 @@ #JL - Adding a format string to change versioning of photometry. 
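The rewritten write_amtl_tile_tracker above reduces to flipping DONEFLAG for the matching (TILEID, ACTIONTYPE) rows and rewriting the ECSV. A minimal sketch of that operation in isolation (the filename and function name are hypothetical):

from astropy.table import Table

def mark_action_done(tracker_fn, tileid, actiontype):
    tracker = Table.read(tracker_fn, format='ascii.ecsv')
    cond = (tracker['TILEID'] == tileid) & (tracker['ACTIONTYPE'] == actiontype)
    tracker['DONEFLAG'][cond] = True
    tracker.write(tracker_fn, format='ascii.ecsv', overwrite=True)
    return int(cond.sum())  # how many rows were marked done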
skydir = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.57.0/skies' -#skydirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/skies' -skydirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/{0}/skies' +skydirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/skies' +#skydirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/{0}/skies' tdir = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.57.0/targets/sv3/resolve/' -#tdirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' -tdirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/{0}/targets/main/resolve/' +tdirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' +#tdirMain = '/global/cfs/cdirs/desi/target/catalogs/dr9/{0}/targets/main/resolve/' # AR default REF_EPOCH for PMRA=PMDEC=REF_EPOCH=0 objects gaia_ref_epochs = {"dr2": 2015.5} @@ -365,14 +365,24 @@ def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None, elif ('main' in indir.lower()) or ('holding' in indir.lower()): if verbose: log.info('main survey') - altcreate_mtl(tilef, - mtldir+prog, - gaiadr, - fht['PMCORR'], - tarfn, - tdirMain.format(targver)+prog, - survey = 'main', - mock = mock) + if targver == '1.1.1': + log.info('targver (should be 1.1.1) = {0}'.format(targver)) + altcreate_mtl(tilef, + mtldir+prog, + gaiadr, + fht['PMCORR'], + tarfn, + tdirMain.format(targver)+prog, + survey = 'main', + mock = mock) + #tdirMain+prog, + elif targver == '1.0.0': + if not os.path.exists(outdir): + log.info('running makedirs. making {0}'.format(outdir)) + os.makedirs(outdir) + + shutil.copyfile(indir+ts+'-targ.fits', tarfn) + else: log.critical('invalid input directory. must contain either sv3, main, or holding') raise ValueError('indir must contain either sv3, main, or holding') @@ -590,10 +600,12 @@ def altcreate_mtl( #for col in tcol: # columns.append(col) if not mock: + log.info('len(d)= {0}'.format(len(d))) + d = inflate_ledger( d, targdir, columns=columns, header=False, strictcols=False, quick=True ) # AR adding PLATE_RA, PLATE_DEC, PLATE_REF_EPOCH ? 
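The targver dispatch above, condensed to its skeleton: version 1.1.1 photometry rebuilds the tile's targets from the alternate MTLs, while 1.0.0 tiles fall back to copying the original per-tile target file. A sketch, with rebuild_fn standing in for the altcreate_mtl call and all paths illustrative:

import os
import shutil

def stage_targets(targver, indir, outdir, ts, tarfn, rebuild_fn):
    if targver == '1.1.1':
        rebuild_fn(tarfn)   # stands in for altcreate_mtl(...) above
    elif targver == '1.0.0':
        os.makedirs(outdir, exist_ok=True)
        shutil.copyfile(indir + ts + '-targ.fits', tarfn)
    else:
        raise ValueError('unsupported targver: {0}'.format(targver))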
- + log.info('len(d)= {0}'.format(len(d))) if add_plate_cols: d = Table(d) d["PLATE_RA"] = d["RA"] From a567138b62090748f046e86e1a94dc7277121dea Mon Sep 17 00:00:00 2001 From: jalasker Date: Thu, 26 Oct 2023 07:57:52 -0700 Subject: [PATCH 006/297] reproduction tests successful --- bin/dateLoopAltMTLBugFix.sh | 6 +- bin/runAltMTLParallel.py | 3 +- py/LSS/SV3/altmtltools.py | 146 ++++++++++++++++++++++++------------ py/LSS/SV3/fatools.py | 13 +++- 4 files changed, 111 insertions(+), 57 deletions(-) diff --git a/bin/dateLoopAltMTLBugFix.sh b/bin/dateLoopAltMTLBugFix.sh index 1663b97f9..c9f80822d 100755 --- a/bin/dateLoopAltMTLBugFix.sh +++ b/bin/dateLoopAltMTLBugFix.sh @@ -32,18 +32,18 @@ echo "$argstring" if [ $QVal = 'interactive' ]; then - srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 03:00:00 --dependency=afterany:16533190 $path2LSS/runAltMTLParallel.py $argstring + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 03:00:00 --dependency=afterany:17342877 $path2LSS/runAltMTLParallel.py $argstring fi if [ $QVal = 'regular' ]; then - srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:16533190 $path2LSS/runAltMTLParallel.py $argstring + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:17342877 $path2LSS/runAltMTLParallel.py $argstring fi if [ $QVal = 'debug' ]; then - srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 00:15:00 --dependency=afterany:16533190 $path2LSS/runAltMTLParallel.py $argstring + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 00:15:00 --dependency=afterany:17342877 $path2LSS/runAltMTLParallel.py $argstring fi #retcode=$? #qR=0 #DO NOT CHANGE. This prevents further restarts after the first if qR is set to 1 at top. diff --git a/bin/runAltMTLParallel.py b/bin/runAltMTLParallel.py index 57febdf75..5af9e20fc 100755 --- a/bin/runAltMTLParallel.py +++ b/bin/runAltMTLParallel.py @@ -34,6 +34,7 @@ parser.add_argument('-tf', '--targfile', dest='targfile', required=False, default = None, type=str, help = 'Location for target file for mocks or data. Only required if mocks are being processed.') parser.add_argument('-v', '--verbose', dest = 'verbose', default=False, action='store_true', help = 'set flag to enter verbose mode') parser.add_argument('-qr', '--quickRestart', dest = 'quickRestart', default=False, action='store_true', help = 'set flag to remove any AMTL updates that have already been performed. Useful for rapidfire debugging of steps in this part of the pipeline.') +parser.add_argument('-rep', '--reproducing', action='store_true', dest='reproducing', default=False, help = 'WARNING: THIS FLAG SHOULD ONLY BE USED FOR DEBUGGING. Pass this flag to confirm to the alt mtl code that you are trying to reproduce real MTLs. This option should (must?) be used in conjunction with --shuffleSubpriorities.', required = False) parser.add_argument('-prof', '--profile', dest = 'profile', default=False, action='store_true', help = 'set flag to profile code time usage. This flag may not profile all components of any particular stage of the AMTL pipeline. 
') parser.add_argument('-d', '--debug', dest = 'debug', default=False, action='store_true', help = 'set flag to enter debug mode.') parser.add_argument('-nfl', '--NumObsNotFromLedger', dest = 'numobs_from_ledger', default=True, action='store_false', help = 'If True (flag is NOT set) then inherit the number of observations so far from the ledger rather than expecting it to have a reasonable value in the zcat.') @@ -116,7 +117,7 @@ def procFunc(nproc): print(targets['DEC'][0:5]) else: targets = None - retval = amt.loop_alt_ledger(args.obscon, survey = args.survey, mtldir = args.mtldir, zcatdir = args.zcatdir, altmtlbasedir = args.altMTLBaseDir, ndirs = ndirs, numobs_from_ledger = args.numobs_from_ledger,secondary = args.secondary, getosubp = args.getosubp, quickRestart = args.quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = args.redoFA, mock = args.mock, targets = targets, debug = args.debug, verbose = args.verbose) + retval = amt.loop_alt_ledger(args.obscon, survey = args.survey, mtldir = args.mtldir, zcatdir = args.zcatdir, altmtlbasedir = args.altMTLBaseDir, ndirs = ndirs, numobs_from_ledger = args.numobs_from_ledger,secondary = args.secondary, getosubp = args.getosubp, quickRestart = args.quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = args.redoFA, mock = args.mock, targets = targets, debug = args.debug, verbose = args.verbose, reproducing = args.reproducing) if args.verbose: log.debug('finished with one iteration of procFunc') if type(retval) == int: diff --git a/py/LSS/SV3/altmtltools.py b/py/LSS/SV3/altmtltools.py index ea2e3edb7..54df13fc0 100644 --- a/py/LSS/SV3/altmtltools.py +++ b/py/LSS/SV3/altmtltools.py @@ -1,40 +1,54 @@ from desiutil.iers import freeze_iers freeze_iers() + import collections.abc from time import time -import healpy as hp -import pickle -from astropy.table import Table,join import astropy import astropy.io import astropy.io.fits as pf +from astropy.table import Table,join + +import memory_profiler +from memory_profiler import profile + import desitarget from desitarget import io, mtl from desitarget.cuts import random_fraction_of_trues -import memory_profiler -from memory_profiler import profile from desitarget.mtl import get_mtl_dir, get_mtl_tile_file_name,get_mtl_ledger_format from desitarget.mtl import get_zcat_dir, get_ztile_file_name, tiles_to_be_processed from desitarget.mtl import make_zcat,survey_data_model,update_ledger, get_utc_date + from desitarget.targets import initial_priority_numobs, decode_targetid from desitarget.targetmask import obsconditions, obsmask from desitarget.targetmask import desi_mask, bgs_mask, mws_mask, zwarn_mask + from desiutil.log import get_logger + import fitsio + +import healpy as hp + + from LSS.bitweights import pack_bitweights from LSS.SV3.fatools import get_fba_fromnewmtl import LSS.SV3.fatools as fatools + import matplotlib.pyplot as plt + import numpy as np from numpy import random as rand import numpy.lib.recfunctions as rfn + import os +import pickle import subprocess import sys from time import sleep + import cProfile, pstats import io as ProfileIO from pstats import SortKey + import glob @@ -618,6 +632,7 @@ def initializeAlternateMTLs(initMTL, outputMTL, nAlt = 2, genSubset = None, seed firstTS = allentries[0]["TIMESTAMP"] initialentries = allentries[allentries["TIMESTAMP"] == firstTS] subpriorsInit = initialentries["SUBPRIORITY"] + startDateShort = 19990101 else: log.debug('startdate') log.debug(startDate) @@ -690,6 +705,8 @@ def 
initializeAlternateMTLs(initMTL, outputMTL, nAlt = 2, genSubset = None, seed log.info('pre creating output dir') if not os.path.exists(outputMTLDir): os.makedirs(outputMTLDir) + if not os.path.exists(finalDir.format(n)): + os.makedirs(finalDir.format(n)) if not os.path.isfile(finalDir.format(n) + '/' + ztilefn): processTileFile(ztilefile, outputMTLDir + ztilefn, startDate, endDate) #os.symlink(ztilefile, outputMTLDir + ztilefn) @@ -986,7 +1003,7 @@ def quickRestartFxn(ndirs = 1, altmtlbasedir = None, survey = 'sv3', obscon = 'd copyfile(fn, altmtldirRestart +'/' + survey + '/' + obscon + '/' + fn.split('/')[-1]) def do_fiberassignment(altmtldir, FATiles, survey = 'sv3', obscon = 'dark', - verbose = False, debug = False, getosubp = False, redoFA = False, mock = False): + verbose = False, debug = False, getosubp = False, redoFA = False, mock = False, reproducing = False): #FATiles = tiles_to_be_processed_alt(altmtldir, obscon = obscon, survey = survey, today = today, mode = 'fa') if len(FATiles): try: @@ -1085,7 +1102,7 @@ def do_fiberassignment(altmtldir, FATiles, survey = 'sv3', obscon = 'dark', log.info('checking contents of fiberassign directory before calling get_fba_from_newmtl') log.info(glob.glob(fbadir + '/*' )) #get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock, targver = targver)#, targets = targets) - get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock, targver = targver)#, targets = targets) + get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock, targver = targver, reproducing = reproducing)#, targets = targets) command_run = (['bash', fbadir + 'fa-' + ts + '.sh']) if verbose: log.info('fa command_run') @@ -1275,7 +1292,7 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, getosubp = False, quickRestart = False, redoFA = False, multiproc = False, nproc = None, testDoubleDate = False, changeFiberOpt = None, targets = None, mock = False, - debug = False, verbose = False): + debug = False, verbose = False, reproducing = False): """Execute full MTL loop, including reading files, updating ledgers. 
Parameters @@ -1432,17 +1449,19 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, if action['ACTIONTYPE'] == 'fa': - OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles = do_fiberassignment(altmtldir, [action], survey = survey, obscon = obscon ,verbose = verbose, debug = debug, getosubp = getosubp, redoFA = redoFA, mock = mock) + OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles = do_fiberassignment(altmtldir, [action], survey = survey, obscon = obscon ,verbose = verbose, debug = debug, getosubp = getosubp, redoFA = redoFA, mock = mock, reproducing = reproducing) assert(len(OrigFAs)) A2RMap, R2AMap = make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, changeFiberOpt = changeFiberOpt, verbose = verbose, debug = debug, survey = survey , obscon = obscon, getosubp = getosubp, redoFA = redoFA ) elif action['ACTIONTYPE'] == 'update': althpdirname, altmtltilefn, ztilefn, tiles = update_alt_ledger(altmtldir,althpdirname, altmtltilefn, action, survey = survey, obscon = obscon ,getosubp = getosubp, zcatdir = zcatdir, mock = mock, numobs_from_ledger = numobs_from_ledger, targets = targets, verbose = verbose, debug = debug) elif action['ACTIONTYPE'] == 'reproc': - raise NotImplementedError('make backup, then remove this line and continue') #returns timedict - retval = reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK") + + + retval = reprocess_alt_ledger(altmtldir, action, obscon=obscon, survey = survey) if debug or verbose: log.info(f'retval = {retval}') + else: raise ValueError('actiontype must be `fa`, `update`, or `reproc`.') #retval = write_amtl_tile_tracker(altmtldir, None, None, today, obscon = obscon, survey = survey, mode = 'endofday') @@ -1820,7 +1839,7 @@ def writeBitweights(mtlBaseDir, ndirs = None, hplist = None, debug = False, outd data.write(fn, overwrite = overwrite) -def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"): +def reprocess_alt_ledger(altmtldir, action, obscon="dark", survey = 'main', zcatdir = None): """ Reprocess HEALPixel-split ledgers for targets with new redshifts. @@ -1846,6 +1865,33 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"): are the TIMESTAMP at which that tile was reprocessed. 
""" + tileid = action['TILEID'] + ts = str(tileid).zfill(6) + FABaseDir = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + FAFN = FABaseDir + f'/{ts[0:3]}/fiberassign-{ts}.fits' + + fhtOrig = fitsio.read_header(FAFN) + fadate = fhtOrig['RUNDATE'] + fanite = int(''.join(fadate.split('T')[0].split('-'))) + + hpdirname = altmtldir + f'/{survey.lower()}/{obscon.lower()}/' + + fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + str(fanite) + '/' + + #if getosubp: + # FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + #else: + FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + with open(FAMapName,'rb') as fl: + (A2RMap, R2AMap) = pickle.load(fl,fix_imports = True) + + #zcat = make_zcat(zcatdir, dateTiles, obscon, survey) + zcatdir = get_zcat_dir(zcatdir) + zcat = make_zcat(zcatdir, [action], obscon, survey, allow_overlaps = True) + log.info('ts = {0}'.format(ts)) + altZCat = makeAlternateZCat(zcat, R2AMap, A2RMap) + + #if getosubp: # FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' @@ -1854,8 +1900,8 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"): #with open(FAMapName,'rb') as fl: # (A2RMapTemp, R2AMapTemp) = pickle.load(fl,fix_imports = True) t0 = time() - log.info("Reprocessing based on zcat with {} entries...t={:.1f}s" - .format(len(zcat), time()-t0)) + log.info("Reprocessing based on altZCat with {} entries...t={:.1f}s" + .format(len(altZCat), time()-t0)) # ADM the output dictionary. timedict = {} @@ -1876,21 +1922,21 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"): log.critical(msg) raise RuntimeError(msg) - # ADM check the zcat has unique TARGETID/TILEID combinations. + # ADM check the altZCat has unique TARGETID/TILEID combinations. tiletarg = [str(tt["ZTILEID"]) + "-" + str(tt["TARGETID"]) for tt in zcat] if len(set(tiletarg)) != len(tiletarg): - msg = "Passed zcat does NOT have unique TARGETID/TILEID combinations!!!" + msg = "Passed altZCat does NOT have unique TARGETID/TILEID combinations!!!" log.critical(msg) raise RuntimeError(msg) # ADM record the set of tiles that are being reprocessed. - reproctiles = set(zcat["ZTILEID"]) + reproctiles = set(altZCat["ZTILEID"]) # ADM read ALL targets from the relevant ledgers. log.info("Reading (all instances of) targets for {} tiles...t={:.1f}s" .format(len(reproctiles), time()-t0)) nside = desitarget.mtl._get_mtl_nside() - theta, phi = np.radians(90-zcat["DEC"]), np.radians(zcat["RA"]) + theta, phi = np.radians(90-altZCat["DEC"]), np.radians(altZCat["RA"]) pixnum = hp.ang2pix(nside, theta, phi, nest=True) pixnum = list(set(pixnum)) targets = io.read_mtl_in_hp(hpdirname, nside, pixnum, unique=False) @@ -1901,14 +1947,14 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"): # ADM sort by TIMESTAMP to ensure tiles are listed chronologically. targets = targets[np.argsort(targets["TIMESTAMP"])] - # ADM for speed, we only need to work with targets with a zcat entry. + # ADM for speed, we only need to work with targets with a altZCat entry. 
ntargs = len(targets)
     nuniq = len(set(targets["TARGETID"]))
     log.info("Read {} targets with {} unique TARGETIDs...t={:.1f}s"
              .format(ntargs, nuniq, time()-t0))
-    log.info("Limiting targets to {} (unique) TARGETIDs in the zcat...t={:.1f}s"
-             .format(len(set(zcat["TARGETID"])), time()-t0))
-    s = set(zcat["TARGETID"])
+    log.info("Limiting targets to {} (unique) TARGETIDs in the altZCat...t={:.1f}s"
+             .format(len(set(altZCat["TARGETID"])), time()-t0))
+    s = set(altZCat["TARGETID"])
     ii = np.array([tid in s for tid in targets["TARGETID"]])
     targets = targets[ii]
     nuniq = len(set(targets["TARGETID"]))
@@ -1941,37 +1987,37 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"):
     # ADM remember to sort ii so that the first tiles appear first.
     orderedtiles = targets["ZTILEID"][sorted(ii)]
 
-    # ADM assemble a zcat for all previous and reprocessed observations.
-    zcatfromtargs = np.zeros(len(targets), dtype=zcat.dtype)
-    for col in zcat.dtype.names:
-        zcatfromtargs[col] = targets[col]
+    # ADM assemble an altZCat for all previous and reprocessed observations.
+    altZCatfromtargs = np.zeros(len(targets), dtype=zcat.dtype)
+    for col in altZCat.dtype.names:
+        altZCatfromtargs[col] = targets[col]
 
     # ADM note that we'll retain the TIMESTAMPed order of the old ledger
     # ADM entries and new redshifts will (deliberately) be listed last.
-    allzcat = np.concatenate([zcatfromtargs, zcat])
-    log.info("Assembled a zcat of {} total observations...t={:.1f}s"
-             .format(len(allzcat), time()-t0))
+    allaltZCat = np.concatenate([altZCatfromtargs, altZCat])
+    log.info("Assembled an altZCat of {} total observations...t={:.1f}s"
+             .format(len(allaltZCat), time()-t0))
 
     # ADM determine the FINAL observation for each TILED-TARGETID combo.
     # ADM must flip first as np.unique finds the FIRST unique entries.
-    allzcat = np.flip(allzcat)
+    allaltZCat = np.flip(allaltZCat)
     # ADM create a unique hash of TILEID and TARGETID.
-    tiletarg = [str(tt["ZTILEID"]) + "-" + str(tt["TARGETID"]) for tt in allzcat]
+    tiletarg = [str(tt["ZTILEID"]) + "-" + str(tt["TARGETID"]) for tt in allaltZCat]
     # ADM find the final unique combination of TILEID and TARGETID.
     _, ii = np.unique(tiletarg, return_index=True)
     # ADM make sure to retain exact reverse-ordering.
     ii = sorted(ii)
     # ADM condition on indexes-of-uniqueness and flip back.
-    allzcat = np.flip(allzcat[ii])
+    allaltZCat = np.flip(allaltZCat[ii])
     log.info("Found {} final TARGETID/TILEID combinations...t={:.1f}s"
-             .format(len(allzcat), time()-t0))
+             .format(len(allaltZCat), time()-t0))
 
     # ADM mock up a dictionary of timestamps in advance. This is faster
     # ADM as no delays need to be built into the code.
     now = get_utc_date(survey="main")
     timestamps = {t: desitarget.mtl.add_to_iso_date(now, s)
                   for s, t in enumerate(orderedtiles)}
 
-    # ADM make_mtl() expects zcats to be in Table form.
-    allzcat = Table(allzcat)
+    # ADM make_mtl() expects altZCats to be in Table form.
+    allaltZCat = Table(allaltZCat)
 
     # ADM a merged target list to track and record the final states.
     mtl = Table(unobs)
     # ADM to hold the final list of updates per-tile.
@@ -1983,23 +2029,23 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"):
         timestamp = timestamps[tileid]
 
         # ADM restrict to the observations on this tile.
-        zcatmini = allzcat[allzcat["ZTILEID"] == tileid]
+        altZCatmini = allaltZCat[allaltZCat["ZTILEID"] == tileid]
 
         # ADM check there are only unique TARGETIDs on each tile!
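The flip/np.unique idiom above is worth a standalone demo: np.unique keeps the FIRST occurrence of each key, so reversing, deduplicating, and reversing back keeps the FINAL observation per TILEID-TARGETID pair. With invented rows:

import numpy as np

obs = np.array([(1, 10, 'old'), (2, 10, 'old'), (1, 10, 'new')],
               dtype=[('ZTILEID', 'i8'), ('TARGETID', 'i8'), ('TAG', 'U3')])
rev = np.flip(obs)
keys = [str(r['ZTILEID']) + '-' + str(r['TARGETID']) for r in rev]
_, ii = np.unique(keys, return_index=True)
final = np.flip(rev[sorted(ii)])
print(final['TAG'])  # -> ['old' 'new']: tile 2 keeps 'old', tile 1 keeps 'new'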
- if len(set(zcatmini["TARGETID"])) != len(zcatmini): + if len(set(altZCatmini["TARGETID"])) != len(altZCatmini): msg = "There are duplicate TARGETIDs on tile {}".format(tileid) log.critical(msg) raise RuntimeError(msg) - # ADM update NUMOBS in the zcat using previous MTL totals. - mii, zii = desitarget.mtl.match(mtl["TARGETID"], zcatmini["TARGETID"]) - zcatmini["NUMOBS"][zii] = mtl["NUMOBS"][mii] + 1 + # ADM update NUMOBS in the altZCat using previous MTL totals. + mii, zii = desitarget.mtl.match(mtl["TARGETID"], altZCatmini["TARGETID"]) + altZCatmini["NUMOBS"][zii] = mtl["NUMOBS"][mii] + 1 - # ADM restrict to just objects in the zcat that match an UNOBS + # ADM restrict to just objects in the altZCat that match an UNOBS # ADM target (i,e that match something in the MTL). - log.info("Processing {}/{} observations from zcat on tile {}...t={:.1f}s" - .format(len(zii), len(zcatmini), tileid, time()-t0)) + log.info("Processing {}/{} observations from altZCat on tile {}...t={:.1f}s" + .format(len(zii), len(altZCatmini), tileid, time()-t0)) log.info("(i.e. removed secondaries-if-running-primaries or vice versa)") - zcatmini = zcatmini[zii] + altZCatmini = altZCatmini[zii] # ADM ------ # ADM NOTE: We could use trimtozcat=False without matching, and @@ -2008,7 +2054,7 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"): # ADM complexity somewhere, hence trimtozcat=True/matching-back. # ADM ------ # ADM push the observations on this tile through MTL. - zmtl = desitarget.mtl.make_mtl(mtl, oc, zcat=zcatmini, trimtozcat=True, trimcols=True) + zmtl = desitarget.mtl.make_mtl(mtl, oc, zcat=altZCatmini, trimtozcat=True, trimcols=True) # ADM match back to overall merged target list to update states. mii, zii = desitarget.mtl.match(mtl["TARGETID"], zmtl["TARGETID"]) @@ -2019,16 +2065,16 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"): mtl["TIMESTAMP"][mii] = timestamp # ADM trimtozcat=True discards BAD observations. Retain these. - tidmiss = list(set(zcatmini["TARGETID"]) - set(zmtl["TARGETID"])) - tii = desitarget.mtl.match_to(zcatmini["TARGETID"], tidmiss) - zbadmiss = zcatmini[tii] + tidmiss = list(set(altZCatmini["TARGETID"]) - set(zmtl["TARGETID"])) + tii = desitarget.mtl.match_to(altZCatmini["TARGETID"], tidmiss) + zbadmiss = altZCatmini[tii] # ADM check all of the missing observations are, indeed, bad. if np.any(zbadmiss["ZWARN"] & zwarn_mask.mask(Mxbad) == 0): msg = "Some objects skipped by make_mtl() on tile {} are not BAD!!!" msg = msg.format(tileid) log.critical(msg) raise RuntimeError(msg) - log.info("Adding back {} bad observations from zcat...t={:.1f}s" + log.info("Adding back {} bad observations from altZCat...t={:.1f}s" .format(len(zbadmiss), time()-t0)) # ADM update redshift information in MTL for bad observations. 
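The timestamp dictionary mocked up above avoids sleeping between tiles: each reprocessed tile is assigned an ISO timestamp offset by its position in the ordering, so ledger entries stay strictly ordered. A short demo using the desitarget helpers this module already imports (the TILEIDs are invented):

import desitarget.mtl
from desitarget.mtl import get_utc_date

orderedtiles = [1000, 1234, 2456]
now = get_utc_date(survey='main')
timestamps = {t: desitarget.mtl.add_to_iso_date(now, s)
              for s, t in enumerate(orderedtiles)}
# timestamps[1000] == now; each later tile is one second further on.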
@@ -2086,7 +2132,7 @@ def reprocess_alt_ledger(hpdirname, zcat, fbadirbase, tile, obscon="DARK"): done = np.concatenate([ledger, mtlpix.as_array()]) fitsio.write(fn+'.tmp', done, extname='MTL', header=hd, clobber=True) os.rename(fn+'.tmp', fn) - + retval = write_amtl_tile_tracker(altmtldir, [action], obscon = obscon, survey = survey) return timedict diff --git a/py/LSS/SV3/fatools.py b/py/LSS/SV3/fatools.py index f29325767..ac8d71919 100644 --- a/py/LSS/SV3/fatools.py +++ b/py/LSS/SV3/fatools.py @@ -271,7 +271,7 @@ def redo_fba_fromorig(tileid,outdir=None,faver=None, verbose = False,survey='mai fo.close() -def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None, overwriteFA = False,newdir=None, verbose = False, mock = False, targver = '1.1.1'): +def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None, overwriteFA = False,newdir=None, verbose = False, mock = False, targver = '1.1.1', reproducing = False): ts = str(tileid).zfill(6) #get info from origin fiberassign file fht = fitsio.read_header('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz') @@ -365,8 +365,11 @@ def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None, elif ('main' in indir.lower()) or ('holding' in indir.lower()): if verbose: log.info('main survey') - if targver == '1.1.1': - log.info('targver (should be 1.1.1) = {0}'.format(targver)) + if (not reproducing) or (targver == '1.1.1'): + if verbose: + log.info('if reproducing is True, targver must be 1.1.1') + log.info(f'targver = {targver}') + log.info(f'reproducing = {reproducing}') altcreate_mtl(tilef, mtldir+prog, gaiadr, @@ -377,6 +380,10 @@ def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None, mock = mock) #tdirMain+prog, elif targver == '1.0.0': + if verbose: + log.info('targver must be 1.0.0 (or at least not 1.1.1) and reproducing must be True') + log.info(f'targver = {targver}') + log.info(f'reproducing = {reproducing}') if not os.path.exists(outdir): log.info('running makedirs. making {0}'.format(outdir)) os.makedirs(outdir) From 2b6b22b996623967955539a6af1cd63fa24b2204 Mon Sep 17 00:00:00 2001 From: jalasker Date: Mon, 20 Nov 2023 13:34:59 -0800 Subject: [PATCH 007/297] working version of Y1 AMTL with reprocessing but without late tiles. --- bin/Y1Bitweights128RealizationsBRIGHT.sh | 336 ++++++++++++++++++++++ bin/Y1Bitweights128RealizationsDARK.sh | 339 +++++++++++++++++++++++ bin/dateLoopAltMTLBugFix.sh | 6 +- py/LSS/SV3/altmtltools.py | 38 ++- 4 files changed, 701 insertions(+), 18 deletions(-) create mode 100755 bin/Y1Bitweights128RealizationsBRIGHT.sh create mode 100755 bin/Y1Bitweights128RealizationsDARK.sh diff --git a/bin/Y1Bitweights128RealizationsBRIGHT.sh b/bin/Y1Bitweights128RealizationsBRIGHT.sh new file mode 100755 index 000000000..410eb597e --- /dev/null +++ b/bin/Y1Bitweights128RealizationsBRIGHT.sh @@ -0,0 +1,336 @@ +#!/bin/bash +start=`date +%s.%N` + +#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written +#simName=JL_DebugReprocReprod2 +simName=JL_Y1Run2BRIGHT +#Location where you have cloned the LSS Repo +path2LSS=~/.local/desicode/LSS/bin/ + +# Flags for debug/verbose mode/profiling code time usage. 
+# Uncomment second set of options to turn on the modes +#debug='' +#verbose='' +profile='' +debug='--debug' +verbose='--verbose' +#profile='--profile' + +if [ -z "$debug" ] +then + echo "\$debug is empty" +else + echo "\$debug is set" + pwd + InitWorkingDirectory=`pwd` + cd $path2LSS + cd .. + pwd + pip install --user . + cd $InitWorkingDirectory + pwd + echo "end of pip in script attempt" +fi + +#Uncomment second option if running on mocks +mock='' +#mock='--mock' + +#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory +#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" +#However, you should specify your own directory to a. not overwrite the survey alt MTLs +# and b. keep your alt MTLs somewhere that you have control/access + +#Uncomment the following line to set your own/nonscratch directory +#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ + + +if [[ "${NERSC_HOST}" == "cori" ]]; then + CVal='haswell' + QVal='interactive' + ProcPerNode=32 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$CSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi +elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then + srunConfig='-C cpu -q regular' + CVal='cpu' + QVal='interactive' + ProcPerNode=128 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$PSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi + +else + echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" + exit 1234 +fi + + + + +#Options for InitializeAltMTLs + +#Random seed. Change to any integer you want (or leave the same) +#If seed is different between two otherwise identical runs, the initial MTLs will also be different +#seed is also saved in output directory +#seed=14126579 +seed=3593589 +#Number of realizations to generate. Ideally a multiple of 64 for bitweights +#However, you can choose smaller numbers for debugging +ndir=128 + +#Uncomment second option if you want to clobber already existing files for Alt MTL generation +overwrite='' +#overwrite='--overwrite' + +#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") +#obscon='DARK' +obscon='BRIGHT' + +#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) +#survey='sv3' +survey='main' +# options are default None (empty strings). Uncommenting the second options will set them to the Y1 start and end dates. +startDate='' +#endDate='' +#startDate='2021-05-13T08:15:37+00:00' +endDate='2022-06-24T00:00:00+00:00' + +#For rundate formatting in simName, either manually modify the string below +#to be the desired date or comment that line out and uncomment the +#following line to autogenerate date strings. 
+#To NOT use any date string specification, use the third line, an empty string +#datestring='071322' +#datestring=`date +%y%m%d` +datestring='' + +#Can save time in MTL generation by first writing files to local tmp directory and then copying over later +#uncommenting the second option will directly write to your output directory +usetmp='' +#usetmp='--dontUseTemp' + +if [ -z $usetmp ] +then + outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` +else + outputMTLDirBaseBase=$ALTMTLHOME +fi +printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey +printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey + +#List of healpixels to create Alt MTLs for +#hpListFile="$path2LSS/MainSurveyHPList_mock.txt" +hpListFile="$path2LSS/MainSurveyHPList.txt" +#hpListFile="$path2LSS/DebugMainHPList.txt" +#hpListFile="$path2LSS/SV3HPList.txt" + +#These two options only are considered if the obscon is BRIGHT +#First option indicates whether to shuffle the top level priorities +#of BGS_FAINT/BGS_FAINT_HIP. Uncomment section option to turn off shuffling of bright time priorities +#Second option indicates what fraction/percent +#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 + +#shuffleBrightPriorities='--shuffleBrightPriorities' +shuffleBrightPriorities='' + + +#shuffleELGPriorities='' +#shuffleELGPriorities='--shuffleELGPriorities' + +#PromoteFracBGSFaint=0.2 +PromoteFracBGSFaint=0.0 +#PromoteFracELG=0.1 +PromoteFracELG=0.0 + +# location of original MTLs to shuffle. +# Default directory is a read only mount of the CFS filesystem +# You can only access that directory from compute nodes. +# Do NOT use the commented out directory (the normal mount of CFS) +# unless the read only mount is broken +exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#Options for DateLoopAltMTL and runAltMTLParallel + +#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). +#Default = Empty String/False. Uncomment second option if you want to restart from the first observations +#PLEASE DO NOT CHANGEME +echo "Fix QR resetting for new argparse usage" +qR='' +#qR='-qr' + +#Number of observation dates to loop through +#Defaults to 40 dates for SV3 +NObsDates=99999 + +# Whether to submit a new job with dateLoopAltMTL for each date +# or to submit a single job +# multiDate=0 +multiDate='--multiDate' +echo 'setting QVal here for debug. Fix later.' +#QVal='debug' +QVal='regular' +#QVal='interactive' +#Number of nodes to run on. This will launch up to 64*N jobs +#if that number of alternate universes have already been generated +#Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually +NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode )) +#echo $NNodes +#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs +#This should only be turned on for SV testing/debugging purposes +#This should not be required for main survey debugging. +getosubp='' +#getosubp='--getosubp' + +#shuffleSubpriorities(reproducing) must be left as empty strings to ensure +#subpriorities are shuffled. 
debug mode for main survey
+#will only require these flags to be set by uncommenting second options
+
+dontShuffleSubpriorities=''
+reproducing=''
+#dontShuffleSubpriorities='--dontShuffleSubpriorities'
+#reproducing='--reproducing'
+#Include secondary targets?
+secondary=''
+#secondary='--secondary'
+
+
+#If running from mocks, must set target directory.
+#Otherwise this is optional
+targfile='' #CHANGEME IF RUNNING ON MOCKS
+#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory
+#targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits'
+#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits'
+
+
+#Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger
+numobs_from_ledger=''
+#numobs_from_ledger='--NumObsNotFromLedger'
+
+#Uncomment second line to force redo fiber assignment if it has already been done.
+redoFA=''
+#redoFA='--redoFA'
+
+
+#Options for MakeBitweightsParallel
+#True/False(1/0) as to whether to split bitweight calculation
+#among nodes by MPI between realizations
+#splitByReal=1
+
+#Split the calculation of bitweights into splitByChunk
+#chunks of healpixels.
+#splitByChunk=1
+
+#Set to true (1) if you want to clobber already existing bitweight files
+overwrite2=''
+#overwrite2='--overwrite'
+#Actual running of scripts
+
+#Copy this script to output directory for reproducibility
+thisFileName=$outputMTLFinalDestination/$0
+
+echo $thisFileName
+
+if [ -f "$thisFileName" ]
+then
+    echo "File is found. Checking to see it is identical to the original."
+    cmp $0 $thisFileName
+    comp=$?
+    if [[ $comp -eq 1 ]]
+    then
+        echo "Files are not identical."
+        echo "If this is intended, please delete or edit the original copied script at $thisFileName"
+        echo "If this is unintended, you can reuse the original copied script at that same location"
+        echo "goodbye"
+        exit 3141
+    elif [[ $comp -eq 0 ]]
+    then
+        echo "files are same, continuing"
+    else
+        echo "Something has gone very wrong. Exit code for cmp was $comp"
+        exit $comp
+    fi
+else
+    echo "Copied script is not found. Copying now, making directories as needed."
+    mkdir -p $outputMTLFinalDestination
+    cp $0 $outputMTLFinalDestination/$0
+fi
+
+if [ -d "$outputMTLFinalDestination" ]
+then
+    echo "output final directory exists"
+    echo $outputMTLFinalDestination
+else
+    echo "output final directory does not exist.
Creating and copying script there" + mkdir -p $outputMTLFinalDestination + cp $0 $outputMTLFinalDestination +fi + +if [ -z $getosubp ] +then + touch $outputMTLFinalDestination/GetOSubpTrue +fi + +printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $date + +echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM" +srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM +if [ $? -ne 0 ]; then + exit 1234 + endInit=`date +%s.%N` + runtimeInit=$( echo "$endInit - $start" | bc -l ) + echo "runtime for initialization" + echo $runtimeInit +fi + +endInit=`date +%s.%N` +runtimeInit=$( echo "$endInit - $start" | bc -l ) +echo "runtime for initialization" +echo $runtimeInit + +printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring + +runtimeInit=$( echo "$endInit - $start" | bc -l ) +argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate $reproducing" +echo 'argstring for dateloop' +echo $argstring +nohup bash $path2LSS/dateLoopAltMTLBugFix.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL + +endDL=`date +%s.%N` + +if [ $? 
+if [ $DLstatus -ne 0 ]; then
+    runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l )
+    echo "runtime for Dateloop of $NObsDates days"
+    echo $runtimeDateLoop
+    exit 12345
+fi
+runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l )
+echo "runtime for Dateloop of $NObsDates days"
+echo $runtimeDateLoop
+#NOTE: the script intentionally stops here; the MakeBitweights step below is
+#currently unreachable and must be launched separately once the date loop
+#jobs have finished.
+exit 54321
+
+
+
+printf -v OFBW "%s/MakeBitweights%sOutput%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
+srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py --survey=$survey --obscon=$obscon --ndir=$ndir --ProcPerNode=$ProcPerNode --HPListFile=$hpListFile --outdir=$outputMTLFinalDestination $overwrite2 $verbose $debug >& $OFBW
+
+endBW=`date +%s.%N`
+
+
+
+runtimeInit=$( echo "$endInit - $start" | bc -l )
+runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l )
+runtimeBitweights=$( echo "$endBW - $endDL" | bc -l )
+
+echo "runtime for initialization"
+echo $runtimeInit
+echo "runtime for Dateloop of $NObsDates days"
+echo $runtimeDateLoop
+echo "runtime for making bitweights"
+echo $runtimeBitweights
diff --git a/bin/Y1Bitweights128RealizationsDARK.sh b/bin/Y1Bitweights128RealizationsDARK.sh
new file mode 100755
index 000000000..b74f5bfc5
--- /dev/null
+++ b/bin/Y1Bitweights128RealizationsDARK.sh
@@ -0,0 +1,339 @@
+#!/bin/bash
+start=`date +%s.%N`
+
+#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written
+#simName=JL_DebugReprocReprod2
+simName=JL_Y1Run2DARK
+#Location where you have cloned the LSS Repo
+path2LSS=~/.local/desicode/LSS/bin/
+
+# Flags for debug/verbose mode/profiling code time usage.
+# Uncomment second set of options to turn on the modes
+# (debug and verbose are already on below)
+#debug=''
+#verbose=''
+profile=''
+debug='--debug'
+verbose='--verbose'
+#profile='--profile'
+
+if [ -z "$debug" ]
+then
+    echo "\$debug is empty"
+else
+    echo "\$debug is set"
+    pwd
+    InitWorkingDirectory=`pwd`
+    cd $path2LSS
+    cd ..
+    pwd
+    pip install --user .
+    cd $InitWorkingDirectory
+    pwd
+    echo "end of pip in script attempt"
+fi
+
+#Uncomment second option if running on mocks
+mock=''
+#mock='--mock'
+
+#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory
+#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs"
+#However, you should specify your own directory to a. not overwrite the survey alt MTLs
+# and b. keep your alt MTLs somewhere that you have control/access
+
+#Uncomment the following line to set your own/nonscratch directory
+#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/
+
+
+if [[ "${NERSC_HOST}" == "cori" ]]; then
+    CVal='haswell'
+    QVal='interactive'
+    ProcPerNode=32
+    if [[ -z "${ALTMTLHOME}" ]]; then
+        ALTMTLHOME=$CSCRATCH
+    else
+        echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME"
+    fi
+elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then
+    srunConfig='-C cpu -q regular'
+    CVal='cpu'
+    QVal='interactive'
+    ProcPerNode=128
+    if [[ -z "${ALTMTLHOME}" ]]; then
+        ALTMTLHOME=$PSCRATCH
+    else
+        echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME"
+    fi
+
+else
+    echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye"
+    exit 1234
+fi
+
+
+
+
+#Options for InitializeAltMTLs
+
+#Random seed. Change to any integer you want (or leave the same)
+#If seed is different between two otherwise identical runs, the initial MTLs will also be different
+#seed is also saved in output directory
+#seed=14126579
+seed=3593589
+#Number of realizations to generate. Ideally a multiple of 64 for bitweights
+#However, you can choose smaller numbers for debugging
+ndir=128
+
+#Uncomment second option if you want to clobber already existing files for Alt MTL generation
+overwrite=''
+#overwrite='--overwrite'
+
+#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT")
+obscon='DARK'
+#obscon='BRIGHT'
+
+#Survey to generate MTLs for (should be lowercase "sv3" or "main"; sv2, sv1, and cmx are untested and will likely fail)
+#survey='sv3'
+survey='main'
+#Start/end dates default to None (empty strings). Uncommenting the commented
+#options sets them to the Y1 start and end dates; here only the Y1 end date is set.
+startDate=''
+#endDate=''
+#startDate='2021-05-13T08:15:37+00:00'
+endDate='2022-06-24T00:00:00+00:00'
+
+#For rundate formatting in simName, either manually modify the string below
+#to be the desired date or comment that line out and uncomment the
+#following line to autogenerate date strings.
+#To NOT use any date string specification, use the third line, an empty string
+#datestring='071322'
+#datestring=`date +%y%m%d`
+datestring=''
+
+#Can save time in MTL generation by first writing files to a local tmp directory and then copying over later.
+#Uncommenting the second option will directly write to your output directory
+usetmp=''
+#usetmp='--dontUseTemp'
+
+if [ -z $usetmp ]
+then
+    outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX`
+else
+    outputMTLDirBaseBase=$ALTMTLHOME
+fi
+printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey
+printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey
+
+#List of healpixels to create Alt MTLs for
+#hpListFile="$path2LSS/MainSurveyHPList_mock.txt"
+hpListFile="$path2LSS/MainSurveyHPList.txt"
+#hpListFile="$path2LSS/DebugMainHPList.txt"
+#hpListFile="$path2LSS/SV3HPList.txt"
+
+#These two options are only considered if the obscon is BRIGHT
+#First option indicates whether to shuffle the top level priorities
+#of BGS_FAINT/BGS_FAINT_HIP. Uncomment second option to turn off shuffling of bright time priorities
+#Second option indicates what fraction/percent
+#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3
+
+#shuffleBrightPriorities='--shuffleBrightPriorities'
+shuffleBrightPriorities=''
+
+
+#shuffleELGPriorities=''
+shuffleELGPriorities='--shuffleELGPriorities'
+
+#PromoteFracBGSFaint=0.2
+PromoteFracBGSFaint=0.0
+PromoteFracELG=0.1
+#PromoteFracELG=0.0
+
+# location of original MTLs to shuffle.
+# Default directory is a read only mount of the CFS filesystem
+# You can only access that directory from compute nodes.
+# Do NOT use the commented out directory (the normal mount of CFS)
+# unless the read only mount is broken
+exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/
+#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/
+#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/
+#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/
+#Options for DateLoopAltMTL and runAltMTLParallel
+
+#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files).
+#Default = Empty String/False. Uncomment second option if you want to restart from the first observations
+#PLEASE DO NOT CHANGEME
+echo "Fix QR resetting for new argparse usage"
+qR=''
+#qR='-qr'
+
+#Number of observation dates to loop through
+#Defaults to 40 dates for SV3 (99999 here effectively means all available dates)
+NObsDates=99999
+
+# Whether to submit a new job with dateLoopAltMTL for each date
+# or to submit a single job
+# multiDate=0
+multiDate='--multiDate'
+echo 'setting QVal here for debug. Fix later.'
+#QVal='debug'
+QVal='regular'
+#QVal='interactive'
+#Number of nodes to run on. This will launch up to 64*N jobs
+#if that number of alternate universes has already been generated
+#Calculated automatically from the number of sims requested and the number of
+#processes per node. Be careful if setting manually.
+NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode ))
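+#A worked example of the ceiling division above (integer arithmetic, values
+#purely illustrative): ndir=128 with ProcPerNode=128 gives (128+127)/128 = 1
+#node; ndir=128 with ProcPerNode=32 gives (128+31)/32 = 4 nodes.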
+#echo $NNodes
+#getosubp: grab subpriorities from the original (exampleLedgerBase) MTLs
+#This should only be turned on for SV testing/debugging purposes
+#This should not be required for main survey debugging.
+getosubp=''
+#getosubp='--getosubp'
+
+#shuffleSubpriorities(reproducing) must be left as empty strings to ensure
+#subpriorities are shuffled. debug mode for main survey
+#will only require these flags to be set by uncommenting second options
+
+dontShuffleSubpriorities=''
+reproducing=''
+#dontShuffleSubpriorities='--dontShuffleSubpriorities'
+#reproducing='--reproducing'
+#Include secondary targets?
+secondary=''
+#secondary='--secondary'
+
+
+#If running from mocks, must set target directory.
+#Otherwise this is optional
+targfile='' #CHANGEME IF RUNNING ON MOCKS
+#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory
+#targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits'
+#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits'
+
+
+#Default is to use numobs from the ledger. Uncomment second option to set numobs NOT from the ledger
+numobs_from_ledger=''
+#numobs_from_ledger='--NumObsNotFromLedger'
+
+#Uncomment second line to force redo of fiber assignment if it has already been done.
+redoFA=''
+#redoFA='--redoFA'
+
+
+#Options for MakeBitweightsParallel
+#True/False(1/0) as to whether to split the bitweight calculation
+#among nodes by MPI between realizations
+#splitByReal=1
+
+#Split the calculation of bitweights into splitByChunk
+#chunks of healpixels.
+#splitByChunk=1
+
+#Set to true (1) if you want to clobber already existing bitweight files
+overwrite2=''
+#overwrite2='--overwrite'
+#Actual running of scripts
+
+#Copy this script to the output directory for reproducibility
+thisFileName=$outputMTLFinalDestination/$0
+
+echo $thisFileName
+
+if [ -f "$thisFileName" ]
+then
+    echo "File is found. Checking to see if it is identical to the original."
+    cmp $0 $thisFileName
+    comp=$?
+    if [[ $comp -eq 1 ]]
+    then
+        echo "Files are not identical."
+        echo "If this is intended, please delete or edit the original copied script at $thisFileName"
+        echo "If this is unintended, you can reuse the original copied script at that same location"
+        echo "goodbye"
+        exit 3141
+    elif [[ $comp -eq 0 ]]
+    then
+        echo "files are same, continuing"
+    else
+        echo "Something has gone very wrong. Exit code for cmp was $comp"
+        exit $comp
+    fi
+else
+    echo "Copied script is not found. Copying now, making directories as needed."
+    mkdir -p $outputMTLFinalDestination
+    #$SLURM_SUBMIT_DIR is the directory the job was submitted from; copy the script from there
+    cp $SLURM_SUBMIT_DIR/$0 $outputMTLFinalDestination/$0
+fi
+
+if [ -d "$outputMTLFinalDestination" ]
+then
+    echo "output final directory exists"
+    echo $outputMTLFinalDestination
+else
+    echo "output final directory does not exist. Creating and copying script there"
+    mkdir -p $outputMTLFinalDestination
+    cp $0 $outputMTLFinalDestination
+fi
+
+#Leave a marker file when getosubp is turned on so reruns can tell at a glance
+if [ -n "$getosubp" ]
+then
+    touch $outputMTLFinalDestination/GetOSubpTrue
+fi
+
+printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
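+#With the settings above (obscon=DARK, survey=main, empty datestring), OFIM
+#expands to "$outputMTLFinalDestination/InitializeDARKAltMTLsParallelOutput_mainRepro.out"
+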
+echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM"
+srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM
+
+cp -r $outputMTLFinalDestination/ "$ALTMTLHOME/BACKUPInitial_$simName/"
+#NOTE: this unconditional exit halts the script right after backing up the
+#initialized MTLs; comment it out to continue on to the date loop and
+#bitweights below.
+exit 1234
+if [ $? -ne 0 ]; then
+    endInit=`date +%s.%N`
+    runtimeInit=$( echo "$endInit - $start" | bc -l )
+    echo "runtime for initialization"
+    echo $runtimeInit
+    exit 1234
+fi
+
+endInit=`date +%s.%N`
+runtimeInit=$( echo "$endInit - $start" | bc -l )
+echo "runtime for initialization"
+echo $runtimeInit
+
+printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
+
+argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate $reproducing"
+echo 'argstring for dateloop'
+echo $argstring
+nohup bash $path2LSS/dateLoopAltMTLBugFix.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL
+#Capture the date loop's exit status immediately, before it is overwritten
+DLstatus=$?
+
+endDL=`date +%s.%N`
+
+if [ $DLstatus -ne 0 ]; then
+    runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l )
+    echo "runtime for Dateloop of $NObsDates days"
+    echo $runtimeDateLoop
+    exit 12345
+fi
+runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l )
+echo "runtime for Dateloop of $NObsDates days"
+echo $runtimeDateLoop
+#NOTE: the script intentionally stops here; the MakeBitweights step below is
+#currently unreachable and must be launched separately once the date loop
+#jobs have finished.
+exit 54321
+
+
+
+printf -v OFBW "%s/MakeBitweights%sOutput%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
+srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py --survey=$survey --obscon=$obscon --ndir=$ndir --ProcPerNode=$ProcPerNode --HPListFile=$hpListFile --outdir=$outputMTLFinalDestination $overwrite2 $verbose $debug >& $OFBW
+
+endBW=`date +%s.%N`
+
+
+
+runtimeInit=$( echo "$endInit - $start" | bc -l )
+runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l )
+runtimeBitweights=$( echo "$endBW - $endDL" | bc -l )
+
+echo "runtime for initialization"
+echo $runtimeInit
+echo "runtime for Dateloop of $NObsDates days"
+echo $runtimeDateLoop
+echo "runtime for making bitweights"
+echo $runtimeBitweights
diff --git a/bin/dateLoopAltMTLBugFix.sh b/bin/dateLoopAltMTLBugFix.sh
index c9f80822d..f83ed5196 100755
--- a/bin/dateLoopAltMTLBugFix.sh
+++ b/bin/dateLoopAltMTLBugFix.sh
@@ -32,18 +32,18 @@ echo "$argstring"
 
 if [ $QVal = 'interactive' ]; then
-    srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 03:00:00 --dependency=afterany:17342877 $path2LSS/runAltMTLParallel.py $argstring
+    srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 03:00:00 --dependency=afterany:17881308 $path2LSS/runAltMTLParallel.py $argstring
 fi
 if [ $QVal = 'regular' ]; then
-    srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:17342877 $path2LSS/runAltMTLParallel.py $argstring
+    srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:17881308 $path2LSS/runAltMTLParallel.py $argstring
 fi
 if [ $QVal = 'debug' ]; then
-    srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 00:15:00 --dependency=afterany:17342877 $path2LSS/runAltMTLParallel.py $argstring
+    srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 00:15:00 --dependency=afterany:17881308 $path2LSS/runAltMTLParallel.py $argstring
 fi
 #retcode=$?
 #qR=0 #DO NOT CHANGE. This prevents further restarts after the first if qR is set to 1 at top.
diff --git a/py/LSS/SV3/altmtltools.py b/py/LSS/SV3/altmtltools.py
index 54df13fc0..3b38e7b4e 100644
--- a/py/LSS/SV3/altmtltools.py
+++ b/py/LSS/SV3/altmtltools.py
@@ -12,7 +12,7 @@
 from memory_profiler import profile
 import desitarget
-from desitarget import io, mtl
+#from desitarget import io, mtl
 from desitarget.cuts import random_fraction_of_trues
 from desitarget.mtl import get_mtl_dir, get_mtl_tile_file_name,get_mtl_ledger_format
 from desitarget.mtl import get_zcat_dir, get_ztile_file_name, tiles_to_be_processed
@@ -392,8 +392,8 @@ def makeAlternateZCat(zcat, real2AltMap, alt2RealMap, debug = False, verbose = F
     return altZCat
 
 def checkMTLChanged(MTLFile1, MTLFile2):
-    MTL1 = io.read_mtl_ledger(MTLFile1, unique = True)
-    MTL2 = io.read_mtl_ledger(MTLFile2, unique = True)
+    MTL1 = desitarget.io.read_mtl_ledger(MTLFile1, unique = True)
+    MTL2 = desitarget.io.read_mtl_ledger(MTLFile2, unique = True)
     NDiff = 0
     NDiff2 = 0
     NDiff3 = 0
@@ -911,7 +911,7 @@ def initializeAlternateMTLs(initMTL, outputMTL, nAlt = 2, genSubset = None, seed
     #JL - reset TARGET_STATES based on new target bits. This step isn't necessary for AMTL function but makes debugging using target states vastly easier.
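+    #np.broadcast_to below tiles the single string across every selected row,
+    #e.g. np.broadcast_to(np.array(['x']), 3) gives array(['x', 'x', 'x']).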
initialentries['TARGET_STATE'][ELGNewHIP & np.invert(QSOs)] = np.broadcast_to(np.array(['ELG_HIP|UNOBS']), np.sum(ELGNewHIP & np.invert(QSOs) ) ) - retval = io.write_mtl(outputMTLDir, initialentries, survey=survey, obscon=obscon, extra=meta, nsidefile=meta['FILENSID'], hpxlist = [meta['FILEHPX']]) + retval = desitarget.io.write_mtl(outputMTLDir, initialentries, survey=survey, obscon=obscon, extra=meta, nsidefile=meta['FILENSID'], hpxlist = [meta['FILEHPX']]) if debug or verbose: log.info('(nowrite = False) ntargs, fn = {0}'.format(retval)) log.info('wrote MTLs to {0}'.format(outputMTLDir)) @@ -1419,7 +1419,7 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, altmtldir = altmtlbasedir + '/Univ{0:03d}/'.format(n) altmtltilefn = os.path.join(altmtldir, get_mtl_tile_file_name(secondary=secondary)) - althpdirname = io.find_target_files(altmtldir, flavor="mtl", resolve=resolve, + althpdirname = desitarget.io.find_target_files(altmtldir, flavor="mtl", resolve=resolve, survey=survey, obscon=obscon, ender=form) altMTLTileTrackerFN = makeTileTrackerFN(altmtldir, survey = survey, obscon = obscon) @@ -1457,7 +1457,7 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, elif action['ACTIONTYPE'] == 'reproc': #returns timedict - + #raise NotImplementedError('make backup here before reprocessing. Then resume Debugging.') retval = reprocess_alt_ledger(altmtldir, action, obscon=obscon, survey = survey) if debug or verbose: log.info(f'retval = {retval}') @@ -1477,7 +1477,7 @@ def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, #log.info('----------') - return althpdirname, altmtltilefn, ztilefn, tiles + return althpdirname, altmtltilefn, altMTLTileTrackerFN, actionList def plotMTLProb(mtlBaseDir, ndirs = 10, hplist = None, obscon = 'dark', survey = 'sv3', outFileName = None, outFileType = '.png', jupyter = False, debug = False, verbose = False): """Plots probability that targets were observed among {ndirs} alternate realizations @@ -1911,9 +1911,9 @@ def reprocess_alt_ledger(altmtldir, action, obscon="dark", survey = 'main', zcat # ADM find the general format for the ledger files in `hpdirname`. # ADM also returning the obsconditions. - fileform, oc = io.find_mtl_file_format_from_header(hpdirname, returnoc=True) + fileform, oc = desitarget.io.find_mtl_file_format_from_header(hpdirname, returnoc=True) # ADM also find the format for any associated override ledgers. - overrideff = io.find_mtl_file_format_from_header(hpdirname, + overrideff = desitarget.io.find_mtl_file_format_from_header(hpdirname, forceoverride=True) # ADM check the obscondition is as expected. @@ -1939,7 +1939,7 @@ def reprocess_alt_ledger(altmtldir, action, obscon="dark", survey = 'main', zcat theta, phi = np.radians(90-altZCat["DEC"]), np.radians(altZCat["RA"]) pixnum = hp.ang2pix(nside, theta, phi, nest=True) pixnum = list(set(pixnum)) - targets = io.read_mtl_in_hp(hpdirname, nside, pixnum, unique=False) + targets = desitarget.io.read_mtl_in_hp(hpdirname, nside, pixnum, unique=False) # ADM remove OVERRIDE entries, which should never need reprocessing. targets, _ = desitarget.mtl.remove_overrides(targets) @@ -2037,7 +2037,7 @@ def reprocess_alt_ledger(altmtldir, action, obscon="dark", survey = 'main', zcat raise RuntimeError(msg) # ADM update NUMOBS in the altZCat using previous MTL totals. 
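+    #Matching semantics assumed here: desitarget.geomask.match(a, b) returns
+    #index arrays (ii_a, ii_b) with a[ii_a] == b[ii_b] element by element,
+    #assuming the IDs in each array are unique (e.g. for a=[5, 9, 2], b=[2, 5]
+    #it pairs a[2] with b[0] and a[0] with b[1]); match_to(a, b) instead
+    #returns ii with a[ii] == b, requiring every entry of b to appear in a.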
- mii, zii = desitarget.mtl.match(mtl["TARGETID"], altZCatmini["TARGETID"]) + mii, zii = desitarget.geomask.match(mtl["TARGETID"], altZCatmini["TARGETID"]) altZCatmini["NUMOBS"][zii] = mtl["NUMOBS"][mii] + 1 # ADM restrict to just objects in the altZCat that match an UNOBS @@ -2057,16 +2057,20 @@ def reprocess_alt_ledger(altmtldir, action, obscon="dark", survey = 'main', zcat zmtl = desitarget.mtl.make_mtl(mtl, oc, zcat=altZCatmini, trimtozcat=True, trimcols=True) # ADM match back to overall merged target list to update states. - mii, zii = desitarget.mtl.match(mtl["TARGETID"], zmtl["TARGETID"]) + mii, zii = desitarget.geomask.match(mtl["TARGETID"], zmtl["TARGETID"]) # ADM update the overall merged target list. for col in mtl.dtype.names: + if col.upper() == 'RA': + continue + elif col.upper() == 'DEC': + continue mtl[col][mii] = zmtl[col][zii] # ADM also update the TIMESTAMP for changes on this tile. mtl["TIMESTAMP"][mii] = timestamp # ADM trimtozcat=True discards BAD observations. Retain these. tidmiss = list(set(altZCatmini["TARGETID"]) - set(zmtl["TARGETID"])) - tii = desitarget.mtl.match_to(altZCatmini["TARGETID"], tidmiss) + tii = desitarget.geomask.match_to(altZCatmini["TARGETID"], tidmiss) zbadmiss = altZCatmini[tii] # ADM check all of the missing observations are, indeed, bad. if np.any(zbadmiss["ZWARN"] & zwarn_mask.mask(Mxbad) == 0): @@ -2078,10 +2082,14 @@ def reprocess_alt_ledger(altmtldir, action, obscon="dark", survey = 'main', zcat .format(len(zbadmiss), time()-t0)) # ADM update redshift information in MTL for bad observations. - mii, zii = desitarget.mtl.match(mtl["TARGETID"], zbadmiss["TARGETID"]) + mii, zii = desitarget.geomask.match(mtl["TARGETID"], zbadmiss["TARGETID"]) # ADM update the overall merged target list. # ADM Never update NUMOBS or NUMOBS_MORE using bad observations. - for col in set(zbadmiss.dtype.names) - set(["NUMOBS", "NUMOBS_MORE"]): + for col in set(zbadmiss.dtype.names) - set(["NUMOBS", "NUMOBS_MORE", "RA", "DEC"]): + if col.upper() == 'RA': + continue + elif col.upper() == 'DEC': + continue mtl[col][mii] = zbadmiss[col][zii] # ADM also update the TIMESTAMP for changes on this tile. mtl["TIMESTAMP"][mii] = timestamp From 999efb29d5012491e84e8db3da5dccd55ca76df6 Mon Sep 17 00:00:00 2001 From: jalasker Date: Mon, 20 Nov 2023 14:01:12 -0800 Subject: [PATCH 008/297] Revert "added untracked files to enable git pull." This reverts commit e37d33ea6b6c660655467cd64873664ba117636a. 
--- bin/#LOCAL_SurveyAltMTLScript.sh# | 282 -------- bin/Y1DataReproductionScript.sh | 316 -------- scripts/mock_tools/Bad_fiber_lists.py | 163 ----- .../LRG+BGS_bad_fiber_list3sigfid.py | 176 ----- scripts/mock_tools/comp_zstats_specrels.py | 272 ------- .../compare_snapshot_dir_with_live.py | 22 - scripts/mock_tools/getLRGmask.py | 186 ----- scripts/mock_tools/getLRGmask_tar.py | 161 ----- scripts/mock_tools/get_speccon.py | 211 ------ scripts/mock_tools/getmask_type.py | 195 ----- scripts/mock_tools/lss_cat_match_dr16.py | 102 --- scripts/mock_tools/mkBGS_flavors.py | 127 ---- scripts/mock_tools/mkBGS_flavors_kEE.py | 125 ---- scripts/mock_tools/mkCat_tar4ang.py | 111 --- scripts/mock_tools/mkemlin.py | 100 --- scripts/mock_tools/mknzplots.py | 66 -- scripts/mock_tools/perfiber_success_stats.py | 175 ----- scripts/mock_tools/pkrun.py | 278 -------- scripts/mock_tools/qso_cat_match_dr16q.py | 113 --- scripts/mock_tools/readwrite_pixel_bitmask.py | 145 ---- scripts/mock_tools/recon.py | 193 ----- scripts/mock_tools/summary_numbers.py | 51 -- scripts/mock_tools/xiruncz.py | 193 ----- scripts/mock_tools/xirunpc.py | 672 ------------------ scripts/xirunpc.py | 2 - 25 files changed, 4437 deletions(-) delete mode 100755 bin/#LOCAL_SurveyAltMTLScript.sh# delete mode 100755 bin/Y1DataReproductionScript.sh delete mode 100644 scripts/mock_tools/Bad_fiber_lists.py delete mode 100644 scripts/mock_tools/LRG+BGS_bad_fiber_list3sigfid.py delete mode 100644 scripts/mock_tools/comp_zstats_specrels.py delete mode 100644 scripts/mock_tools/compare_snapshot_dir_with_live.py delete mode 100644 scripts/mock_tools/getLRGmask.py delete mode 100644 scripts/mock_tools/getLRGmask_tar.py delete mode 100644 scripts/mock_tools/get_speccon.py delete mode 100644 scripts/mock_tools/getmask_type.py delete mode 100755 scripts/mock_tools/lss_cat_match_dr16.py delete mode 100644 scripts/mock_tools/mkBGS_flavors.py delete mode 100644 scripts/mock_tools/mkBGS_flavors_kEE.py delete mode 100644 scripts/mock_tools/mkCat_tar4ang.py delete mode 100644 scripts/mock_tools/mkemlin.py delete mode 100644 scripts/mock_tools/mknzplots.py delete mode 100644 scripts/mock_tools/perfiber_success_stats.py delete mode 100644 scripts/mock_tools/pkrun.py delete mode 100644 scripts/mock_tools/qso_cat_match_dr16q.py delete mode 100644 scripts/mock_tools/readwrite_pixel_bitmask.py delete mode 100644 scripts/mock_tools/recon.py delete mode 100644 scripts/mock_tools/summary_numbers.py delete mode 100644 scripts/mock_tools/xiruncz.py delete mode 100644 scripts/mock_tools/xirunpc.py diff --git a/bin/#LOCAL_SurveyAltMTLScript.sh# b/bin/#LOCAL_SurveyAltMTLScript.sh# deleted file mode 100755 index 09c21e677..000000000 --- a/bin/#LOCAL_SurveyAltMTLScript.sh# +++ /dev/null @@ -1,282 +0,0 @@ -#!/bin/bash -start=`date +%s.%N` -#All Boolean True/False parameters are 0 for False or 1 for True -#So python interprets them correctly - -#Location where you have cloned the LSS Repo -path2LSS=~/.local/desicode/LSS/bin/ - -#Flags for debug/verbose mode/profiling code time usage -debug=1 -verbose=1 -profile=0 - -#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") -obscon='DARK' - -#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) -#survey='sv3' -survey='main' - -#List of healpixels to create Alt MTLs for -#hpListFile="$path2LSS/SV3HPList.txt" -hpListFile="$path2LSS/MainSurveyHPList.txt" - -#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs 
will be written -simName=AltMTLReproDebug_"$survey" - -#Number of realizations to generate. Ideally a multiple of 64 -#However, you can choose smaller numbers for debugging - - - ndir=2 -#Number of observation dates to loop through -#Defaults to 40 dates for SV3 -NObsDates=40 -#Number of nodes to run on. This will launch up to 64*N jobs -#if that number of alternate universes have already been generated -#Defaults to 4 for 128 directories -NNodes=1 - -#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory -#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" -#However, you should specify your own directory to a. not overwrite the survey alt MTLs -# and b. keep your alt MTLs somewhere that you have control/access - -#Uncomment the following line to set your own/nonscratch directory -#ALTMTLHOME=/path/to/your/directory/ - - -if [[ "${NERSC_HOST}" == "cori" ]]; then - CVal='haswell' - QVal='interactive' - ProcPerNode=32 - if [[ -z "${ALTMTLHOME}" ]]; then - ALTMTLHOME=$CSCRATCH - else - echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" - fi -elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then - #srunConfig='-C cpu -q regular' - CVal='cpu' - QVal='interactive' - ProcPerNode=128 - if [[ -z "${ALTMTLHOME}" ]]; then - ALTMTLHOME=$PSCRATCH - else - echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" - fi - -else - echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" - exit 1234 -fi - - - - -#Options for InitializeAltMTLs - -#Random seed. Change to any integer you want (or leave the same) -#If seed is different between two otherwise identical runs, the initial MTLs will also be different -#seed is also saved in output directory -seed=31415 - - - -#Set to true(1) if you want to clobber already existing files for Alt MTL generation -overwrite=0 - - - -#For rundate formatting in simName, either manually modify the string below -#to be the desired date or comment that line out and uncomment the -#following line to autogenerate date strings. -#To NOT use any date string specification, use the third line, an empty string -#datestring='071322' -datestring=`date +%y%m%d` -#datestring='' - -#Can save time in MTL generation by first writing files to local tmp directory and then copying over later -#usetmp=True will use the local tmp directory and usetmp=False will directly write to your output directory -usetmp=True - -if [ $usetmp ] -then - outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` -else - outputMTLDirBaseBase=$ALTMTLHOME -fi -printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey -printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey - - - -#These two options only are considered if the obscon is BRIGHT -#First option indicates whether to shuffle the top level priorities -#of BGS_FAINT/BGS_FAINT_HIP. Second option indicates what fraction/percent -#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 -shuffleBrightPriorities=0 -PromoteFracBGSFaint=0.2 - -# location of original MTLs to shuffle. -# Default directory is a read only mount of the CFS filesystem -# You can only access that directory from compute nodes. 
-# Do NOT use the commented out directory (the normal mount of CFS) -# unless the read only mount is broken -#exampleledgerbase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ -exampleledgerbase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ - -#Options for DateLoopAltMTL and runAltMTLParallel - -#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). -#Default = 0/False. Set equal to 1 if you want to restart from the first observations -qR=0 - - -#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs -#This should only be turned on for SV testing/debugging purposes -#This should not be required for main survey debugging. -getosubp=0 - -#shuffleSubpriorities(reproducing) must be set to 1(0) to ensure -#subpriorities are shuffled. debug mode for main survey -#will only require these flags to be set to 0(1) and not the getosubp flag -shuffleSubpriorities=0 -reproducing=1 - -#Include secondary targets? -secondary=0 - -numobs_from_ledger=1 - -#Force redo fiber assignment if it has already been done. -redoFA=0 - - -#Options for MakeBitweightsParallel -#True/False(1/0) as to whether to split bitweight calculation -#among nodes by MPI between realizations -splitByReal=0 - -#Split the calculation of bitweights into splitByChunk -#chunks of healpixels. -splitByChunk=100 - -#Set to true if you want to clobber already existing bitweight files -overwrite2=1 - -#Actual running of scripts - -#Copy this script to output directory for reproducbility -thisFileName=$outputMTLDirBase/$0 - -echo $thisFileName - -if [ -f "$thisFileName" ] -then - echo "File is found. Checking to see it is identical to the original." - cmp $0 $thisFileName - comp=$? - if [[ $comp -eq 1 ]] - then - echo "Files are not identical." - echo "If this is intended, please delete or edit the original copied script at $thisFileName" - echo "If this is unintended, you can reuse the original copied script at that same location" - echo "goodbye" - exit 3141 - elif [[ $comp -eq 0 ]] - then - echo "files are same, continuing" - else - echo "Something has gone very wrong. Exit code for cmp was $a" - exit $a - fi -else - echo "Copied script is not found. Copying now, making directories as needed." - mkdir -p $outputMTLDirBase -fi - -if [ -d "$outputMTLFinalDestination" ] -then - echo "output final directory exists" -else - echo "output final directory does not exist. Creating and copying script there" - mkdir -p $outputMTLFinalDestination - cp $0 $outputMTLFinalDestination -fi - -if [ $getosubp -gt 0 ] -then - touch $outputMTLFinalDestination/GetOSubpTrue -fi - -echo 'moving on to python scripts (REMOVE BEFORE PUSHING)' -printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $date - -srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/InitializeAltMTLsParallel.py $seed $ndir $overwrite $obscon $survey $outputMTLDirBase $hpListFile $shuffleBrightPriorities $PromoteFracBGSFaint $exampleledgerbase $NNodes $usetmp "$outputMTLFinalDestination/Univ{0:03d}" $shuffleSubpriorities $reproducing $debug $verbose $ProcPerNode >& $OFIM -if [ $? 
-ne 0 ]; then - exit 1234 - endInit=`date +%s.%N` - runtimeInit=$( echo "$endInit - $start" | bc -l ) - echo "runtime for initialization" - echo $runtimeInit -fi - -endInit=`date +%s.%N` -runtimeInit=$( echo "$endInit - $start" | bc -l ) -echo "runtime for initialization" -echo $runtimeInit - -printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring - -runtimeInit=$( echo "$endInit - $start" | bc -l ) - -nohup bash $path2LSS/dateLoopAltMTL.sh $qR $NObsDates $NNodes $outputMTLFinalDestination $secondary $obscon $survey $numobs_from_ledger $redoFA $getosubp $path2LSS $CVal $QVal $debug $verbose $ProcPerNode >& $OFDL - -endDL=`date +%s.%N` - -if [ $? -ne 0 ]; then - runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) - echo "runtime for Dateloop of $NObsDates days" - echo $runtimeDateLoop - exit 12345 -fi - -runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) -echo "runtime for Dateloop of $NObsDates days" -echo $runtimeDateLoop - -if [ $splitByReal -ne 0 ]; then - printf -v OFBW "%s/MakeBitweights%sOutputCase1%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring - ##echo "skipping bitweights case 1" - srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py $survey $obscon $ndir $splitByReal $splitByChunk $hpListFile $outputMTLFinalDestination $overwrite2 >& $OFBW -else - printf -v OFBW "%s/MakeBitweights%sOutputCase2%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring - ##echo "skipping bitweights case 2" - srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py $survey $obscon $ndir $splitByReal $splitByChunk $hpListFile $outputMTLFinalDestination $overwrite2 >& $OFBW -fi - -endBW=`date +%s.%N` - - - -runtimeInit=$( echo "$endInit - $start" | bc -l ) -runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) -runtimeBitweights=$( echo "$endBW - $endDL" | bc -l ) - -echo "runtime for initialization" -echo $runtimeInit -echo "runtime for Dateloop of $NObsDates days" -echo $runtimeDateLoop -echo "runtime for making bitweights" -echo $runtimeBitweights - - - -echo "runtime for initialization \\n\\ - $runtimeInit \\n\\ - runtime for Dateloop of $NObsDates days \\n\\ - $runtimeDateLoop \\n\\ - runtime for making bitweights \\n\\ - $runtimeBitweights" > $outputMTLFinalDestination/TimingSummary_$datestring.txt \ No newline at end of file diff --git a/bin/Y1DataReproductionScript.sh b/bin/Y1DataReproductionScript.sh deleted file mode 100755 index 13360fdf7..000000000 --- a/bin/Y1DataReproductionScript.sh +++ /dev/null @@ -1,316 +0,0 @@ -#!/bin/bash -start=`date +%s.%N` - -#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written -simName=Y1Reproduction -#Location where you have cloned the LSS Repo -path2LSS=~/.local/desicode/LSS/bin/ - -# Flags for debug/verbose mode/profiling code time usage. -# Uncomment second set of options to turn on the modes -debug='' -verbose='' -profile='' -#debug='--debug' -#verbose='--verbose' -#profile='--profile' - -#Uncomment second option if running on mocks -mock='' -#mock='--mock' - -#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory -#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" -#However, you should specify your own directory to a. not overwrite the survey alt MTLs -# and b. 
keep your alt MTLs somewhere that you have control/access - -#Uncomment the following line to set your own/nonscratch directory -#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ - - -if [[ "${NERSC_HOST}" == "cori" ]]; then - CVal='haswell' - QVal='interactive' - ProcPerNode=32 - if [[ -z "${ALTMTLHOME}" ]]; then - ALTMTLHOME=$CSCRATCH - else - echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" - fi -elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then - srunConfig='-C cpu -q regular' - CVal='cpu' - QVal='interactive' - ProcPerNode=128 - if [[ -z "${ALTMTLHOME}" ]]; then - ALTMTLHOME=$PSCRATCH - else - echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" - fi - -else - echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" - exit 1234 -fi - - - - -#Options for InitializeAltMTLs - -#Random seed. Change to any integer you want (or leave the same) -#If seed is different between two otherwise identical runs, the initial MTLs will also be different -#seed is also saved in output directory -seed=8935781 - -#Number of realizations to generate. Ideally a multiple of 64 for bitweights -#However, you can choose smaller numbers for debugging -ndir=2 - -#Uncomment second option if you want to clobber already existing files for Alt MTL generation -overwrite='' -#overwrite='--overwrite' - -#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") -obscon='DARK' -#obscon='BRIGHT' - -#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) -#survey='sv3' -survey='main' -# options are default None (empty strings). Uncommenting the second options will set them to the Y1 start and end dates. -#startDate='' -#endDate='' -startDate='' -endDate=20220624 - -#For rundate formatting in simName, either manually modify the string below -#to be the desired date or comment that line out and uncomment the -#following line to autogenerate date strings. -#To NOT use any date string specification, use the third line, an empty string -#datestring='071322' -#datestring=`date +%y%m%d` -datestring='' - -#Can save time in MTL generation by first writing files to local tmp directory and then copying over later -#uncommenting the second option will directly write to your output directory -usetmp='' -#usetmp='--dontUseTemp' - -if [ -z $usetmp ] -then - outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` -else - outputMTLDirBaseBase=$ALTMTLHOME -fi -printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey -printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey - -#List of healpixels to create Alt MTLs for -#hpListFile="$path2LSS/MainSurveyHPList_mock.txt" -hpListFile="$path2LSS/MainSurveyHPList.txt" -#hpListFile="$path2LSS/DebugMainHPList.txt" -#hpListFile="$path2LSS/SV3HPList.txt" - -#These two options only are considered if the obscon is BRIGHT -#First option indicates whether to shuffle the top level priorities -#of BGS_FAINT/BGS_FAINT_HIP. Uncomment section option to turn off shuffling of bright time priorities -#Second option indicates what fraction/percent -#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 - -#shuffleBrightPriorities='--shuffleBrightPriorities' -shuffleBrightPriorities='' - - -shuffleELGPriorities='' -#shuffleELGPriorities='--shuffleELGPriorities' - -#PromoteFracBGSFaint=0.2 -PromoteFracBGSFaint=0.2 -#PromoteFracELG=0.1 -PromoteFracELG=0.1 - -# location of original MTLs to shuffle. 
-# Default directory is a read only mount of the CFS filesystem -# You can only access that directory from compute nodes. -# Do NOT use the commented out directory (the normal mount of CFS) -# unless the read only mount is broken -exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ -#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ -#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ -#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ -#Options for DateLoopAltMTL and runAltMTLParallel - -#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). -#Default = Empty String/False. Uncomment second option if you want to restart from the first observations -#PLEASE DO NOT CHANGEME -echo "Fix QR resetting for new argparse usage" -qR='' -#qR='-qr' - -#Number of observation dates to loop through -#Defaults to 40 dates for SV3 -NObsDates=500 - -# Whether to submit a new job with dateLoopAltMTL for each date -# or to submit a single job -# multiDate=0 -#multiDate='--multiDate' -#echo 'setting QVal here for debug. Fix later.' -#QVal='debug' -#QVal='regular' -#Number of nodes to run on. This will launch up to 64*N jobs -#if that number of alternate universes have already been generated -#Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually -NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode )) -#echo $NNodes -#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs -#This should only be turned on for SV testing/debugging purposes -#This should not be required for main survey debugging. -getosubp='' -#getosubp='--getosubp' - -#shuffleSubpriorities(reproducing) must be left as empty strings to ensure -#subpriorities are shuffled. debug mode for main survey -#will only require these flags to be set by uncommenting second options - -#dontShuffleSubpriorities='' -#reproducing='' -dontShuffleSubpriorities='--dontShuffleSubpriorities' -reproducing='--reproducing' -#Include secondary targets? -secondary='' -#secondary='--secondary' - - -#If running from mocks, must set target directory. -#Otherwise this is optional -targfile='' #CHANGEME IF RUNNING ON MOCKS -#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory -#targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' -#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' - - -#Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger -numobs_from_ledger='' -#numobs_from_ledger='--NumObsNotFromLedger' - -#Uncomment second line to force redo fiber assignment if it has already been done. -redoFA='' -#redoFA='--redoFA' - - -#Options for MakeBitweightsParallel -#True/False(1/0) as to whether to split bitweight calculation -#among nodes by MPI between realizations -#splitByReal=1 - -#Split the calculation of bitweights into splitByChunk -#chunks of healpixels. -#splitByChunk=1 - -#Set to true (1) if you want to clobber already existing bitweight files -overwrite2='' -#overwrite2='--overwrite' -#Actual running of scripts - -#Copy this script to output directory for reproducbility -thisFileName=$outputMTLFinalDestination/$0 - -echo $thisFileName - -if [ -f "$thisFileName" ] -then - echo "File is found. Checking to see it is identical to the original." 
- cmp $0 $thisFileName - comp=$? - if [[ $comp -eq 1 ]] - then - echo "Files are not identical." - echo "If this is intended, please delete or edit the original copied script at $thisFileName" - echo "If this is unintended, you can reuse the original copied script at that same location" - echo "goodbye" - exit 3141 - elif [[ $comp -eq 0 ]] - then - echo "files are same, continuing" - else - echo "Something has gone very wrong. Exit code for cmp was $a" - exit $a - fi -else - echo "Copied script is not found. Copying now, making directories as needed." - mkdir -p $outputMTLFinalDestination - cp $SLURM_SUBMIT_DIR $0 $outputMTLFinalDestination/$0 -fi - -if [ -d "$outputMTLFinalDestination" ] -then - echo "output final directory exists" - echo $outputMTLFinalDestination -else - echo "output final directory does not exist. Creating and copying script there" - mkdir -p $outputMTLFinalDestination - cp $0 $outputMTLFinalDestination -fi - -if [ -z $getosubp ] -then - touch $outputMTLFinalDestination/GetOSubpTrue -fi - -printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $date - -echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM" -srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM -if [ $? -ne 0 ]; then - exit 1234 - endInit=`date +%s.%N` - runtimeInit=$( echo "$endInit - $start" | bc -l ) - echo "runtime for initialization" - echo $runtimeInit -fi - -endInit=`date +%s.%N` -runtimeInit=$( echo "$endInit - $start" | bc -l ) -echo "runtime for initialization" -echo $runtimeInit - -printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring - -runtimeInit=$( echo "$endInit - $start" | bc -l ) -argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate" -nohup bash $path2LSS/dateLoopAltMTL.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL - -endDL=`date +%s.%N` - -if [ $? 
-ne 0 ]; then - runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) - echo "runtime for Dateloop of $NObsDates days" - echo $runtimeDateLoop - exit 12345 -fi -runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) -echo "runtime for Dateloop of $NObsDates days" -echo $runtimeDateLoop -exit 54321 - - - -printf -v OFBW "%s/MakeBitweights%sOutput%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring -srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py --survey=$survey --obscon=$obscon --ndir=$ndir --ProcPerNode=$ProcPerNode --HPListFile=$hpListFile --outdir=$outputMTLFinalDestination $overwrite2 $verbose $debug >& $OFBW - -endBW=`date +%s.%N` - - - -runtimeInit=$( echo "$endInit - $start" | bc -l ) -runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) -runtimeBitweights=$( echo "$endBW - $endDL" | bc -l ) - -echo "runtime for initialization" -echo $runtimeInit -echo "runtime for Dateloop of $NObsDates days" -echo $runtimeDateLoop -echo "runtime for making bitweights" -echo $runtimeBitweights diff --git a/scripts/mock_tools/Bad_fiber_lists.py b/scripts/mock_tools/Bad_fiber_lists.py deleted file mode 100644 index e3f9a9606..000000000 --- a/scripts/mock_tools/Bad_fiber_lists.py +++ /dev/null @@ -1,163 +0,0 @@ -import numpy as np -from scipy import stats -from scipy.stats import norm -import fitsio -import glob -import os -import matplotlib.pyplot as plt -import statistics -import argparse -import astropy -from astropy.table import Table,join -from astropy.time import Time -from astropy.io import fits - -import LSS.common_tools as common - -parser = argparse.ArgumentParser() -#parser.add_argument("--type", help="tracer type to be selected") -basedir='/global/cfs/cdirs/desi/survey/catalogs' -parser.add_argument("--basedir", help="base directory for input/output",default=basedir) -#parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test') -parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='DA02') -parser.add_argument("--verspec",help="version for redshifts",default='guadalupe') - -args = parser.parse_args() -basedir = args.basedir -#version = args.version -survey = args.survey -specver = args.verspec - - - -f=basedir+'/'+survey+'/LSS/'+specver+'/ELG_zsuccess.txt' -f1=basedir+'/'+survey+'/LSS/'+specver+'/QSO_zsuccess.txt' -f2=basedir+'/'+survey+'/LSS/'+specver+'/LRG_zsuccess.txt' -f3=basedir+'/'+survey+'/LSS/'+specver+'/BGS_ANY_zsuccess.txt' - - - -ELG=Table() -ELG['FIBER']=np.loadtxt(f)[:,0] -ELG['frac_suc']=np.loadtxt(f)[:,1] -ELG['n_suc']=np.loadtxt(f)[:,2] -ELG['n_tot']=np.loadtxt(f)[:,3] - -QSO=Table() -QSO['FIBER']=np.loadtxt(f1)[:,0] -QSO['frac_suc']=np.loadtxt(f1)[:,1] -QSO['n_suc']=np.loadtxt(f1)[:,2] -QSO['n_tot']=np.loadtxt(f1)[:,3] - -LRG=Table() -LRG['FIBER']=np.loadtxt(f2)[:,0] -LRG['frac_suc']=np.loadtxt(f2)[:,1] -LRG['n_suc']=np.loadtxt(f2)[:,2] -LRG['n_tot']=np.loadtxt(f2)[:,3] - - -BGS=Table() -BGS['FIBER']=np.loadtxt(f3)[:,0] -BGS['frac_suc']=np.loadtxt(f3)[:,1] -BGS['n_suc']=np.loadtxt(f3)[:,2] -BGS['n_tot']=np.loadtxt(f3)[:,3] - - -def fse(fiberstats): - #masknosuc= fiberstats['n_suc']>0 - #print(fiberstats[~masknosuc]) - mask1ntots = fiberstats['n_tot']>1 - fiberstats = fiberstats[mask1ntots] - fiberstats['frac_suc'] = fiberstats['n_suc']/fiberstats['n_tot'] - mean = np.sum(fiberstats['n_suc'])/np.sum(fiberstats['n_tot']) - - - error_floor = True - - n, p = fiberstats['n_tot'].copy(), fiberstats['frac_suc'].copy() - if 
error_floor: - p1 = np.maximum(1-p, 1/n) # error floor - else: - p1 = p - fiberstats['frac_suc_err'] = np.clip(np.sqrt(n * p * (1-p))/n, np.sqrt(n * p1 * (1-p1))/n, 1) - - fiberstats['check'] =(mean - fiberstats['frac_suc'])/fiberstats['frac_suc_err'] - fiberstats.sort('frac_suc') - - - - - from scipy.stats import binom - - bad_stats=Table() - bad_stats["FIBER"]=fiberstats["FIBER"] - bad_stats["n_tot"],bad_stats["n_suc"]=fiberstats['n_tot'], fiberstats['n_suc'] - bad_stats['n_fail'] = bad_stats['n_tot']-bad_stats['n_suc'] - bad_stats['frac_suc']= bad_stats['n_suc']/bad_stats['n_tot'] - bad_stats['frac_suc_err']=fiberstats['frac_suc_err'] - bad_stats['more_fail_p']=np.zeros(len(bad_stats)) - bad_stats['check']=fiberstats["check"] - for fiber in fiberstats['FIBER']: - n = fiberstats['n_tot'][fiberstats['FIBER']==fiber] - s = fiberstats['n_suc'][fiberstats['FIBER']==fiber] - p = mean - bad_stats["more_fail_p"][fiberstats['FIBER']==fiber]= binom.cdf(s-1, n, p) - - - nsigma=float(input("Enter the req sigma value\n")) - #mcheck=fiberstats['check']>3 - mfail=bad_stats['more_fail_p']1 - fstats_comb=fstats_comb[mask0] - fstats_comb['frac_suc']=fstats_comb['n_suc']/fstats_comb['n_tot'] - return(fstats_comb) - -LRGBGS=combine(LRG,BGS) - - - - -choice=1 -while(choice==1): - print("\nEnter number\n1 ELG\n2 LRG\n3 QSO\n4 BGS\n5 LRGBGS\n") - t_t=int(input()) - if(t_t==1): - bad_t,sig_t =fse(ELG) - name="ELG" - elif(t_t==2): - bad_t,sig_t =fse(LRG) - name="LRG" - elif(t_t==3): - bad_t,sig_t =fse(QSO) - name="QSO" - elif(t_t==4): - bad_t,sig_t =fse(BGS) - name="BGS" - elif(t_t==5): - bad_t,sig_t =fse(LRGBGS) - name="LRGBGS" - - fn = "/global/homes/s/sidpen90/desicode/LSS/badfibfail/"+name+"_bad_fibers"+str(sig_t)+"_sigma.txt" - np.savetxt(fn,bad_t['FIBER'],fmt='%i') - print('saved results to '+fn) - choice=int(input("\nPress 1 to run again or any other key to exit\n")) diff --git a/scripts/mock_tools/LRG+BGS_bad_fiber_list3sigfid.py b/scripts/mock_tools/LRG+BGS_bad_fiber_list3sigfid.py deleted file mode 100644 index 67e1d335f..000000000 --- a/scripts/mock_tools/LRG+BGS_bad_fiber_list3sigfid.py +++ /dev/null @@ -1,176 +0,0 @@ -import numpy as np -#!pip install astropy -#!pip install fitsio -from scipy import stats -from scipy.stats import norm -import fitsio -import glob -import os -import matplotlib.pyplot as plt -import statistics -import argparse -import astropy -from astropy.table import Table,join -from astropy.time import Time -from astropy.io import fits - -import LSS.common_tools as common - -parser = argparse.ArgumentParser() -#parser.add_argument("--type", help="tracer type to be selected") -basedir='/global/cfs/cdirs/desi/survey/catalogs' -parser.add_argument("--basedir", help="base directory for input/output",default=basedir) -parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test') -parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='DA02') -parser.add_argument("--verspec",help="version for redshifts",default='guadalupe') - -args = parser.parse_args() -basedir = args.basedir -version = args.version -survey = args.survey -specver = args.verspec - -#filepathLF = basedir+'/'+survey+'/LSS/'+specver+'/LSScats/'+version+'/LRG_full.dat.fits' -#filepathBGS = basedir+'/'+survey+'/LSS/'+specver+'/LSScats/'+version+'/BGS_ANY_full.dat.fits' - - - -#ff = fitsio.read(filepathLF) -#hdul = fits.open(filepathLF) -#ff2 = fitsio.read(filepathBGS) -#hdul = fits.open(filepathBGS) - -if survey != 'SV3': - zf = 
basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_zdone.fits' - dz = Table(fitsio.read(zf)) - desitarg = 'DESI_TARGET' - bit = 1 #for selecting LRG - wtype = ((dz[desitarg] & bit) > 0) - print(len(dz[wtype])) - #dz = dz[wtype&wg] - dz = dz[wtype] - - ff = common.cut_specdat(dz) - - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_zdone.fits' - dz = Table(fitsio.read(zf)) - desitarg = 'BGS_TARGET' - wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0) - print(len(dz[wtype])) - #dz = dz[wtype&wg] - dz = dz[wtype] - - ff2 = common.cut_specdat(dz) - -else: - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_Alltiles.fits' - dz = Table(fitsio.read(zf)) - desitarg = 'SV3_DESI_TARGET' - bit = 1 #for selecting LRG - wtype = ((dz[desitarg] & bit) > 0) - print(len(dz[wtype])) - #dz = dz[wtype&wg] - dz = dz[wtype] - wz = dz['ZWARN'] != 999999 #this is what the null column becomes - wz &= dz['ZWARN']*0 == 0 #just in case of nans - wz &= dz['COADD_FIBERSTATUS'] == 0 - ff = dz[wz] - - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_Alltiles.fits' - dz = Table(fitsio.read(zf)) - desitarg = 'SV3_BGS_TARGET' - wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0) - print(len(dz[wtype])) - #dz = dz[wtype&wg] - dz = dz[wtype] - wz = dz['ZWARN'] != 999999 #this is what the null column becomes - wz &= dz['ZWARN']*0 == 0 #just in case of nans - wz &= dz['COADD_FIBERSTATUS'] == 0 - - ff2 = dz[wz] - - -z_suc= ff['ZWARN']==0 -z_suc &= ff['DELTACHI2']>15 -z_suc &= ff['Z']<1.5 -z_tot = ff['ZWARN'] != 999999 -z_tot &= ff['ZWARN']*0 == 0 - -#print(len(ff[z_suc]),len(ff[z_tot])) -print("zsuccess rate for LRG=",len(ff[z_suc])/len(ff[z_tot])) -cat1 = Table(ff[z_tot]) - -full=Table() -full['FIBER'] = np.arange(5000) - -fiberstats = Table() -fiberstats['FIBER'], fiberstats['n_tot'] = np.unique(ff['FIBER'][z_tot], return_counts=True) -#fiberstats.sort('n_tot') - -tt = Table() -tt['FIBER'], tt['n_suc'] = np.unique(ff['FIBER'][z_suc], return_counts=True) - -fiberstats1 = join(fiberstats, tt, keys='FIBER', join_type='outer').filled(0) -fiberstats1 = join(fiberstats1,full, keys='FIBER',join_type='outer').filled(0) -#fiberstats1['frac_suc'] = fiberstats1['n_suc']/fiberstats1['n_tot'] - - -z_tot = ff2['ZWARN'] != 999999 -z_tot &= ff2['ZWARN']*0 == 0 -z_suc =ff2['ZWARN']==0 -z_suc&=ff2['DELTACHI2']>40 -#print(len(ff2[z_suc]),len(ff2[z_tot])) -print("zsuccess rate for BGS=",len(ff2[z_suc])/len(ff2[z_tot])) -cat2 = Table(ff2[z_tot]) - -fiberstats2 = Table() -fiberstats2['FIBER'], fiberstats2['n_tot'] = np.unique(ff2['FIBER'][z_tot], return_counts=True) -#fiberstats.sort('n_tot') - -tt2 = Table() -tt2['FIBER'], tt2['n_suc'] = np.unique(ff2['FIBER'][z_suc], return_counts=True) -fiberstats2 = join(fiberstats2, tt2, keys='FIBER', join_type='outer').filled(0) -fiberstats2 = join(fiberstats2,full, keys='FIBER',join_type='outer').filled(0) -#fiberstats2['frac_suc'] = fiberstats2['n_suc']/fiberstats2['n_tot'] - - -fstats_comb = Table() -fstats_comb['Fiber']=np.arange(5000) -fstats_comb['n_tot']=np.arange(5000) -fstats_comb['n_suc']=np.arange(5000) -for fiber in fstats_comb['Fiber']: - m1=fiberstats1['FIBER']==fiber - m2=fiberstats2['FIBER']==fiber - fstats_comb['n_tot'][fiber] = fiberstats1['n_tot'][m1]+fiberstats2['n_tot'][m2] - fstats_comb['n_suc'][fiber] = fiberstats1['n_suc'][m1]+fiberstats2['n_suc'][m2] - -mask0= fstats_comb['n_tot']>1 -fstats_comb=fstats_comb[mask0] -fstats_comb['frac_suc']=fstats_comb['n_suc']/fstats_comb['n_tot'] -#fstats_comb - -error_floor = True - 
-n, p = fstats_comb['n_tot'].copy(), fstats_comb['frac_suc'].copy() -if error_floor: - p1 = np.maximum(1-p, 1/n) # error floor -else: - p1 = p -fstats_comb['frac_suc_err'] = np.clip(np.sqrt(n * p * (1-p))/n, np.sqrt(n * p1 * (1-p1))/n, 1) - -#print("Removed fibers for having only 1 obs:\n",fstats_comb['FIBER'][ntotmask]) -mean = np.sum(fstats_comb['n_suc'])/np.sum(fstats_comb['n_tot']) -fstats_comb['check'] =(mean - fstats_comb['frac_suc'])/fstats_comb['frac_suc_err'] -fstats_comb.sort('frac_suc') -#fstats_comb - - - -#mean = np.sum(fstats_comb['n_suc'])/np.sum(fstats_comb['n_tot']) -n = 3 -maskcheck = fstats_comb['check']>n -print(fstats_comb) -#np.savetxt(basedir+'/'+survey+'/LSS/'+specver+'/LSScats/'+version+"/lrg+bgs_"+str(n)+"sig_bad_fibers.txt",fstats_comb[maskcheck]['Fiber'],fmt='%i') -fn = basedir+'/'+survey+'/LSS/'+specver+"/lrg+bgs_"+str(n)+"sig_bad_fibers.txt" -np.savetxt(fn,fstats_comb[maskcheck]['Fiber'],fmt='%i') -print('saved results to '+fn) \ No newline at end of file diff --git a/scripts/mock_tools/comp_zstats_specrels.py b/scripts/mock_tools/comp_zstats_specrels.py deleted file mode 100644 index 9a000bd30..000000000 --- a/scripts/mock_tools/comp_zstats_specrels.py +++ /dev/null @@ -1,272 +0,0 @@ -import numpy as np -#!pip install astropy -#!pip install fitsio -from scipy import stats -from scipy.stats import norm -import fitsio -import glob -import os -import sys -import matplotlib.pyplot as plt -import statistics -import argparse -import astropy -from astropy.table import Table,join -from astropy.time import Time -from astropy.io import fits - -import LSS.common_tools as common - - -parser = argparse.ArgumentParser() -#parser.add_argument("--type", help="tracer type to be selected") -basedir='/global/cfs/cdirs/desi/survey/catalogs' -parser.add_argument("--basedir", help="base directory for input/output",default=basedir) -parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='DA02') -parser.add_argument("--verspec",help="version for redshifts",default='guadalupe') -parser.add_argument("--verspec_new",help="version for redshifts",default='newQSOtemp_tagged') -parser.add_argument("--tracer",help="tracer type(s) (e.g., LRG)",default='all') -parser.add_argument("--mbit5",help="whether to screen against zwarn mask bit 5",default='n') -parser.add_argument("--mbit510",help="whether to screen against zwarn mask bits 5 and 10",default='n') -parser.add_argument("--zwarn0",help="only count as success if zwarn == 0",default='n') - -args = parser.parse_args() -basedir = args.basedir -survey = args.survey -specver = args.verspec -#tp = args.tracer - - - -#ff = fitsio.read(filepathLF) -#hdul = fits.open(filepathLF) -#ff2 = fitsio.read(filepathBGS) -#hdul = fits.open(filepathBGS) - -if args.tracer == 'all': - tracers = ['QSO','LRG','ELG','BGS_ANY'] -else: - tracers = [args.tracer] - - - -for tp in tracers: - notqso = '' - if survey == 'DA02': - if tp == 'LRG': - bit = 1 #for selecting LRG - if tp == 'ELG': - bit = 2 - notqso = 'notqso' - if tp == 'QSO': - bit = 4 - if tp == 'BGS_ANY': - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_zdone.fits' - zf_new = basedir+'/'+survey+'/LSS/'+args.verspec_new+'/datcomb_bright_spec_zdone.fits' - dz = Table(fitsio.read(zf)) - - - desitarg = 'BGS_TARGET' - wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0) - else: - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_zdone.fits' - zf_new = basedir+'/'+survey+'/LSS/'+args.verspec_new+'/datcomb_dark_spec_zdone.fits' - dz = 
Table(fitsio.read(zf)) - desitarg = 'DESI_TARGET' - wtype = ((dz[desitarg] & bit) > 0) - if tp == 'ELG': - wtype &= ((dz[desitarg] & 4) == 0) #remove QSO - print(len(dz[wtype])) - #dz = dz[wtype&wg] - dz = dz[wtype] - - dz = common.cut_specdat(dz) - dz_new = Table(fitsio.read(zf_new)) - dz_new.keep_columns(['Z','ZWARN','DELTACHI2','TARGETID','TILEID','LOCATION']) - print(len(dz)) - dz = join(dz,dz_new,keys=['TARGETID','TILEID','LOCATION'],table_names=['fid','new']) - print(str(len(dz))+' should agree with above') - - - from LSS.globals import main - pars = main(tp,args.verspec) - - elif survey == 'main': - sys.exit(survey+' not supported yet') - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_'+tp+'_tarspecwdup_zdone.fits' - dz = Table(fitsio.read(zf)) - if tp == 'ELG': - wtype = ((dz['DESI_TARGET'] & 4) == 0) #remove QSO - dz = dz[wtype] - dz = common.cut_specdat(dz) - from LSS.globals import main - pars = main(tp,args.verspec) - - - elif survey == 'SV3': - sys.exit('not written for SV3 yet') - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_Alltiles.fits' - dz = Table(fitsio.read(zf)) - desitarg = 'SV3_DESI_TARGET' - bit = 1 #for selecting LRG - wtype = ((dz[desitarg] & bit) > 0) - print(len(dz[wtype])) - #dz = dz[wtype&wg] - dz = dz[wtype] - wz = dz['ZWARN'] != 999999 #this is what the null column becomes - wz &= dz['ZWARN']*0 == 0 #just in case of nans - wz &= dz['COADD_FIBERSTATUS'] == 0 - ff = dz[wz] - - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_Alltiles.fits' - dz = Table(fitsio.read(zf)) - desitarg = 'SV3_BGS_TARGET' - wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0) - print(len(dz[wtype])) - #dz = dz[wtype&wg] - dz = dz[wtype] - wz = dz['ZWARN'] != 999999 #this is what the null column becomes - wz &= dz['ZWARN']*0 == 0 #just in case of nans - wz &= dz['COADD_FIBERSTATUS'] == 0 - - ff2 = dz[wz] - - z_tot = dz['ZWARN_fid'] != 999999 - z_tot &= dz['ZWARN_fid']*0 == 0 - z_new = dz['ZWARN_new'] != 999999 - z_new &= dz['ZWARN_new']*0 == 0 - print('number with z to consider fid,new') - print(len(dz[z_tot]),len(dz[z_new])) - - - if tp == 'LRG': - z_suc= dz['ZWARN_fid']==0 - z_suc &= dz['DELTACHI2_fid']>15 - z_suc &= dz['Z_fid']<1.5 - z_sucnew= dz['ZWARN_new']==0 - z_sucnew &= dz['DELTACHI2_new']>15 - z_sucnew &= dz['Z_new']<1.5 - zmin = 0.4 - zmax = 1.1 - - if tp == 'ELG': - o2f = fitsio.read(pars.elgzf,columns=['TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR']) - dz = join(dz,o2f,keys=['TARGETID','TILEID','LOCATION']) - o2c = np.log10(dz['OII_FLUX'] * np.sqrt(dz['OII_FLUX_IVAR']))+0.2*np.log10(dz['DELTACHI2_fid']) - z_suc = o2c > 0.9 - o2f_new = fitsio.read(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/emlin_catalog.fits' ,columns=['TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR']) - dz = join(dz,o2f_new,keys=['TARGETID','TILEID','LOCATION'],table_names=['fid','new']) - o2c_new = np.log10(dz['OII_FLUX_new'] * np.sqrt(dz['OII_FLUX_IVAR_new']))+0.2*np.log10(dz['DELTACHI2_new']) - z_sucnew = o2c_new > 0.9 - zmin = 0.6 - zmax = 1.6 - - if tp == 'QSO': - qsozf = pars.qsozf - if specver == 'guadalupe': - qsozf = '/global/cfs/cdirs/desi/users/edmondc/QSO_catalog/guadalupe/QSO_cat_guadalupe_cumulative.fits' - arz = Table(fitsio.read(qsozf)) - arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','Z_QN']) - arz['TILEID'] = arz['TILEID'].astype(int) - - #arz = fitsio.read(qsozf,columns=['TARGETID','LOCATION','TILEID','Z','Z_QN']) - - #arz['TILEID'] = arz['TILEID'].astype(int) - dz = 
join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF']) - #dz['Z'].name = 'Z_RR' #rename the original redrock redshifts - #dz['Z_QF'].name = 'Z' #the redshifts from the quasar file should be used instead - - z_suc = dz['Z'].mask == False #previous Z column should have become Z_fid - if args.mbit5 == 'y': - z_suc &= dz['ZWARN_fid'] & 2**5 == 0 - qsozf_new = basedir+'/'+survey+'/LSS/'+args.verspec_new+'/QSO_catalog.fits' - arz = Table(fitsio.read(qsozf_new)) - arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','Z_QN']) - arz['TILEID'] = arz['TILEID'].astype(int) - dz = join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF_new']) - #print(dz.dtype.names) - z_sucnew = dz['Z_QF_new'].mask == False - if args.mbit5 == 'y': - z_sucnew &= dz['ZWARN_new'] & 2**5 == 0 - if args.mbit510 == 'y': - z_sucnew &= dz['ZWARN_new'] & 2**5 == 0 - z_sucnew &= dz['ZWARN_new'] & 2**10 == 0 - if args.zwarn0 == 'y': - z_sucnew &= dz['ZWARN_new'] == 0 - - zmin = 0.8 - zmax = 3.5 - - - if tp == 'BGS_ANY': - z_suc = dz['ZWARN_fid']==0 - z_suc &= dz['DELTACHI2_fid']>40 - z_sucnew = dz['ZWARN_new']==0 - z_sucnew &= dz['DELTACHI2_new']>40 - zmin = 0.01 - zmax = 0.6 - - #print(len(ff[z_suc]),len(ff[z_tot])) - print("fiducial zsuccess rate for "+tp,len(dz[z_suc&z_tot])/len(dz[z_tot])) - print("new zsuccess rate for "+tp,len(dz[z_sucnew&z_new])/len(dz[z_new])) - print("fraction with zsuccess in both "+tp,len(dz[z_sucnew&z_new&z_suc])/len(dz[z_new])) - - if tp != 'QSO': - plt.hist(dz['Z_fid'][z_suc&z_tot],histtype='step',label='fiducial',range=(zmin,zmax),bins=50) - plt.hist(dz['Z_new'][z_sucnew&z_new],histtype='step',label='new',range=(zmin,zmax),bins=50) - plt.legend() - plt.xlabel('redshift') - plt.ylabel('# of good z in bin') - plt.title(tp+notqso) - plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zhistcompGuad.png') - - plt.show() - plt.plot(dz['Z_fid'][z_suc&z_tot&z_sucnew],dz['Z_new'][z_suc&z_tot&z_sucnew],'k,') - plt.xlabel('Guadalupe redshift') - plt.ylabel('new redshift') - plt.title(tp+notqso) - plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zcompGuad.png') - plt.show() - - else: - plt.hist(dz['Z'][z_suc&z_tot],histtype='step',label='fiducial',range=(zmin,zmax),bins=50) - plt.hist(dz['Z_QF_new'][z_sucnew&z_new],histtype='step',label='new',range=(zmin,zmax),bins=50) - plt.legend() - plt.xlabel('redshift') - plt.ylabel('# of good z in bin') - plt.title(tp+notqso) - fn_app = '' - if args.mbit5 == 'y': - fn_app = '_maskbit5' - if args.mbit510 == 'y': - fn_app = '_maskbits510' - if args.zwarn0 == 'y': - fn_app = '_zwarn0' - - plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zhistcompGuad'+fn_app+'.png') - plt.show() - plt.plot(dz['Z'][z_suc&z_tot&z_sucnew],dz['Z_QF_new'][z_suc&z_tot&z_sucnew],'k,') - plt.xlabel('Guadalupe redshift') - plt.ylabel('new redshift') - plt.title(tp+notqso) - plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zcompGuad'+fn_app+'.png') - plt.show() - plt.plot(dz['Z_QF_new'][z_suc&z_tot&z_sucnew],(dz['Z_QF_new'][z_suc&z_tot&z_sucnew]-dz['Z'][z_suc&z_tot&z_sucnew])/(1+dz['Z_QF_new'][z_suc&z_tot&z_sucnew]),'k,') - plt.xlabel('new redshift') - plt.ylabel('(new z-Guadalupe z)/(1+new z)') - plt.ylim(-0.02,0.02) - plt.title(tp+notqso) - plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zdiffGuad'+fn_app+'.png') - plt.show() - - 
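        # Editor's note: a hedged, illustrative addition (not in the original
        # script) that summarizes the zdiff panel above as a single outlier
        # fraction, |dv| = c*|z_new - z_fid|/(1 + z_new). The 1000 km/s cut is
        # an illustrative choice, not a DESI-adopted threshold.
        good = z_suc & z_tot & z_sucnew
        dv = 2.998e5 * (dz['Z_QF_new'][good] - dz['Z'][good]) / (1. + dz['Z_QF_new'][good])
        print('fraction of '+tp+' pairs with |dv| > 1000 km/s:', np.mean(np.abs(dv) > 1000.))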
plt.plot(dz['Z'][z_suc&z_tot&z_sucnew],dz['Z_QF_new'][z_suc&z_tot&z_sucnew],'k,') - plt.xlabel('Guadalupe redshift') - plt.ylabel('new redshift') - plt.title(tp+notqso) - plt.xlim(1.3,1.6) - plt.ylim(1.3,1.6) - plt.savefig(basedir+'/'+survey+'/LSS/'+args.verspec_new+'/'+tp+notqso+'_zcompGuadzoom'+fn_app+'.png') - plt.show() - - - diff --git a/scripts/mock_tools/compare_snapshot_dir_with_live.py b/scripts/mock_tools/compare_snapshot_dir_with_live.py deleted file mode 100644 index 82de710cf..000000000 --- a/scripts/mock_tools/compare_snapshot_dir_with_live.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/python -''' -Useful script for checking the integrity of a directory - -example run: - python compare_snapshot_dir_with_live.py --livedir /global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/daily/LSScats/1/ --snapdate 2021-08-26 - -Please note snapshots on cfs are only present for a week. Asking for a snapshot older than a week will cause a missing directory error. -''' -import argparse -import os - -parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument('--livedir', type=str, required=True, help="Directory you wish to compare with its snapshot") -parser.add_argument('--snapdate', type=str, required=True, help="Date of snapshot to compare against, format YYYY-MM-DD") - -uargs = parser.parse_args() -live = set(os.listdir(uargs.livedir)) -snapshot = set(os.listdir(uargs.livedir+'/.snapshots/'+uargs.snapdate)) - -print(f'Files present in the snapshot {uargs.snapdate} but not in the live version: {snapshot-live}') -print(f'Files present in the live version but not the snapshot {uargs.snapdate}: {live-snapshot}') diff --git a/scripts/mock_tools/getLRGmask.py b/scripts/mock_tools/getLRGmask.py deleted file mode 100644 index c9faf7b6f..000000000 --- a/scripts/mock_tools/getLRGmask.py +++ /dev/null @@ -1,186 +0,0 @@ -# Get LRG bitmasks for a catalog -# originally written by Rongpu Zhou -# Examples: -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input catalog.fits --output catalog_lrgmask.npy -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits --output $CSCRATCH/temp/randoms-1-0-lrgmask_v1.fits -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/users/rongpu/targets/dr9.0/1.0.0/resolve/dr9_lrg_south_1.0.0_basic.fits --output $CSCRATCH/temp/dr9_lrg_south_1.0.0_lrgmask_v1.fits - -from __future__ import division, print_function -from functools import partial -import sys, os, glob, time, warnings, gc -import numpy as np -import matplotlib.pyplot as plt -from astropy.table import Table, vstack, hstack, join -import fitsio - -from astropy.io import fits -from astropy import wcs - -from multiprocessing import Pool -import argparse - - -time_start = time.time() - -#bitmask_dir = '/global/cscratch1/sd/rongpu/desi/lrg_pixel_bitmask/v1' -bitmask_dir = '/global/cfs/cdirs/desi/survey/catalogs/brickmasks/LRG/v1.1' - -n_processes = 32 - -################## -debug = False -################## - -if os.environ['NERSC_HOST'] == 'cori': - scratch = 'CSCRATCH' -elif os.environ['NERSC_HOST'] == 'perlmutter': - scratch = 'PSCRATCH' -else: - print('NERSC_HOST is not cori or permutter but is '+os.environ['NERSC_HOST']) - sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding') - - -parser = argparse.ArgumentParser() -parser.add_argument("--basedir", help="base 
directory for output, default is SCRATCH",default=scratch) -parser.add_argument("--survey", help="e.g., SV3 or main",default='SV3') -parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test') -parser.add_argument("--verspec",help="version for redshifts",default='everest') -parser.add_argument("--minr", help="minimum number for random files",default=0,type=int) -parser.add_argument("--maxr", help="maximum for random files, default is 1, but 18 are available (use parallel script for all)",default=1,type=int) - -#parser.add_argument('-i', '--input', required=True) -#parser.add_argument('-o', '--output', required=True) -args = parser.parse_args() - -lssdir = args.basedir +'/'+args.survey+'/LSS/' - -ldirspec = lssdir+args.verspec+'/' - -indirfull = ldirspec+'/LSScats/'+args.version+'/' - -tp = 'LRG' - -#if args.survey == 'main' or args.survey == 'DA02': -# tp += 'zdone' - - - -def bitmask_radec(brickid, ra, dec): - - brick_index = np.where(bricks['BRICKID']==brickid)[0][0] - - brickname = str(bricks['BRICKNAME'][brick_index]) - if bricks['PHOTSYS'][brick_index]=='N': - field = 'north' - elif bricks['PHOTSYS'][brick_index]=='S': - field = 'south' - else: - raise ValueError - # bitmask_fn = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/{}/coadd/{}/{}/legacysurvey-{}-maskbits.fits.fz'.format(field, brickname[:3], brickname, brickname) - bitmask_fn = os.path.join(bitmask_dir, '{}/coadd/{}/{}/{}-lrgmask.fits.gz'.format(field, brickname[:3], brickname, brickname)) - - bitmask_img = fitsio.read(bitmask_fn) - - header = fits.open(bitmask_fn)[1].header - w = wcs.WCS(header) - - coadd_x, coadd_y = w.wcs_world2pix(ra, dec, 0) - coadd_x, coadd_y = np.round(coadd_x).astype(int), np.round(coadd_y).astype(int) - - bitmask = bitmask_img[coadd_y, coadd_x] - - return bitmask - -def wrapper(bid_index,bidorder,bidcnts,bid_unique,cat): - - idx = bidorder[bidcnts[bid_index]:bidcnts[bid_index+1]] - brickid = bid_unique[bid_index] - - ra, dec = cat['RA'][idx], cat['DEC'][idx] - - bitmask = bitmask_radec(brickid, ra, dec) - - data = Table() - data['idx'] = idx - data['lrg_mask'] = bitmask - data['TARGETID'] = cat['TARGETID'][idx] - - return data - - -def mkfile(input_path,output_path): - try: - cat = fitsio.read(input_path, rows=None, columns=['lrg_mask']) - return 'file already has lrg_mask column' - except: - print('adding lrg_mask column') - - try: - cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC', 'BRICKID','TARGETID'])) - except ValueError: - cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC','TARGETID'])) - - print(len(cat)) - - #for col in cat.colnames: - # cat.rename_column(col, col.upper()) - - #if 'TARGET_RA' in cat.colnames: - # cat.rename_columns(['TARGET_RA', 'TARGET_DEC'], ['RA', 'DEC']) - - if 'BRICKID' not in cat.colnames: - from desiutil import brick - tmp = brick.Bricks(bricksize=0.25) - cat['BRICKID'] = tmp.brickid(cat['RA'], cat['DEC']) - - # Just some tricks to speed up things up - bid_unique, bidcnts = np.unique(cat['BRICKID'], return_counts=True) - bidcnts = np.insert(bidcnts, 0, 0) - bidcnts = np.cumsum(bidcnts) - bidorder = np.argsort(cat['BRICKID']) - - - # start multiple worker processes - with Pool(processes=n_processes) as pool: - res = pool.map(partial(wrapper,bidorder=bidorder,bidcnts=bidcnts,bid_unique=bid_unique,cat=cat), np.arange(len(bid_unique))) - #partial(func, b=second_arg), a_args - - res = vstack(res) - res.sort('idx') - res.remove_column('idx') - - cat = 
Table(fitsio.read(input_path)) - - if len(cat) != len(res): - print('mismatched lengths, somehow get brick mask removed data!!!') - - else: - res = join(cat,res,keys=['TARGETID']) - if output_path.endswith('.fits'): - res.write(output_path,overwrite=True) - else: - np.write(output_path, np.array(res['lrg_mask'])) - del cat - del res - print('Done!', time.strftime("%H:%M:%S", time.gmtime(time.time() - time_start))) - -# bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/survey-bricks.fits.gz')) -bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/randoms/survey-bricks-dr9-randoms-0.48.0.fits')) - -if debug: - rows = np.arange(int(1e3)) -else: - rows = None - -input_path = indirfull+tp+'_full_noveto.dat.fits' -output_path = input_path #we will over-write, just adding new column - -mkfile(input_path,output_path) - -for ri in range(args.minr,args.maxr): - input_path = indirfull+tp+'_'+str(ri)+'_full_noveto.ran.fits' - output_path = input_path #we will over-write, just adding new column - - mkfile(input_path,output_path) - print('adding mask column to LRGs random number '+str(ri)) - diff --git a/scripts/mock_tools/getLRGmask_tar.py b/scripts/mock_tools/getLRGmask_tar.py deleted file mode 100644 index 155e6ff27..000000000 --- a/scripts/mock_tools/getLRGmask_tar.py +++ /dev/null @@ -1,161 +0,0 @@ -# Get LRG bitmasks for a catalog -# originally written by Rongpu Zhou -# Examples: -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input catalog.fits --output catalog_lrgmask.npy -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits --output $CSCRATCH/temp/randoms-1-0-lrgmask_v1.fits -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/users/rongpu/targets/dr9.0/1.0.0/resolve/dr9_lrg_south_1.0.0_basic.fits --output $CSCRATCH/temp/dr9_lrg_south_1.0.0_lrgmask_v1.fits - -from __future__ import division, print_function -from functools import partial -import sys, os, glob, time, warnings, gc -import numpy as np -import matplotlib.pyplot as plt -from astropy.table import Table, vstack, hstack, join -import fitsio - -from astropy.io import fits -from astropy import wcs - -from multiprocessing import Pool -import argparse - - -time_start = time.time() - -#bitmask_dir = '/global/cscratch1/sd/rongpu/desi/lrg_pixel_bitmask/v1' -bitmask_dir = '/global/cfs/cdirs/desi/survey/catalogs/brickmasks/LRG/v1' - -n_processes = 32 - -################## -debug = False -################## - -parser = argparse.ArgumentParser() -parser.add_argument("--basedir", help="base directory for output, default is CSCRATCH",default=os.environ['CSCRATCH']) -parser.add_argument("--survey", help="e.g., SV3 or main",default='main') - -#parser.add_argument('-i', '--input', required=True) -#parser.add_argument('-o', '--output', required=True) -args = parser.parse_args() - -lssdir = args.basedir +'/'+args.survey+'/LSS/' - -tp = 'LRG' - - - -def bitmask_radec(brickid, ra, dec): - - brick_index = np.where(bricks['BRICKID']==brickid)[0][0] - - brickname = str(bricks['BRICKNAME'][brick_index]) - if bricks['PHOTSYS'][brick_index]=='N': - field = 'north' - elif bricks['PHOTSYS'][brick_index]=='S': - field = 'south' - else: - raise ValueError - # bitmask_fn = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/{}/coadd/{}/{}/legacysurvey-{}-maskbits.fits.fz'.format(field, brickname[:3], 
brickname, brickname) - bitmask_fn = os.path.join(bitmask_dir, '{}/coadd/{}/{}/{}-lrgmask.fits.gz'.format(field, brickname[:3], brickname, brickname)) - - bitmask_img = fitsio.read(bitmask_fn) - - header = fits.open(bitmask_fn)[1].header - w = wcs.WCS(header) - - coadd_x, coadd_y = w.wcs_world2pix(ra, dec, 0) - coadd_x, coadd_y = np.round(coadd_x).astype(int), np.round(coadd_y).astype(int) - - bitmask = bitmask_img[coadd_y, coadd_x] - - return bitmask - -def wrapper(bid_index,bidorder,bidcnts,bid_unique,cat): - - idx = bidorder[bidcnts[bid_index]:bidcnts[bid_index+1]] - brickid = bid_unique[bid_index] - - ra, dec = cat['RA'][idx], cat['DEC'][idx] - - bitmask = bitmask_radec(brickid, ra, dec) - - data = Table() - data['idx'] = idx - data['lrg_mask'] = bitmask - data['TARGETID'] = cat['TARGETID'][idx] - - return data - - -def mkfile(input_path,output_path): - try: - cat = fitsio.read(input_path, rows=None, columns=['lrg_mask']) - return 'file already has lrg_mask column' - except: - print('adding lrg_mask column') - - try: - cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC', 'BRICKID','TARGETID'])) - except ValueError: - cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC','TARGETID'])) - - print(len(cat)) - - #for col in cat.colnames: - # cat.rename_column(col, col.upper()) - - #if 'TARGET_RA' in cat.colnames: - # cat.rename_columns(['TARGET_RA', 'TARGET_DEC'], ['RA', 'DEC']) - - if 'BRICKID' not in cat.colnames: - from desiutil import brick - tmp = brick.Bricks(bricksize=0.25) - cat['BRICKID'] = tmp.brickid(cat['RA'], cat['DEC']) - - # Just some tricks to speed up things up - bid_unique, bidcnts = np.unique(cat['BRICKID'], return_counts=True) - bidcnts = np.insert(bidcnts, 0, 0) - bidcnts = np.cumsum(bidcnts) - bidorder = np.argsort(cat['BRICKID']) - - - # start multiple worker processes - with Pool(processes=n_processes) as pool: - res = pool.map(partial(wrapper,bidorder=bidorder,bidcnts=bidcnts,bid_unique=bid_unique,cat=cat), np.arange(len(bid_unique))) - #partial(func, b=second_arg), a_args - - res = vstack(res) - res.sort('idx') - res.remove_column('idx') - - cat = Table(fitsio.read(input_path)) - - if len(cat) != len(res): - print('mismatched lengths, somehow get brick mask removed data!!!') - - else: - res = join(cat,res,keys=['TARGETID']) - if output_path.endswith('.fits'): - res.write(output_path,overwrite=True) - else: - np.write(output_path, np.array(res['lrg_mask'])) - del cat - del res - print('Done!', time.strftime("%H:%M:%S", time.gmtime(time.time() - time_start))) - -# bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/survey-bricks.fits.gz')) -bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/randoms/survey-bricks-dr9-randoms-0.48.0.fits')) - -if debug: - rows = np.arange(int(1e3)) -else: - rows = None - -if args.survey == 'main': - input_path = lssdir+tp+'targetsDR9v1.1.1.fits' - output_path = input_path #we will over-write, just adding new column - -mkfile(input_path,output_path) - - diff --git a/scripts/mock_tools/get_speccon.py b/scripts/mock_tools/get_speccon.py deleted file mode 100644 index bdbea7266..000000000 --- a/scripts/mock_tools/get_speccon.py +++ /dev/null @@ -1,211 +0,0 @@ -#adapts Mike Wilson's notebook -import glob -import numpy as np -import astropy.io.fits as fits -import argparse -import fitsio -import os - -from astropy.table import Table, join, unique, vstack -from desiutil.log import get_logger -import ephem -import astropy.units as u - -from desisurvey.config import 
Configuration -from astropy.time import Time -from astropy.table import Table - -config = Configuration() - -mayall = ephem.Observer() -mayall.lat = config.location.latitude().to(u.rad).value -mayall.lon = config.location.longitude().to(u.rad).value -mayall.elevation = config.location.elevation().to(u.m).value - - - - -parser = argparse.ArgumentParser() -parser.add_argument("--basedir", help="base directory for output, default is CSCRATCH",default=os.environ['CSCRATCH']) -parser.add_argument("--survey", help="main or sv3",default='main') -parser.add_argument("--prog", help="dark or bright",default='dark') -parser.add_argument("--verspec",help="version for redshifts",default='daily') -parser.add_argument("--test",help="if yes, test a small fraction of the exposures",default='n') - -args = parser.parse_args() - -sw = args.survey -if args.survey == 'sv3': - sw = 'SV3' - -outf = args.basedir +'/'+sw+'/LSS/'+args.verspec+'/specobscon_'+args.prog+'.fits' - -datadir = '/global/cfs/cdirs/desi/spectro/redux/'+args.verspec+'/' -exposures = fitsio.read(datadir + '/exposures-'+args.verspec+'.fits') -if args.test == 'y': - exposures = exposures[:10] -exposures = Table(exposures) -nexp = len(exposures) -#if args.test == 'y': -# nexp = 10 -exposures['MOON_ILLUM'] = np.zeros(nexp) - -moon = ephem.Moon(mayall) -for ii in range(0,nexp): - - t = Time(exposures[ii]['MJD'], format='mjd') - moon.compute(t.datetime) - - moon_illum = moon.moon_phase - exposures[ii]['MOON_ILLUM'] = moon_illum - -print('added moon illumination, median is:'+str(np.median(exposures['MOON_ILLUM']))) - - - -addcols = ['ZD','ETCTRANS', 'ETCTHRUB', 'ETCSKY', 'ACQFWHM','SLEWANGL','MOONSEP','PMIRTEMP', 'TAIRTEMP','PARALLAC','ROTOFFST','TURBRMS','WINDSPD','WINDDIR'] - -for col in addcols: - exposures[col] = np.ones(nexp)*-99 - - -for ii in range(0,nexp): - es = str(exposures[ii]['EXPID']).zfill(8) - efn = '/global/cfs/cdirs/desi/spectro/data/'+str(exposures[ii]['NIGHT'])+'/'+es+'/desi-'+es+'.fits.fz' - hh = fitsio.read_header(efn,ext=1) - if ii//100 == ii/100: - print('at exposure '+str(ii)+ ' out of '+str(nexp)) - for col in addcols: - try: - exposures[ii][col] = hh[col] - except: - pass - -for col in addcols: - selnull = exposures[col] == -99 - print('fraction null:') - print(col,str(len(exposures[selnull])/len(exposures))) - -ocol = ['MOON_ILLUM','EXPID', 'SEEING_ETC', 'AIRMASS', 'EBV', 'TRANSPARENCY_GFA', 'SEEING_GFA', 'SKY_MAG_AB_GFA', 'SKY_MAG_G_SPEC', 'SKY_MAG_R_SPEC', 'SKY_MAG_Z_SPEC', 'EFFTIME_SPEC'] -tcol = addcols + ocol -exposures = exposures[tcol] - -if args.verspec == 'daily': - dcat = fitsio.read(args.basedir +'/'+sw+'/LSS/'+args.verspec+'/datcomb_'+args.prog+'_spec_zdone.fits') -else: - dcat = fitsio.read(datadir+'/zcatalog/ztile-'+args.survey+'-'+args.prog+'-'+'cumulative.fits') -tids = np.unique(dcat['TILEID']) - -mt = Table.read('/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-specstatus.ecsv') -wd = mt['SURVEY'] == args.survey -wd &= mt['FAPRGRM'] == args.prog -wd &= np.isin(mt['TILEID'],tids) -mtd = mt[wd] - -tiles4comb = Table() -tiles4comb['TILEID'] = mtd['TILEID'].astype(int) -tiles4comb['ZDATE'] = mtd['LASTNIGHT'] - -print('numbers of tiles, should match:') -print(len(tids),len(tiles4comb)) - -coadd_fpaths = [] - -for ii in range(0,len(tiles4comb)): - ''' - Retrieve coadd paths for all tiles - ''' - - fpath = '{}/tiles/cumulative/{:d}/{:d}'.format(datadir, tiles4comb['TILEID'][ii], tiles4comb['ZDATE'][ii]) - - - # Here we grab the path for each coadd under cumulative/tileid/zdate - fpaths = 
sorted(glob.glob(fpath + '/' + 'coadd-?-{:d}-thru{}.fits'.format(tiles4comb['TILEID'][ii], tiles4comb['ZDATE'][ii])))
-
-
-    coadd_fpaths += [x for x in fpaths]
-
-print(coadd_fpaths[:12])
-
-def process_coadd(coadd_fpath):
-    '''
-    Retrieve the input expids for each location on a given (tileid, thru_night).
-
-    Note:
-        assuming input expids may differ by location due to quality cuts. We
-        get around this later by simultaneously processing all locs with the same
-        input expids.
-    '''
-    tileid = coadd_fpath.split('/')[-3]
-    thru_night = coadd_fpath.split('/')[-2]
-
-    coadd = Table.read(coadd_fpath, hdu='EXP_FIBERMAP')
-    # coadd
-
-    # expids, cnts = np.unique(coadd['EXPID'], return_counts=True)
-
-    # print(len(expids))
-
-    condition_cat = coadd['TARGETID', 'LOCATION']
-    condition_cat = unique(condition_cat)
-    condition_cat.sort('LOCATION')
-
-    condition_cat['TILEID'] = tileid
-    condition_cat['THRU_NIGHT'] = thru_night
-    condition_cat['IN_EXPIDS'] = 'x' * 50
-
-    locs, cnts = np.unique(condition_cat['LOCATION'].data, return_counts=True)
-
-    assert cnts.max() == 1
-    assert np.all(locs == condition_cat['LOCATION'].data)
-
-    for i, loc in enumerate(locs):
-        coadd_loc = coadd[(coadd['LOCATION'] == loc) & (coadd['FIBERSTATUS'] == 0)]
-
-        loc_expids = '-'.join(np.unique(coadd_loc['EXPID'].data).astype(str).tolist())
-
-        condition_cat['IN_EXPIDS'][i] = loc_expids
-
-        # print(i, loc_expids)
-
-    return condition_cat
-
-to_process = coadd_fpaths
-condition_cat = [process_coadd(x) for x in to_process]
-condition_cat = vstack(condition_cat)
-
-unique_in_expids = np.unique(condition_cat['IN_EXPIDS'].data).tolist()
-
-unique_in_expids.remove('')
-
-update_cols = list(exposures.dtype.names)
-update_cols.remove('EXPID')
-update_cols.remove('EFFTIME_SPEC')
-
-for col in update_cols:
-    condition_cat[col] = -99.
-
-for in_expids in unique_in_expids:
-    expids = np.array(in_expids.split('-')).astype(int)
-
-    # Get the exposure conditions for this set of expids.
-    in_exposures = exposures[np.isin(exposures['EXPID'].data, expids)]
-
-    # print(expids)
-    # print(in_exposures)
-
-    mean_function = lambda x: np.average(x, weights=in_exposures['EFFTIME_SPEC'])
-
-    # Weighted mean of the condition table for this exp. set (weights are efftime_spec)
-    in_exposures = in_exposures.groups.aggregate(mean_function)
-
-    # To be extra sure, we could include matches to TILEID and thru night.
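    # Editor's note (added comment): `in_exposures` was never grouped, so
    # astropy treats the whole table as a single group and `aggregate`
    # collapses it to one row of EFFTIME_SPEC-weighted means; the closure in
    # `mean_function` still sees the pre-aggregation table, which supplies the
    # weights. A quick sanity check of that single-group behavior:
    assert len(in_exposures) == 1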
- to_update = condition_cat['IN_EXPIDS'] == in_expids - - for col in update_cols: - condition_cat[col].data[to_update] = in_exposures[col].data[0] - - print('Processed: {}'.format(in_expids)) - - -condition_cat.write(outf, format='fits', overwrite=True) diff --git a/scripts/mock_tools/getmask_type.py b/scripts/mock_tools/getmask_type.py deleted file mode 100644 index 21fa05517..000000000 --- a/scripts/mock_tools/getmask_type.py +++ /dev/null @@ -1,195 +0,0 @@ -# Get LRG bitmasks for a catalog -# originally written by Rongpu Zhou -# Examples: -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input catalog.fits --output catalog_lrgmask.npy -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits --output $CSCRATCH/temp/randoms-1-0-lrgmask_v1.fits -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --input /global/cfs/cdirs/desi/users/rongpu/targets/dr9.0/1.0.0/resolve/dr9_lrg_south_1.0.0_basic.fits --output $CSCRATCH/temp/dr9_lrg_south_1.0.0_lrgmask_v1.fits - -from __future__ import division, print_function -from functools import partial -import sys, os, glob, time, warnings, gc -import numpy as np -import matplotlib.pyplot as plt -from astropy.table import Table, vstack, hstack, join -import fitsio - -from astropy.io import fits -from astropy import wcs - -from multiprocessing import Pool -import argparse - - -time_start = time.time() - -#bitmask_dir = '/global/cscratch1/sd/rongpu/desi/lrg_pixel_bitmask/v1' - -n_processes = 32 - -################## -debug = False -################## -if os.environ['NERSC_HOST'] == 'cori': - scratch = 'CSCRATCH' -elif os.environ['NERSC_HOST'] == 'perlmutter': - scratch = 'PSCRATCH' -else: - print('NERSC_HOST is not cori or permutter but is '+os.environ['NERSC_HOST']) - sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding') - - -parser = argparse.ArgumentParser() -parser.add_argument("--tracer", help="tracer type to be selected") -parser.add_argument("--basedir", help="base directory for output, default is SCRATCH",default=os.environ[scratch]) -parser.add_argument("--survey", help="e.g., SV3 or main",default='SV3') -parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test') -parser.add_argument("--mver", help="version of the mask",default='1') -parser.add_argument("--verspec",help="version for redshifts",default='everest') -parser.add_argument("--minr", help="minimum number for random files",default=0,type=int) -parser.add_argument("--maxr", help="maximum for random files, default is 1, but 18 are available (use parallel script for all)",default=1,type=int) - -#parser.add_argument('-i', '--input', required=True) -#parser.add_argument('-o', '--output', required=True) -args = parser.parse_args() - -lssdir = args.basedir +'/'+args.survey+'/LSS/' - -ldirspec = lssdir+args.verspec+'/' - -indirfull = ldirspec+'/LSScats/'+args.version+'/' - -tp = args.tracer -tpr = tp[:3] -tprl = tpr.lower() -print(tp,tpr,tprl) - -bitmask_dir = '/global/cfs/cdirs/desi/survey/catalogs/brickmasks/'+tpr+'/v'+args.mver - - -#if args.survey == 'main' or args.survey == 'DA02': -# tp += 'zdone' - - - -def bitmask_radec(brickid, ra, dec): - - brick_index = np.where(bricks['BRICKID']==brickid)[0][0] - - brickname = str(bricks['BRICKNAME'][brick_index]) - if bricks['PHOTSYS'][brick_index]=='N': - field = 'north' - elif 
bricks['PHOTSYS'][brick_index]=='S': - field = 'south' - else: - raise ValueError - # bitmask_fn = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/{}/coadd/{}/{}/legacysurvey-{}-maskbits.fits.fz'.format(field, brickname[:3], brickname, brickname) - bitmask_fn = os.path.join(bitmask_dir, '{}/coadd/{}/{}/{}-{}mask.fits.gz'.format(field, brickname[:3], brickname, brickname,tprl)) - - bitmask_img = fitsio.read(bitmask_fn) - - header = fits.open(bitmask_fn)[1].header - w = wcs.WCS(header) - - coadd_x, coadd_y = w.wcs_world2pix(ra, dec, 0) - coadd_x, coadd_y = np.round(coadd_x).astype(int), np.round(coadd_y).astype(int) - - bitmask = bitmask_img[coadd_y, coadd_x] - - return bitmask - -def wrapper(bid_index,bidorder,bidcnts,bid_unique,cat): - - idx = bidorder[bidcnts[bid_index]:bidcnts[bid_index+1]] - brickid = bid_unique[bid_index] - - ra, dec = cat['RA'][idx], cat['DEC'][idx] - - bitmask = bitmask_radec(brickid, ra, dec) - - data = Table() - data['idx'] = idx - data[tprl+'_mask'] = bitmask - data['TARGETID'] = cat['TARGETID'][idx] - - return data - - -def mkfile(input_path,output_path): - try: - cat = fitsio.read(input_path, rows=None, columns=[tprl+'_mask']) - return 'file already has '+tpr.lower()+'_mask column' - except: - print('adding '+tprl+'_mask column') - - try: - cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC', 'BRICKID','TARGETID'])) - except ValueError: - cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC','TARGETID'])) - - print(len(cat)) - - #for col in cat.colnames: - # cat.rename_column(col, col.upper()) - - #if 'TARGET_RA' in cat.colnames: - # cat.rename_columns(['TARGET_RA', 'TARGET_DEC'], ['RA', 'DEC']) - - if 'BRICKID' not in cat.colnames: - from desiutil import brick - tmp = brick.Bricks(bricksize=0.25) - cat['BRICKID'] = tmp.brickid(cat['RA'], cat['DEC']) - - # Just some tricks to speed up things up - bid_unique, bidcnts = np.unique(cat['BRICKID'], return_counts=True) - bidcnts = np.insert(bidcnts, 0, 0) - bidcnts = np.cumsum(bidcnts) - bidorder = np.argsort(cat['BRICKID']) - - - # start multiple worker processes - with Pool(processes=n_processes) as pool: - res = pool.map(partial(wrapper,bidorder=bidorder,bidcnts=bidcnts,bid_unique=bid_unique,cat=cat), np.arange(len(bid_unique))) - #partial(func, b=second_arg), a_args - - res = vstack(res) - res.sort('idx') - res.remove_column('idx') - print('done, now writing out') - #cat = Table(fitsio.read(input_path)) - catf = fitsio.FITS(input_path,'rw') - - if len(cat) != len(res): - print('mismatched lengths, somehow get brick mask removed data!!!') - - else: - #res = join(cat,res,keys=['TARGETID']) - catf[1].insert_column(tprl+'_mask',res[tprl+'_mask']) - catf.close() - #if output_path.endswith('.fits'): - # res.write(output_path,overwrite=True) - #else: - # np.write(output_path, np.array(res[tp.lower+'_mask'])) - #del cat - del res - print('Done!', time.strftime("%H:%M:%S", time.gmtime(time.time() - time_start))) - -# bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/survey-bricks.fits.gz')) -bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/randoms/survey-bricks-dr9-randoms-0.48.0.fits')) - -if debug: - rows = np.arange(int(1e3)) -else: - rows = None - -input_path = indirfull+tp+'_full_noveto.dat.fits' -output_path = input_path #we will over-write, just adding new column - -mkfile(input_path,output_path) - -for ri in range(args.minr,args.maxr): - input_path = indirfull+tp+'_'+str(ri)+'_full_noveto.ran.fits' - output_path = input_path #we will 
over-write, just adding new column - - mkfile(input_path,output_path) - print('adding mask column to '+tp+' random number '+str(ri)) - diff --git a/scripts/mock_tools/lss_cat_match_dr16.py b/scripts/mock_tools/lss_cat_match_dr16.py deleted file mode 100755 index 6677b59d3..000000000 --- a/scripts/mock_tools/lss_cat_match_dr16.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import glob -import os -from pathlib import Path - -from astropy import units as u -from astropy.coordinates import match_coordinates_sky, SkyCoord -from astropy.table import Table, hstack -import numpy as np -import fitsio - -parser = argparse.ArgumentParser(description="Match the input catalog to DR16 LSS.") - -parser.add_argument("-o", "--out-dir", type=str, required=True, help="Directory to save matched catalog to.") -parser.add_argument("--tracer",help='tracer type to match between',type=str,default='ELG') -parser.add_argument("--version",help='LSS catalog version',type=str,default='test') -parser.add_argument("--specrel",help='LSS catalog version',type=str,default='daily') - -args = parser.parse_args() - -out_loc = Path(args.out_dir) -if not os.path.isdir(out_loc): - os.mkdir(out_loc) - - - -if args.specrel == 'daily': - survey = 'main' - -if args.specrel == 'guadalupe': - survey = 'DA02' -if args.specrel == 'fuji': - survey = 'SV3' -ROOT = "/global/cfs/cdirs/desi/survey/catalogs/"+survey+"/LSS/"+args.specrel+"/LSScats/"+args.version+"/" -fname = args.tracer+'_full.dat.fits' -with fitsio.FITS(ROOT + fname) as h: - tab = h[1].read() - sel = tab['ZWARN'] != 999999 #reject the targets that were not observed - desi_table = tab[sel] -print("Loaded "+fname+"... matching "+str(len(tab[sel]))+' rows') - - - -# Pull out the RA/DEC for use in matching. -desi_ra = desi_table["RA"] -desi_dec = desi_table["DEC"] - -desi_skycoords = SkyCoord(ra=desi_ra, dec=desi_dec, unit="deg") - -# Loads DR16 -DR16_ROOT = "/global/cfs/cdirs/sdss/staging/dr16/eboss/lss/catalogs/DR16/" -dr16_fname = "eBOSS_"+args.tracer+"_full_ALLdata-vDR16.fits" - -cols_eboss = ["RA", "DEC", "Z", "PLATE", "MJD", "FIBERID","IMATCH"] - -with fitsio.FITS(DR16_ROOT + dr16_fname) as h: - eboss_table = h[1].read_columns(columns=cols_eboss) - sel = eboss_table['IMATCH'] == 1 - sel |= eboss_table['IMATCH'] == 2 - eboss_table = eboss_table[sel] -print("Loaded "+dr16_fname+"... matching "+str(len(eboss_table))+' rows') - -eboss_ra = np.asarray([i["RA"] for i in eboss_table]) -eboss_dec = np.asarray([i["DEC"] for i in eboss_table]) -eboss_skycoords = SkyCoord(ra=eboss_ra, dec=eboss_dec, unit="deg") - -# This is the line that actually matches the two table RA/DECs to each other -print("Matching...") -idx, sep2d, dist3d = match_coordinates_sky(desi_skycoords, eboss_skycoords) - -# 2d seperation in arc seconds to constrain our search radius. 
-d2d = np.asarray(sep2d.to(u.arcsec))
-
-# Keep everything whose match is within 1 arcsecond
-# Essentially deciding everything that close is "correct"
-match_keep = d2d < 1
-_, keep_counts = np.unique(idx[match_keep], return_counts=True)
-print("Matched "+str(np.sum(match_keep))+" entries from input catalog to DR16 LSS "+args.tracer+" catalog.")
-
-# If there are any double matches we'll need to handle that
-if np.any(keep_counts > 1):
-    print("Double matches found...")
-
-# Reduces the tables to the matched entries using the indices of matches
-desi_keep = Table(desi_table[match_keep])
-eboss_keep = Table(eboss_table[idx][match_keep])
-eboss_keep.rename_column("Z", "Z_SDSS")
-eboss_keep.remove_columns(['RA','DEC'])
-joined = hstack([desi_keep, eboss_keep])
-
-# Drops the SDSS RA/DEC from the joined table, since we already have these from
-# the DESI portion of the table.
-#del joined["RA"]
-#del joined["DEC"]
-
-# Setting the save name.
-out_name = args.tracer+"_cat_"+args.specrel+'_'+args.version+"_LSSfull_DR16_match.fits"
-
-joined.write(out_loc / out_name, format="fits", overwrite=True)
-
diff --git a/scripts/mock_tools/mkBGS_flavors.py b/scripts/mock_tools/mkBGS_flavors.py
deleted file mode 100644
index f3620606c..000000000
--- a/scripts/mock_tools/mkBGS_flavors.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#standard python
from now to z - return dis_dc(z)*(1.+z) - -def dm(z): - return 5.*np.log10(dl(z)) + 25. - - -def AbsMag(mag,z): - return mag - dm(z) - -def cut_abr_ct(data,maxr=0,minr=-100,minct=-100,maxct=100,zmin=0.01,zmax=0.5): - selz = data['Z'] > zmin - selz &= data['Z'] < zmax - data = data[selz] - r_dered = 22.5 - 2.5*np.log10(data['flux_r_dered']) - g_dered = 22.5 - 2.5*np.log10(data['flux_g_dered']) - - abr = r_dered -dm(data['Z']) - abg = g_dered -dm(data['Z']) - ct = g_dered-r_dered-0.14*(data['Z']-0.1)/0.05 #rough change based on peak of red g-r - sel = abr > minr - sel &= abr < maxr - sel &= ct > minct - sel &= ct < maxct - return data[sel] - -ctc = 0.7 #rough red/blue cut -abl = [-21.5,-20.5,-19.5] -P0 = 7000 -dz = 0.01 -zmin = 0.1 -zmax = 0.5 - -regl = ['_N','_S'] -for reg in regl: - if args.mkcats == 'y': - dat = fitsio.read(dirin+args.tracer+zw+reg+'_clustering.dat.fits') - for ab in abl: - dato = cut_abr_ct(dat,maxr=ab) - outf = dirout+args.tracer+zw+str(ab)+reg+'_clustering.dat.fits' - common.write_LSS(dato,outf) - dato = cut_abr_ct(dat,maxr=ab,maxct=ctc) - outf = dirout+args.tracer+zw+str(ab)+'blue'+reg+'_clustering.dat.fits' - common.write_LSS(dato,outf) - dato = cut_abr_ct(dat,maxr=ab,minct=ctc) - outf = dirout+args.tracer+zw+str(ab)+'red'+reg+'_clustering.dat.fits' - common.write_LSS(dato,outf) - - for rann in range(args.minr,args.maxr): - dat = fitsio.read(dirin+args.tracer+zw+reg+'_'+str(rann)+'_clustering.ran.fits') - for ab in abl: - dato = cut_abr_ct(dat,maxr=ab) - outf = dirout+args.tracer+zw+str(ab)+reg+'_'+str(rann)+'_clustering.ran.fits' - common.write_LSS(dato,outf) - dato = cut_abr_ct(dat,maxr=ab,maxct=ctc) - outf = dirout+args.tracer+zw+str(ab)+'blue'+reg+'_'+str(rann)+'_clustering.ran.fits' - common.write_LSS(dato,outf) - dato = cut_abr_ct(dat,maxr=ab,minct=ctc) - outf = dirout+args.tracer+zw+str(ab)+'red'+reg+'_'+str(rann)+'_clustering.ran.fits' - common.write_LSS(dato,outf) - if args.nz== 'y': - for ab in abl: - for cl in ['','blue','red']: - fb = dirout+args.tracer+zw+str(ab)+cl+reg - fcr = dirin+args.tracer+zw+reg+'_0_clustering.ran.fits' - fcd = fb+'_clustering.dat.fits' - fout = fb+'_nz.txt' - common.mknz(fcd,fcr,fout,bs=dz,zmin=zmin,zmax=zmax) - common.addnbar(fb,bs=dz,zmin=zmin,zmax=zmax,P0=P0) - diff --git a/scripts/mock_tools/mkBGS_flavors_kEE.py b/scripts/mock_tools/mkBGS_flavors_kEE.py deleted file mode 100644 index bc0e2dc4a..000000000 --- a/scripts/mock_tools/mkBGS_flavors_kEE.py +++ /dev/null @@ -1,125 +0,0 @@ -#standard python -import sys -import os -import sys -import shutil -import unittest -from datetime import datetime -import json -import numpy as np -import fitsio -import glob -import argparse -from astropy.table import Table,join,unique,vstack - -#from kcorr package, needs to be added to path -# ke_code_root = '/global/homes/a/ajross/desicode/DESI_ke' -# sys.path.append(ke_code_root) -# os.environ['CODE_ROOT'] = ke_code_root -# from smith_kcorr import GAMA_KCorrection -# from rest_gmr import smith_rest_gmr -# from tmr_ecorr import tmr_ecorr, tmr_q - -#from this package -import LSS.SV3.cattools as ct -import LSS.common_tools as common - -from LSS.tabulated_cosmo import TabulatedDESI -cosmo = TabulatedDESI() -dis_dc = cosmo.comoving_radial_distance - -if os.environ['NERSC_HOST'] == 'cori': - scratch = os.environ['CSCRATCH'] -elif os.environ['NERSC_HOST'] == 'perlmutter': - scratch = os.environ['PSCRATCH'] -else: - print('NERSC_HOST is not cori or permutter but is '+os.environ['NERSC_HOST']) - sys.exit('NERSC_HOST not known (code only works on 
NERSC), not proceeding') - - -parser = argparse.ArgumentParser() -parser.add_argument("--tracer", help="tracer type to be selected; BGS_ANY or BGS_BRIGHT",default='BGS_BRIGHT') -parser.add_argument("--survey", help="e.g., SV3, DA02, main",default='SV3') -parser.add_argument("--verspec",help="version for redshifts",default='fuji') -parser.add_argument("--basedir", help="base directory for output, default is (C/P)SCRATCH",default=scratch) -parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test') -parser.add_argument("--minr", help="minimum number for random files",default=0) -parser.add_argument("--maxr", help="maximum for random files, default is 1, but 18 are available (use parallel script for all)",default=18) - -parser.add_argument("--mkcats", help="make the subsampled catalogs ",default='y') -parser.add_argument("--nz", help="get n(z) ",default='y') - -args = parser.parse_args() - -dirin = args.basedir+'/'+args.survey+ '/LSS/'+args.verspec+'/LSScats/'+args.version+'/' -dirout = dirin +'BGSsubcats/' - -zw = '' -#if args.survey == 'DA02': -# zw = 'zdone' - -if not os.path.exists(dirout): - os.mkdir(dirout) - print('made '+dirout) - - - -def cut_abr_ct(data,maxr=0,minr=-100,minct=-100,maxct=100,zmin=0.01,zmax=0.5): - abr = data['ABSMAG_R'] - ct = data['REST_GMR_0P1'] - sel = abr > minr - sel &= abr < maxr - sel &= ct > minct - sel &= ct < maxct - return data[sel] - -ctc = 0.75 #rough red/blue cut -abl = [-21.5,-20.5,-19.5] -P0 = 7000 -dz = 0.01 -zmin = 0.01 -if args.survey == 'DA02': - zmin = 0.1 -zmax = 0.5 - -regl = ['_N','_S'] -for reg in regl: - if args.mkcats == 'y': - dat = Table(fitsio.read(dirin+args.tracer+zw+reg+'_clustering.dat.fits')) - #selz = dat['Z'] > zmin - #selz &= data['Z'] < zmax - #data = data[selz] - - for ab in abl: - dato = cut_abr_ct(dat,maxr=ab) - outf = dirout+args.tracer+zw+str(ab)+'ke'+reg+'_clustering.dat.fits' - common.write_LSS(dato,outf) - dato = cut_abr_ct(dat,maxr=ab,maxct=ctc) - outf = dirout+args.tracer+zw+str(ab)+'keblue'+reg+'_clustering.dat.fits' - common.write_LSS(dato,outf) - dato = cut_abr_ct(dat,maxr=ab,minct=ctc) - outf = dirout+args.tracer+zw+str(ab)+'kered'+reg+'_clustering.dat.fits' - common.write_LSS(dato,outf) - - for rann in range(args.minr,args.maxr): - dat = fitsio.read(dirin+args.tracer+zw+reg+'_'+str(rann)+'_clustering.ran.fits') - for ab in abl: - dato = cut_abr_ct(dat,maxr=ab) - outf = dirout+args.tracer+zw+str(ab)+'ke'+reg+'_'+str(rann)+'_clustering.ran.fits' - common.write_LSS(dato,outf) - dato = cut_abr_ct(dat,maxr=ab,maxct=ctc) - outf = dirout+args.tracer+zw+str(ab)+'keblue'+reg+'_'+str(rann)+'_clustering.ran.fits' - common.write_LSS(dato,outf) - dato = cut_abr_ct(dat,maxr=ab,minct=ctc) - outf = dirout+args.tracer+zw+str(ab)+'kered'+reg+'_'+str(rann)+'_clustering.ran.fits' - common.write_LSS(dato,outf) - if args.nz== 'y': - for ab in abl: - for cl in ['ke','keblue','kered']: - fb = dirout+args.tracer+zw+str(ab)+cl+reg - fcr = dirin+args.tracer+zw+reg+'_0_clustering.ran.fits' - fcd = fb+'_clustering.dat.fits' - fout = fb+'_nz.txt' - common.mknz(fcd,fcr,fout,bs=dz,zmin=zmin,zmax=zmax) - common.addnbar(fb,bs=dz,zmin=zmin,zmax=zmax,P0=P0) - diff --git a/scripts/mock_tools/mkCat_tar4ang.py b/scripts/mock_tools/mkCat_tar4ang.py deleted file mode 100644 index f0080be7b..000000000 --- a/scripts/mock_tools/mkCat_tar4ang.py +++ /dev/null @@ -1,111 +0,0 @@ -''' -one executable to create catalogs for given target type meant for angular clustering -''' - - - -#standard python 
-import sys -import os -import shutil -import unittest -from datetime import datetime -import json -import numpy as np -import fitsio -import glob -import argparse -from astropy.table import Table,join,unique,vstack -from matplotlib import pyplot as plt - -#sys.path.append('../py') - -#from this package -import LSS.imaging.select_samples as ss - -parser = argparse.ArgumentParser() -parser.add_argument("--type", help="tracer type to be selected") -parser.add_argument("--tarver", help="version of targeting",default='0.57.0') -parser.add_argument("--survey", help="e.g., sv1 or main",default='sv3') -parser.add_argument("--basedir", help="base directory for output, default is CSCRATCH",default=os.environ['CSCRATCH']) -parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test') - -args = parser.parse_args() - -type = args.type -tarver = args.tarver -version = args.version -basedir = args.basedir -survey = args.survey - -if survey == 'main': - tp = 'DESI_TARGET' - sw = '' -if survey == 'sv1': - tp = 'SV1_DESI_TARGET' - sw = 'sv1' -if survey == 'sv3': - tp = 'SV3_DESI_TARGET' - sw = 'sv3' - -outdir = basedir+'/tarcat/v'+version+'/tv'+tarver+'/' -if not os.path.exists( basedir+'/tarcat'): - os.mkdir(basedir+'/tarcat') - print('created '+basedir+'/tarcat') - -if not os.path.exists( basedir+'/tarcat/v'+version): - os.mkdir(basedir+'/tarcat/v'+version) - print('created '+basedir+'/tarcat/v'+version) - -if not os.path.exists(outdir): - os.mkdir(outdir) - print('created '+outdir) - -dirsweeps = '/global/project/projectdirs/cosmo/data/legacysurvey/dr9/south/sweep/9.0/' -dirsweepn = '/global/project/projectdirs/cosmo/data/legacysurvey/dr9/north/sweep/9.0/' -targroot = '/project/projectdirs/desi/target/catalogs/dr9/'+tarver+'/targets/'+survey+'/resolve/' -ranroot = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-' -nran = 10 - -sfs = glob.glob(dirsweeps+'sweep*') -sfn = glob.glob(dirsweepn+'sweep*') - - - -elgandlrgbits = [1,5,6,7,8,9,11,12,13] #these get used to veto imaging area; combination of bits applied to ELGs and LRGs in DR8 targeting - -mkbsamp = True #make the base sample -domaskd = True #mask data based on mask bits above -domaskr = True #mask randoms -'test' -print('type being used for bright/dark '+type[:3]) - -#columns to select from target sample -keys = ['RA', 'DEC', 'BRICKID', 'BRICKNAME','MORPHTYPE','DCHISQ','FLUX_G', 'FLUX_R', 'FLUX_Z','FLUX_W1','FLUX_W2','MW_TRANSMISSION_G', 'MW_TRANSMISSION_R', 'MW_TRANSMISSION_Z', 'MW_TRANSMISSION_W1', 'MW_TRANSMISSION_W2','FLUX_IVAR_G', 'FLUX_IVAR_R', 'FLUX_IVAR_Z','NOBS_G', 'NOBS_R', 'NOBS_Z','PSFDEPTH_G', 'PSFDEPTH_R', 'PSFDEPTH_Z', 'GALDEPTH_G', 'GALDEPTH_R',\ - 'GALDEPTH_Z','FIBERFLUX_G', 'FIBERFLUX_R', 'FIBERFLUX_Z', 'FIBERTOTFLUX_G', 'FIBERTOTFLUX_R', 'FIBERTOTFLUX_Z',\ - 'MASKBITS', 'EBV', 'PHOTSYS','TARGETID',tp,'SHAPE_R'] - - -if mkbsamp: #concatenate target files for given type, with column selection hardcoded - prog = 'dark' - if type[:3] == 'BGS': - prog = 'bright' - ss.gather_targets(type,targroot,outdir,tarver,survey,prog,keys=keys) - -if domaskd: - dd = fitsio.read(outdir+type+sw +'targetsDR9v'+tarver.strip('.')+'.fits' ) - dd = ss.mask(dd,elgandlrgbits) - outf = outdir+type+sw +'targetsDR9v'+tarver.strip('.')+'_masked.fits' - fitsio.write(outf,dd,clobber=True) - print('wrote to '+outf) - -if domaskr: - for ii in range(0,nran): - rr = fitsio.read(ranroot+str(ii)+'.fits',columns=['RA','DEC','BRICKID','PHOTSYS','NOBS_G','NOBS_R','NOBS_Z','MASKBITS']) 
- #need to restrict columns on line above otherwise run out of memory - rr = ss.mask(rr,elgandlrgbits) - outf = outdir+'randomsDR9v'+tarver.strip('.')+'_'+str(ii)+'_masked.fits' - fitsio.write(outf,rr,clobber=True) - print('wrote to '+outf) - - diff --git a/scripts/mock_tools/mkemlin.py b/scripts/mock_tools/mkemlin.py deleted file mode 100644 index 294ccf1c7..000000000 --- a/scripts/mock_tools/mkemlin.py +++ /dev/null @@ -1,100 +0,0 @@ -#standard python -import sys -import os -import shutil -import unittest -from datetime import datetime -import json -import numpy as np -import healpy as hp -import fitsio -import glob -import argparse -from astropy.table import Table,join,unique,vstack -from matplotlib import pyplot as plt -from desitarget.io import read_targets_in_tiles -from desitarget.mtl import inflate_ledger -from desimodel.footprint import is_point_in_desi -import desimodel.footprint as foot -from desitarget import targetmask - -#import logging -#logging.getLogger().setLevel(logging.ERROR) - - -#sys.path.append('../py') #this requires running from LSS/bin, *something* must allow linking without this but is not present in code yet - -#from this package -#try: -import LSS.main.cattools as ct -from LSS.globals import main - -parser = argparse.ArgumentParser() -parser.add_argument("--prog", help="dark or bright is supported",default='dark') - -args = parser.parse_args() -print(args) - -specrel = 'daily' -prog = args.prog -progu = prog.upper() - - -mainp = main(prog) - -mt = mainp.mtld -tiles = mainp.tiles - -wd = mt['SURVEY'] == 'main' -wd &= mt['ZDONE'] == 'true' -wd &= mt['FAPRGRM'] == prog -mtd = mt[wd] -print('found '+str(len(mtd))+' '+prog+' time main survey tiles with zdone true for '+specrel+' version of reduced spectra') - - -tiles4comb = Table() -tiles4comb['TILEID'] = mtd['TILEID'] -tiles4comb['ZDATE'] = mtd['ARCHIVEDATE'] -tiles4comb['THRUDATE'] = mtd['LASTNIGHT'] - -tiles.keep_columns(['TILEID','RA','DEC']) -#print(tiles.dtype.names) - -tiles4comb = join(tiles4comb,tiles,keys=['TILEID']) - -print('check that length of tiles4comb matches '+str(len(tiles4comb))) - - -outdir = '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/daily/emtiles/' -guadtiles = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/DA02/LSS/guadalupe/datcomb_'+prog+'_spec_zdone.fits',columns=['TILEID']) -guadtiles = np.unique(guadtiles['TILEID']) -gtids = np.isin(tiles4comb['TILEID'],guadtiles) -tiles4em = tiles4comb[~gtids] -ndone = 0 - -def mkEMtile(ii): - if ii >= len(tiles4em): - print('out of range!') - else: - tile,zdate,tdate = tiles4em['TILEID'][ii],tiles4em['ZDATE'][ii],tiles4em['THRUDATE'][ii] - outf = outdir+'emline-'+str(tile)+'.fits' - if not os.path.isfile(outf): - tdate = str(tdate) - ct.combEMdata_daily(tile,zdate,tdate,outf=outf) - print('wrote '+outf) - -if __name__ == '__main__': - - from multiprocessing import Pool - N = 64 - if os.environ['NERSC_HOST'] == 'perlmutter': - N = 128 - print('using 128 cpus') - for n in range(0,len(tiles4em),N): - p = Pool(N) - inds = [] - for i in range(n,n+N): - inds.append(i) - p.map(mkEMtile,inds) - print(n,len(tiles4em)) - diff --git a/scripts/mock_tools/mknzplots.py b/scripts/mock_tools/mknzplots.py deleted file mode 100644 index 8421cbbb9..000000000 --- a/scripts/mock_tools/mknzplots.py +++ /dev/null @@ -1,66 +0,0 @@ -import sys,os -import argparse - -import numpy as np -from matplotlib import pyplot as plt - -parser = argparse.ArgumentParser() -parser.add_argument("--survey", help="current choices are SV3,DA02,or main",default='SV3') 
-parser.add_argument("--version", help="catalog version",default='test') -parser.add_argument("--verspec",help="version for redshifts",default='everest') - - -args = parser.parse_args() -print(args) -catdir='/global/cfs/cdirs/desi/survey/catalogs/' -indir = catdir +args.survey+'/LSS/' +args.verspec+'/LSScats/'+args.version+'/' - -dirout = indir+'/plots/' - -if not os.path.exists(dirout): - os.mkdir(dirout) - print('made '+dirout) - - -types = ['ELG','ELG_LOP','LRG','ELG_LOPnotqso','QSO','BGS_ANY','BGS_BRIGHT'] -if args.survey == 'SV3': - types = ['ELG','ELG_HIP','LRG','LRG_main','ELG_HIPnotqso','QSO','BGS_ANY','BGS_BRIGHT'] - - - -for tp in types: - wzm = '' - if args.survey != 'SV3': - wzm = 'zdone' - - regl = ['_N','_S'] - cl = ['-r','-b'] - ll = ['BASS/MzLS','DECaLS'] - p = False - for reg,c,l in zip(regl,cl,ll): - fn = indir+tp+wzm+reg+'_nz.dat' - if os.path.exists(fn): - p = True - zdat = np.loadtxt(fn).transpose() - plt.plot(zdat[0],zdat[3],c,label=l) - - else: - print('did not find '+fn) - if p: - if tp[:3] == 'ELG': - plt.ylim(0,.0013) - print(tp) - if tp[:3] == 'BGS': - plt.ylim(0,.05) - plt.xlim(0,.6) - print(tp) - - plt.legend() - plt.xlabel('z (redshift)') - plt.ylabel(r'$n(z)~ (h$Mpc$)^3$') - plt.title(args.survey+' '+tp) - - - plt.savefig(dirout+'nz'+args.survey+tp+'.png') - plt.clf() - \ No newline at end of file diff --git a/scripts/mock_tools/perfiber_success_stats.py b/scripts/mock_tools/perfiber_success_stats.py deleted file mode 100644 index c6d5b2703..000000000 --- a/scripts/mock_tools/perfiber_success_stats.py +++ /dev/null @@ -1,175 +0,0 @@ -import numpy as np -#!pip install astropy -#!pip install fitsio -from scipy import stats -from scipy.stats import norm -import fitsio -import glob -import os -import sys -import matplotlib.pyplot as plt -import statistics -import argparse -import astropy -from astropy.table import Table,join -from astropy.time import Time -from astropy.io import fits - -import LSS.common_tools as common - - -parser = argparse.ArgumentParser() - -basedir='/global/cfs/cdirs/desi/survey/catalogs' -parser.add_argument("--tracer", help="tracer type to be selected",default='all') -parser.add_argument("--basedir", help="base directory for input/output",default=basedir) -parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='DA02') -parser.add_argument("--verspec",help="version for redshifts",default='guadalupe') -parser.add_argument("--mkfiles",help="whether or not to make the files",default='n') -#parser.add_argument("--tracer",help="tracer type (e.g., LRG)",default='LRG') - -args = parser.parse_args() -basedir = args.basedir -survey = args.survey -specver = args.verspec -#tp = args.tracer - - - -#ff = fitsio.read(filepathLF) -#hdul = fits.open(filepathLF) -#ff2 = fitsio.read(filepathBGS) -#hdul = fits.open(filepathBGS) - -if args.tracer == 'all': - tracers = ['QSO','LRG','ELG','BGS_ANY'] -else: - tracers = [args.tracer] - -if args.mkfiles == 'y': - for tp in tracers: - if survey == 'DA02': - if tp == 'LRG': - bit = 1 #for selecting LRG - if tp == 'ELG': - bit = 2 - if tp == 'QSO': - bit = 4 - if tp == 'BGS_ANY': - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_zdone.fits' - dz = Table(fitsio.read(zf)) - - desitarg = 'BGS_TARGET' - wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0) - else: - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_zdone.fits' - dz = Table(fitsio.read(zf)) - desitarg = 'DESI_TARGET' - wtype = ((dz[desitarg] & bit) > 0) - if tp == 'ELG': - wtype &= 
((dz[desitarg] & 4) == 0) #remove QSO - print(len(dz[wtype])) - #dz = dz[wtype&wg] - dz = dz[wtype] - - dz = common.cut_specdat(dz) - from LSS.globals import main - pars = main(tp,args.verspec) - - - - elif survey == 'SV3': - #ys.exit('not written for SV3 yet') - if tp != 'BGS_ANY': - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_dark_tarspecwdup_Alltiles.fits' - dz = Table(fitsio.read(zf)) - desitarg = 'SV3_DESI_TARGET' - if tp == 'LRG': - bit = 1 #for selecting LRG - if tp == 'ELG': - bit = 2 - if tp == 'QSO': - bit = 4 - wtype = ((dz[desitarg] & bit) > 0) - if tp == 'ELG': - wtype &= ((dz[desitarg] & 4) == 0) #remove QSO - else: - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_bright_tarspecwdup_Alltiles.fits' - dz = Table(fitsio.read(zf)) - desitarg = 'SV3_BGS_TARGET' - wtype = dz[desitarg] > 0#((dz[desitarg] & bit) > 0) - - print(len(dz[wtype])) - #dz = dz[wtype&wg] - dz = dz[wtype] - wz = dz['COADD_FIBERSTATUS'] == 0 - dz = dz[wz] - - else: - zf = basedir+'/'+survey+'/LSS/'+specver+'/datcomb_'+tp+'_tarspecwdup_zdone.fits' - dz = Table(fitsio.read(zf)) - if tp == 'ELG': - wtype = ((dz['DESI_TARGET'] & 4) == 0) #remove QSO - dz = dz[wtype] - dz = common.cut_specdat(dz) - from LSS.globals import main - pars = main(tp,args.verspec) - - - z_tot = dz['ZWARN'] != 999999 - z_tot &= dz['ZWARN']*0 == 0 - - - if tp == 'LRG': - z_suc= dz['ZWARN']==0 - z_suc &= dz['DELTACHI2']>15 - z_suc &= dz['Z']<1.5 - - if tp == 'ELG': - o2f = fitsio.read(pars.elgzf,columns=['TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR']) - dz = join(dz,o2f,keys=['TARGETID','TILEID','LOCATION']) - o2c = np.log10(dz['OII_FLUX'] * np.sqrt(dz['OII_FLUX_IVAR']))+0.2*np.log10(dz['DELTACHI2']) - z_suc = o2c > 0.9 - - if tp == 'QSO': - qsozf = pars.qsozf - if specver == 'guadalupe': - qsozf = '/global/cfs/cdirs/desi/users/edmondc/QSO_catalog/guadalupe/QSO_cat_guadalupe_cumulative.fits' - arz = Table(fitsio.read(qsozf)) - arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','Z_QN']) - arz['TILEID'] = arz['TILEID'].astype(int) - - #arz = fitsio.read(qsozf,columns=['TARGETID','LOCATION','TILEID','Z','Z_QN']) - - #arz['TILEID'] = arz['TILEID'].astype(int) - dz = join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF']) - #dz['Z'].name = 'Z_RR' #rename the original redrock redshifts - #dz['Z_QF'].name = 'Z' #the redshifts from the quasar file should be used instead - - z_suc = dz['Z_QF'].mask == False - - - if tp == 'BGS_ANY': - z_suc = dz['ZWARN']==0 - z_suc &= dz['DELTACHI2']>40 - - #print(len(ff[z_suc]),len(ff[z_tot])) - print("zsuccess rate for "+tp,len(dz[z_suc&z_tot])/len(dz[z_tot])) - fibl,n_tot = np.unique(dz[z_tot]['FIBER'],return_counts=True) - fiblg,n_g = np.unique(dz[z_suc&z_tot]['FIBER'],return_counts=True) - fib_test = np.isin(fibl,fiblg) - z_tot &= np.isin(dz['FIBER'],fibl[fib_test]) - fibl,n_tot = np.unique(dz[z_tot]['FIBER'],return_counts=True) - - if np.array_equal(fibl,fiblg): - gfrac = n_g/n_tot - else: - sys.exit('need to put something in for mismatch fiber lists') - - fn = basedir+'/'+survey+'/LSS/'+specver+"/"+tp+'_zsuccess.txt' - fo = open(fn,'w') - for ii in range(len(fibl)): - fo.write(str(fibl[ii])+' '+str(n_g[ii]/n_tot[ii])+' '+str(n_g[ii])+' '+str(n_tot[ii])+'\n') - fo.close() - - diff --git a/scripts/mock_tools/pkrun.py b/scripts/mock_tools/pkrun.py deleted file mode 100644 index 669b8ab0c..000000000 --- a/scripts/mock_tools/pkrun.py +++ /dev/null @@ -1,278 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# To run: 
srun -n 64 python pkrun.py --tracer ELG...
-
-import os
-import argparse
-import logging
-
-import numpy as np
-from astropy.table import Table, vstack
-from matplotlib import pyplot as plt
-
-from pypower import CatalogFFTPower, PowerSpectrumStatistics, CatalogSmoothWindow, PowerSpectrumSmoothWindow, PowerSpectrumOddWideAngleMatrix, PowerSpectrumSmoothWindowMatrix, utils, setup_logging
-from LSS.tabulated_cosmo import TabulatedDESI
-
-from xirunpc import read_clustering_positions_weights, concatenate_data_randoms, compute_angular_weights, catalog_dir, get_regions, get_zlims, get_scratch_dir
-
-
-os.environ['OMP_NUM_THREADS'] = os.environ['NUMEXPR_MAX_THREADS'] = '1'
-logger = logging.getLogger('pkrun')
-
-
-def barrier_idle(mpicomm, tag=0, sleep=0.01):
-    """
-    MPI barrier function that avoids the problem of idle processes occupying 100% CPU.
-    See: https://goo.gl/NofOO9.
-    """
-    import time
-    size = mpicomm.size
-    if size == 1: return
-    rank = mpicomm.rank
-    mask = 1
-    while mask < size:
-        dst = (rank + mask) % size
-        src = (rank - mask + size) % size
-        req = mpicomm.isend(None, dst, tag)
-        while not mpicomm.Iprobe(src, tag):
-            time.sleep(sleep)
-        mpicomm.recv(None, src, tag)
-        req.Wait()
-        mask <<= 1
-
-
-def compute_power_spectrum(edges, distance, dtype='f8', wang=None, weight_type='default', tracer='ELG', tracer2=None, rec_type=None, ells=(0, 2, 4), boxsize=5000., nmesh=1024, dowin=False, option=None, mpicomm=None, mpiroot=0, **kwargs):
-
-    autocorr = tracer2 is None
-    catalog_kwargs = kwargs.copy()
-    catalog_kwargs['weight_type'] = weight_type
-    catalog_kwargs['concatenate'] = True
-    with_shifted = rec_type is not None
-
-    if 'angular' in weight_type and wang is None:
-        #wang = compute_angular_weights(nthreads=1, dtype=dtype, weight_type=weight_type, tracer=tracer, tracer2=tracer2, mpicomm=mpicomm, mpiroot=mpiroot, **kwargs)
-        # Does not run faster, why?
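
(Aside: a minimal standalone sketch of the subcommunicator trick used in this block, assuming an mpi4py-style communicator; names are illustrative and barrier_idle is the function defined above.)

from mpi4py import MPI

comm = MPI.COMM_WORLD
nthreads = 64
color = comm.rank % nthreads == 0  # one worker rank per group of nthreads ranks
subcomm = comm.Split(color, 0)  # ranks sharing a color share a communicator
if color:
    pass  # only the worker ranks would run the threaded computation on subcomm
barrier_idle(comm)  # the rest sleep-poll here instead of spinning in comm.Barrier()
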
- # Because the number of cores is ncores // mpicomm.size - nthreads = 64 - color = mpicomm.rank % nthreads == 0 - subcomm = mpicomm.Split(color, 0) - if color: - wang = compute_angular_weights(nthreads=nthreads, dtype=dtype, weight_type=weight_type, tracer=tracer, tracer2=tracer2, mpicomm=subcomm, mpiroot=0, **kwargs) - barrier_idle(mpicomm) - wang = mpicomm.bcast(wang, root=0) - exit() - - data_positions1, data_weights1, data_positions2, data_weights2 = None, None, None, None - randoms_positions1, randoms_weights1, randoms_positions2, randoms_weights2 = None, None, None, None - shifted_positions1, shifted_weights1, shifted_positions2, shifted_weights2 = None, None, None, None - - if mpicomm is None or mpicomm.rank == mpiroot: - - data, randoms = read_clustering_positions_weights(distance, name=['data', 'randoms'], rec_type=rec_type, tracer=tracer, option=option, **catalog_kwargs) - if with_shifted: - shifted = randoms # above returned shifted randoms - randoms = read_clustering_positions_weights(distance, name='randoms', rec_type=False, tracer=tracer, option=option, **catalog_kwargs) - (data_positions1, data_weights1), (randoms_positions1, randoms_weights1) = concatenate_data_randoms(data, randoms, **catalog_kwargs) - if with_shifted: - shifted_positions1, shifted_weights1 = concatenate_data_randoms(data, shifted, **catalog_kwargs)[1] - - if not autocorr: - data, randoms = read_clustering_positions_weights(distance, name=['data', 'randoms'], rec_type=rec_type, tracer=tracer2, option=option, **catalog_kwargs) - if with_shifted: - shifted = randoms - randoms = read_clustering_positions_weights(distance, name='randoms', rec_type=False, tracer=tracer2, option=option, **catalog_kwargs) - (data_positions2, data_weights2), (randoms_positions2, randoms_weights2) = concatenate_data_randoms(data, randoms, **catalog_kwargs) - if with_shifted: - shifted_positions2, shifted_weights2 = concatenate_data_randoms(data, shifted, **catalog_kwargs)[1] - - kwargs = {} - kwargs.update(wang or {}) - - result = CatalogFFTPower(data_positions1=data_positions1, data_weights1=data_weights1, - data_positions2=data_positions2, data_weights2=data_weights2, - randoms_positions1=randoms_positions1, randoms_weights1=randoms_weights1, - randoms_positions2=randoms_positions2, randoms_weights2=randoms_weights2, - shifted_positions1=shifted_positions1, shifted_weights1=shifted_weights1, - shifted_positions2=shifted_positions2, shifted_weights2=shifted_weights2, - edges=edges, ells=ells, boxsize=boxsize, nmesh=nmesh, resampler='tsc', interlacing=3, - position_type='rdd', dtype=dtype, direct_limits=(0., 1.), direct_limit_type='degree', # direct_limits, (0, 1) degree - **kwargs, mpicomm=mpicomm, mpiroot=mpiroot).poles - wawm = None - if dowin: - windows = [] - boxsizes = [scale * boxsize for scale in [20., 5., 1.]] - edges = {'step': 2. 
* np.pi / boxsizes[0]} - for boxsize in boxsizes: - windows.append(CatalogSmoothWindow(randoms_positions1=randoms_positions1, randoms_weights1=randoms_weights1, - power_ref=result, edges=edges, boxsize=boxsize, position_type='rdd', - mpicomm=mpicomm, mpiroot=mpiroot).poles) - window = PowerSpectrumSmoothWindow.concatenate_x(*windows, frac_nyq=0.9) - if mpicomm.rank == mpiroot: - # Let us compute the wide-angle and window function matrix - kout = result.k # output k-bins - ellsout = [0, 2, 4] # output multipoles - ellsin = [0, 2, 4] # input (theory) multipoles - wa_orders = 1 # wide-angle order - sep = np.geomspace(1e-4, 4e3, 1024*16) # configuration space separation for FFTlog - kin_rebin = 4 # rebin input theory to save memory - kin_lim = (0, 2e1) # pre-cut input (theory) ks to save some memory - # Input projections for window function matrix: - # theory multipoles at wa_order = 0, and wide-angle terms at wa_order = 1 - projsin = ellsin + PowerSpectrumOddWideAngleMatrix.propose_out(ellsin, wa_orders=wa_orders) - # Window matrix - wm = PowerSpectrumSmoothWindowMatrix(kout, projsin=projsin, projsout=ellsout, window=window, sep=sep, kin_rebin=kin_rebin, kin_lim=kin_lim) - # We resum over theory odd-wide angle - wawm = wm.copy() - wawm.resum_input_odd_wide_angle() - - return result, wang, wawm - - -def get_edges(): - return {'min':0., 'step':0.001} - - -def power_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax=np.inf, rec_type=False, weight_type='default', bin_type='lin', out_dir='.'): - if tracer2: tracer += '_' + tracer2 - if rec_type: tracer += '_' + rec_type - if region: tracer += '_' + region - root = '{}_{}_{}_{}_{}'.format(tracer, zmin, zmax, weight_type, bin_type) - if file_type == 'npy': - return os.path.join(out_dir, 'pkpoles_{}.npy'.format(root)) - return os.path.join(out_dir, '{}_{}.txt'.format(file_type, root)) - - -def window_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax=np.inf, rec_type=False, weight_type='default', bin_type='lin', out_dir='.'): - if tracer2: tracer += '_' + tracer2 - if rec_type: tracer += '_' + rec_type - if region: tracer += '_' + region - root = '{}_{}_{}_{}_{}'.format(tracer, zmin, zmax, weight_type, bin_type) - if file_type == 'npy': - return os.path.join(out_dir, 'window_smooth_{}.npy'.format(root)) - return os.path.join(out_dir, '{}_{}.txt'.format(file_type, root)) - - -if __name__ == '__main__': - - parser = argparse.ArgumentParser() - parser.add_argument('--tracer', help='tracer(s) to be selected - 2 for cross-correlation', type=str, nargs='+', default=['ELG']) - parser.add_argument('--basedir', help='where to find catalogs', type=str, default='/global/cfs/cdirs/desi/survey/catalogs/') - parser.add_argument('--survey', help='e.g., SV3 or main', type=str, choices=['SV3', 'DA02', 'main'], default='SV3') - parser.add_argument('--verspec', help='version for redshifts', type=str, default='guadalupe') - parser.add_argument('--version', help='catalog version', type=str, default='test') - parser.add_argument('--ran_sw', help='extra string in random name', type=str, default='') - parser.add_argument('--region', help='regions; by default, run on N, S; pass NS to run on concatenated N + S', type=str, nargs='*', choices=['N', 'S', 'NS','NGC','SGC'], default=None) - parser.add_argument('--zlim', help='z-limits, or options for z-limits, e.g. 
"highz", "lowz", "fullonly"', type=str, nargs='*', default=None) - parser.add_argument('--weight_type', help='types of weights to use; use "default_angular_bitwise" for PIP with angular upweighting; "default" just uses WEIGHT column', type=str, default='default') - parser.add_argument('--boxsize', help='box size', type=float, default=8000.) - parser.add_argument('--nmesh', help='mesh size', type=int, default=1024) - parser.add_argument('--nran', help='number of random files to combine together (1-18 available)', type=int, default=4) - parser.add_argument('--outdir', help='base directory for output (default: SCRATCH)', type=str, default=None) - parser.add_argument('--calc_win', help='also calculate window?; use "y" for yes', default='n') - parser.add_argument('--vis', help='show plot of each pk?', action='store_true', default=False) - parser.add_argument('--rebinning', help='whether to rebin the pk or just keep the original .npy file', default='n') - - #only relevant for reconstruction - parser.add_argument('--rec_type', help='reconstruction algorithm + reconstruction convention', choices=['IFTrecsym', 'IFTreciso', 'MGrecsym', 'MGreciso'], type=str, default=None) - - setup_logging() - args = parser.parse_args() - if args.calc_win == 'n': - args.calc_win = False - if args.calc_win == 'y': - args.calc_win = True - - if args.rebinning == 'n': - args.rebinning = False - if args.rebinning == 'y': - args.rebinning = True - - from pypower import mpi - mpicomm = mpi.COMM_WORLD - mpiroot = 0 - - if os.path.normpath(args.basedir) == os.path.normpath('/global/cfs/cdirs/desi/survey/catalogs/'): - cat_dir = catalog_dir(base_dir=args.basedir, survey=args.survey, verspec=args.verspec, version=args.version) - elif os.path.normpath(args.basedir) == os.path.normpath('/global/project/projectdirs/desi/users/acarnero/mtl_mock000_univ1/'): - cat_dir = args.basedir - args.region = [''] - else: - cat_dir = args.basedir - if mpicomm is None or mpicomm.rank == mpiroot: - logger.info('Catalog directory is {}.'.format(cat_dir)) - - if args.outdir is None: - out_dir = os.path.join(get_scratch_dir(), args.survey) - else: - out_dir = args.outdir - if mpicomm is None or mpicomm.rank == mpiroot: - logger.info('Output directory is {}.'.format(out_dir)) - - tracer, tracer2 = args.tracer[0], None - if len(args.tracer) > 1: - tracer2 = args.tracer[1] - if len(args.tracer) > 2: - raise ValueError('Provide <= 2 tracers!') - if tracer2 == tracer: - tracer2 = None # otherwise counting of self-pairs - catalog_kwargs = dict(tracer=tracer, tracer2=tracer2, survey=args.survey, cat_dir=cat_dir, rec_type=args.rec_type,ran_sw=args.ran_sw) # survey required for zdone - distance = TabulatedDESI().comoving_radial_distance - - regions = args.region - if regions is None: - regions = get_regions(args.survey, rec=bool(args.rec_type)) - - if args.zlim is None: - zlims = get_zlims(tracer, tracer2=tracer2) - elif not args.zlim[0].replace('.', '').isdigit(): - option = args.zlim[0] - zlims = get_zlims(tracer, tracer2=tracer2, option=option) - else: - zlims = [float(zlim) for zlim in args.zlim] - zlims = list(zip(zlims[:-1], zlims[1:])) + ([(zlims[0], zlims[-1])] if len(zlims) > 2 else []) # len(zlims) == 2 == single redshift range - - bin_type = 'lin' - rebinning_factors = [1, 5, 10] - if mpicomm.rank == mpiroot: - logger.info('Computing power spectrum multipoles in regions {} in redshift ranges {}.'.format(regions, zlims)) - - for zmin, zmax in zlims: - base_file_kwargs = dict(tracer=tracer, tracer2=tracer2, zmin=zmin, zmax=zmax, 
rec_type=args.rec_type, weight_type=args.weight_type, bin_type=bin_type, out_dir=os.path.join(out_dir, 'pk')) - for region in regions: - if mpicomm.rank == mpiroot: - logger.info('Computing power spectrum in region {} in redshift range {}.'.format(region, (zmin, zmax))) - edges = get_edges() - wang = None - result, wang, window = compute_power_spectrum(edges=edges, distance=distance, nrandoms=args.nran, region=region, zlim=(zmin, zmax), weight_type=args.weight_type, boxsize=args.boxsize, nmesh=args.nmesh, wang=wang, dowin=args.calc_win, mpicomm=mpicomm, mpiroot=mpiroot, **catalog_kwargs) - fn = power_fn(file_type='npy', region=region, **base_file_kwargs) - result.save(fn) - if window is not None: - fn = window_fn(file_type='npy', region=region, **base_file_kwargs) - window.save(fn) - - all_regions = regions.copy() - if mpicomm.rank == mpiroot: - if 'N' in regions and 'S' in regions: # let's combine - result = sum([PowerSpectrumStatistics.load(power_fn(file_type='npy', region=region, **base_file_kwargs)) for region in ['N', 'S']]) - result.save(power_fn(file_type='npy', region='NScomb', **base_file_kwargs)) - all_regions.append('NScomb') - if args.rebinning: - for region in all_regions: - txt_kwargs = base_file_kwargs.copy() - txt_kwargs.update(region=region) - result = PowerSpectrumStatistics.load(power_fn(file_type='npy', **txt_kwargs)) - for factor in rebinning_factors: - #result = PowerSpectrumStatistics.load(fn) - rebinned = result[:(result.shape[0]//factor)*factor:factor] - txt_kwargs.update(bin_type=bin_type+str(factor)) - fn_txt = power_fn(file_type='pkpoles', **txt_kwargs) - rebinned.save_txt(fn_txt) - - if args.vis: - k, poles = rebinned(return_k=True, complex=False) - for pole in poles: plt.plot(k, k*pole) - tracers = tracer - if tracer2 is not None: tracers += ' x ' + tracer2 - plt.title('{} {:.2f} < z {:.2f} in {}'.format(tracers, zmin, zmax, region)) - plt.show() diff --git a/scripts/mock_tools/qso_cat_match_dr16q.py b/scripts/mock_tools/qso_cat_match_dr16q.py deleted file mode 100644 index 9b155b0ca..000000000 --- a/scripts/mock_tools/qso_cat_match_dr16q.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import glob -import os -from pathlib import Path - -from astropy import units as u -from astropy.coordinates import match_coordinates_sky, SkyCoord -from astropy.table import Table, hstack -import numpy as np -import fitsio - -parser = argparse.ArgumentParser(description="Match the input catalog to DR16Q.") - -parser.add_argument("-o", "--out-dir", type=str, required=True, help="Directory to save matched catalog to.") -group = parser.add_mutually_exclusive_group(required=True) -group.add_argument("-f", "--fuji", action="store_true", help="Match against Fuji catalog.") -group.add_argument("-g", "--guadalupe", action="store_true", help="Match against Guadalupe catalog.") -group.add_argument("-d", "--daily", action="store_true", help="Match against daily catalog.") -group.add_argument("-a", "--all", action="store_true", help="Match against combined catalogs.") - -args = parser.parse_args() - -out_loc = Path(args.out_dir) -if not os.path.isdir(out_loc): - os.mkdir(out_loc) - -# Load each of the two releases individually -releases = [] -if args.guadalupe or args.all: - releases.append("guadalupe") -if args.fuji or args.all: - releases.append("fuji") -if args.daily or args.all: - releases.append("daily") - -desi_tables = {} - -for r in releases: - if r == 'daily': - ROOT = "/global/cfs/cdirs/desi/survey/catalogs/main/LSS/daily/" - fname = 
"QSO_catalog.fits" - else: - ROOT = f"/global/cfs/cdirs/desi/users/edmondc/QSO_catalog/{r}/" - fname = f"QSO_cat_{r}_healpix.fits" - - with fitsio.FITS(ROOT + fname) as h: - desi_tables[r] = h[1].read() - print(f"Loaded {fname}...") - -# Combine the two releases into a single table -desi_table_combined = desi_table = np.concatenate([desi_tables[k] for k in releases]) - -# Pull out the RA/DEC for use in matching. -desi_ra = np.asarray([i["TARGET_RA"] for i in desi_table]) -desi_dec = np.asarray([i["TARGET_DEC"] for i in desi_table]) - -desi_skycoords = SkyCoord(ra=desi_ra, dec=desi_dec, unit="deg") - -# Loads DR16 -DR16Q_ROOT = "/global/cfs/cdirs/sdss/staging/dr16/eboss/qso/DR16Q/" -dr16q_fname = "DR16Q_v4.fits" - -cols_eboss = ["RA", "DEC", "Z", "PLATE", "MJD", "FIBERID"] - -with fitsio.FITS(DR16Q_ROOT + dr16q_fname) as h: - eboss_table = h[1].read_columns(columns=cols_eboss) - print(f"Loaded {dr16q_fname}...") - -eboss_ra = np.asarray([i["RA"] for i in eboss_table]) -eboss_dec = np.asarray([i["DEC"] for i in eboss_table]) -eboss_skycoords = SkyCoord(ra=eboss_ra, dec=eboss_dec, unit="deg") - -# This is the line that actually matches the two table RA/DECs to each other -print("Matching...") -idx, sep2d, dist3d = match_coordinates_sky(desi_skycoords, eboss_skycoords) - -# 2d seperation in arc seconds to constrain our search radius. -d2d = np.asarray(sep2d.to(u.arcsec)) - -# Keep everything whose match is within 1 arcsecond -# Eseentially deciding everything that close is "correct" -match_keep = d2d < 1 -_, keep_counts = np.unique(idx[match_keep], return_counts=True) -print(f"Matched {np.sum(match_keep)} entries from input catalog to DR16Q.") - -# If there are any double matches we'll need to handle that -if np.any(keep_counts) > 1: - print("Double matches found...") - -# Reduces the tables to the matched entries using the indices of matches -desi_keep = Table(desi_table[match_keep]) -eboss_keep = Table(eboss_table[idx][match_keep]) -eboss_keep.rename_column("Z", "Z_SDSS") -joined = hstack([desi_keep, eboss_keep]) - -# Drops the SDSS RA/DEC from the joined table, since we already have these from -# the DESI portion of the table. -del joined["RA"] -del joined["DEC"] - -# Setting the save name. 
-out_name = "QSO_cat_fujilupe_healpix_DR16Q_match.fits" -if args.fuji: - out_name = "QSO_cat_fuji_healpix_DR16Q_match.fits" -elif args.guadalupe: - out_name = "QSO_cat_guadalupe_healpix_DR16Q_match.fits" -elif args.daily: - out_name = "QSO_cat_daily_tile_DR16Q_match.fits" - -joined.write(out_loc / out_name, format="fits", overwrite=True) - diff --git a/scripts/mock_tools/readwrite_pixel_bitmask.py b/scripts/mock_tools/readwrite_pixel_bitmask.py deleted file mode 100644 index 2943c43f8..000000000 --- a/scripts/mock_tools/readwrite_pixel_bitmask.py +++ /dev/null @@ -1,145 +0,0 @@ -# Get bitmask values from pixel-level per-brick masks for a catalog -# Examples: -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --tracer lrg --input catalog.fits --output catalog_lrgmask_v1.1.npy -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --tracer lrg --input /global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits --output $CSCRATCH/temp/randoms-1-0-lrgmask_v1.1.fits -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmask.py --tracer lrg --input /global/cfs/cdirs/desi/users/rongpu/targets/dr9.0/1.0.0/resolve/dr9_lrg_south_1.0.0_basic.fits --output $CSCRATCH/temp/dr9_lrg_south_1.0.0_lrgmask_v1.1.fits - -from __future__ import division, print_function -import sys, os, glob, time, warnings, gc -import numpy as np -import matplotlib.pyplot as plt -from astropy.table import Table, vstack, hstack, join -import fitsio - -from astropy.io import fits -from astropy import wcs - -from multiprocessing import Pool -import argparse - - -time_start = time.time() - -n_processes = 32 - -parser = argparse.ArgumentParser() -parser.add_argument('-t', '--tracer', required=True) -parser.add_argument('-i', '--input', required=True) -#parser.add_argument('-o', '--output', required=True) -parser.add_argument('-v', '--version', default='none', required=False) -parser.add_argument('-rv', '--tarver', default='targetsDR9v1.1.1', required=False) -parser.add_argument( '--ran', default=False, required=False,type=bool) -args = parser.parse_args() - - -input_path = '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/'+args.input+args.tarver+'.fits' -output_path = '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/'+args.input+args.tarver+'_'+args.tracer+'imask.fits' -if args.ran: - input_path = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-'+str(args.input)+'.fits' - output_path = '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/randoms-1-'+str(args.input)+args.tracer+'imask.fits' - -tracer = args.tracer.lower() -version = args.version - -version_dict = {'lrg': 'v1.1', 'elg': 'v1'} -if version=='none': - version = version_dict[tracer] - -bitmask_dir = '/global/cfs/cdirs/desi/survey/catalogs/brickmasks/{}/{}'.format(tracer.upper(), version) - -# input_path = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits' -# output_path = '/global/cscratch1/sd/rongpu/temp/randoms-1-0-lrgmask_v1.fits' - -if os.path.isfile(output_path): - raise ValueError(output_path+' already exists!') - - -def bitmask_radec(brickid, ra, dec): - - brick_index = np.where(bricks['BRICKID']==brickid)[0][0] - - brickname = str(bricks['BRICKNAME'][brick_index]) - if bricks['PHOTSYS'][brick_index]=='N': - field = 'north' - elif bricks['PHOTSYS'][brick_index]=='S': - field = 'south' - else: - # raise ValueError - # Outside DR9 footprint; assign mask bit 7 - bitmask = np.full(len(ra), 2**7, 
dtype=np.uint8)
-        return bitmask
-
-    # bitmask_fn = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/{}/coadd/{}/{}/legacysurvey-{}-maskbits.fits.fz'.format(field, brickname[:3], brickname, brickname)
-    bitmask_fn = os.path.join(bitmask_dir, '{}/coadd/{}/{}/{}-{}mask.fits.gz'.format(field, brickname[:3], brickname, brickname, tracer))
-
-    bitmask_img = fitsio.read(bitmask_fn)
-
-    header = fits.open(bitmask_fn)[1].header
-    w = wcs.WCS(header)
-
-    coadd_x, coadd_y = w.wcs_world2pix(ra, dec, 0)
-    coadd_x, coadd_y = np.round(coadd_x).astype(int), np.round(coadd_y).astype(int)
-
-    bitmask = bitmask_img[coadd_y, coadd_x]
-
-    return bitmask
-
-
-def wrapper(bid_index):
-
-    idx = bidorder[bidcnts[bid_index]:bidcnts[bid_index+1]]
-    brickid = bid_unique[bid_index]
-
-    ra, dec = cat['RA'][idx], cat['DEC'][idx]
-    tid = cat['TARGETID'][idx]
-    bitmask = bitmask_radec(brickid, ra, dec)
-
-    data = Table()
-    data['idx'] = idx
-    data['{}_mask'.format(tracer)] = bitmask
-    data['TARGETID'] = tid
-
-    return data
-
-
-# bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/survey-bricks.fits.gz'))
-bricks = Table(fitsio.read('/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/randoms/survey-bricks-dr9-randoms-0.48.0.fits'))
-
-try:
-    cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC', 'BRICKID', 'TARGETID']))
-except ValueError:
-    cat = Table(fitsio.read(input_path, rows=None, columns=['RA', 'DEC', 'TARGETID']))
-
-print(len(cat))
-
-for col in cat.colnames:
-    cat.rename_column(col, col.upper())
-
-if 'TARGET_RA' in cat.colnames:
-    cat.rename_columns(['TARGET_RA', 'TARGET_DEC'], ['RA', 'DEC'])
-
-if 'BRICKID' not in cat.colnames:
-    from desiutil import brick
-    tmp = brick.Bricks(bricksize=0.25)
-    cat['BRICKID'] = tmp.brickid(cat['RA'], cat['DEC'])
-
-# Just some tricks to speed things up
-bid_unique, bidcnts = np.unique(cat['BRICKID'], return_counts=True)
-bidcnts = np.insert(bidcnts, 0, 0)
-bidcnts = np.cumsum(bidcnts)
-bidorder = np.argsort(cat['BRICKID'])
-
-# start multiple worker processes
-with Pool(processes=n_processes) as pool:
-    res = pool.map(wrapper, np.arange(len(bid_unique)))
-
-res = vstack(res)
-res.sort('idx')
-res.remove_column('idx')
-
-if output_path.endswith('.fits'):
-    res.write(output_path)
-else:
-    np.save(output_path, np.array(res['{}_mask'.format(tracer)]))
-
-print('Done!', time.strftime("%H:%M:%S", time.gmtime(time.time() - time_start)))
diff --git a/scripts/mock_tools/recon.py b/scripts/mock_tools/recon.py
deleted file mode 100644
index bf0ec8307..000000000
--- a/scripts/mock_tools/recon.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-import os
-import argparse
-import logging
-
-import numpy as np
-from astropy.table import Table, vstack
-
-from pyrecon import MultiGridReconstruction, IterativeFFTReconstruction, IterativeFFTParticleReconstruction, utils, setup_logging
-from LSS.tabulated_cosmo import TabulatedDESI
-
-from xirunpc import get_clustering_positions_weights, catalog_dir, catalog_fn, get_regions, get_zlims, get_scratch_dir
-
-
-logger = logging.getLogger('recon')
-
-
-def run_reconstruction(Reconstruction, distance, data_fn, randoms_fn, data_rec_fn, randoms_rec_fn, f=0.8, bias=1.2, boxsize=None, nmesh=None, cellsize=7, smoothing_radius=15, nthreads=8, convention='reciso', dtype='f4', **kwargs):
-
-    if np.ndim(randoms_fn) == 0: randoms_fn = [randoms_fn]
-    if np.ndim(randoms_rec_fn) == 0: randoms_rec_fn = [randoms_rec_fn]
-
-    logger.info('Loading {}.'.format(data_fn))
-    data = Table.read(data_fn)
-    (ra, dec, 
dist), data_weights, mask = get_clustering_positions_weights(data, distance, name='data', return_mask=True, **kwargs) - data = data[mask] - data_positions = utils.sky_to_cartesian(dist, ra, dec, dtype=dtype) - recon = Reconstruction(f=f, bias=bias, boxsize=boxsize, nmesh=nmesh, cellsize=cellsize, los='local', positions=data_positions, nthreads=nthreads, fft_engine='fftw', dtype=dtype) - - recon.assign_data(data_positions, data_weights) - for fn in randoms_fn: - logger.info('Loading {}.'.format(fn)) - (ra, dec, dist), randoms_weights = get_clustering_positions_weights(Table.read(fn), distance, name='randoms', **kwargs) - randoms_positions = utils.sky_to_cartesian(dist, ra, dec, dtype=dtype) - recon.assign_randoms(randoms_positions, randoms_weights) - - recon.set_density_contrast(smoothing_radius=smoothing_radius) - recon.run() - - field = 'disp+rsd' - if type(recon) is IterativeFFTParticleReconstruction: - data_positions_rec = recon.read_shifted_positions('data', field=field) - else: - data_positions_rec = recon.read_shifted_positions(data_positions, field=field) - - distance_to_redshift = utils.DistanceToRedshift(distance) - catalog = Table(data) - dist, ra, dec = utils.cartesian_to_sky(data_positions_rec) - catalog['RA'], catalog['DEC'], catalog['Z'] = ra, dec, distance_to_redshift(dist) - logger.info('Saving {}.'.format(data_rec_fn)) - utils.mkdir(os.path.dirname(data_rec_fn)) - catalog.write(data_rec_fn, format='fits', overwrite=True) - - field = 'disp+rsd' if convention == 'recsym' else 'disp' - for fn, rec_fn in zip(randoms_fn, randoms_rec_fn): - catalog = Table.read(fn) - (ra, dec, dist), randoms_weights, mask = get_clustering_positions_weights(catalog, distance, name='randoms', return_mask=True, **kwargs) - catalog = catalog[mask] - randoms_positions = utils.sky_to_cartesian(dist, ra, dec, dtype=dtype) - dist, ra, dec = utils.cartesian_to_sky(recon.read_shifted_positions(randoms_positions, field=field)) - catalog['RA'], catalog['DEC'], catalog['Z'] = ra, dec, distance_to_redshift(dist) - logger.info('Saving {}.'.format(rec_fn)) - utils.mkdir(os.path.dirname(rec_fn)) - catalog.write(rec_fn, format='fits', overwrite=True) - - -def run_realspace_reconstruction(Reconstruction, distance, data_fn, randoms_fn, data_rec_fn, f=0.8, bias=1.2, boxsize=None, nmesh=None, cellsize=7, smoothing_radius=15, nthreads=8, dtype='f4', **kwargs): - - convention = 'RSD' - - if np.ndim(randoms_fn) == 0: randoms_fn = [randoms_fn] - #if np.ndim(randoms_rec_fn) == 0: randoms_rec_fn = [randoms_rec_fn] - - logger.info('Loading {}.'.format(data_fn)) - data = Table.read(data_fn) - (ra, dec, dist), data_weights, mask = get_clustering_positions_weights(data, distance, name='data', return_mask=True, **kwargs) - data = data[mask] - data_positions = utils.sky_to_cartesian(dist, ra, dec, dtype=dtype) - recon = Reconstruction(f=f, bias=bias, boxsize=boxsize, nmesh=nmesh, cellsize=cellsize, los='local', positions=data_positions, nthreads=nthreads, fft_engine='fftw', dtype=dtype) - - recon.assign_data(data_positions, data_weights) - for fn in randoms_fn: - logger.info('Loading {}.'.format(fn)) - (ra, dec, dist), randoms_weights = get_clustering_positions_weights(Table.read(fn), distance, name='randoms', **kwargs) - randoms_positions = utils.sky_to_cartesian(dist, ra, dec, dtype=dtype) - recon.assign_randoms(randoms_positions, randoms_weights) - - recon.set_density_contrast(smoothing_radius=smoothing_radius) - recon.run() - - field = 'rsd' - if type(recon) is IterativeFFTParticleReconstruction: - data_positions_rec = 
recon.read_shifted_positions('data', field=field)
-    else:
-        data_positions_rec = recon.read_shifted_positions(data_positions, field=field)
-
-    distance_to_redshift = utils.DistanceToRedshift(distance)
-    catalog = Table(data)
-    dist, ra, dec = utils.cartesian_to_sky(data_positions_rec)
-    catalog['RA'], catalog['DEC'], catalog['Z'] = ra, dec, distance_to_redshift(dist)
-    logger.info('Saving {}.'.format(data_rec_fn))
-    utils.mkdir(os.path.dirname(data_rec_fn))
-    catalog.write(data_rec_fn, format='fits', overwrite=True)
-
-
-def get_f_bias(tracer='ELG'):
-    if tracer.startswith('ELG') or tracer.startswith('QSO'):
-        return 0.9, 1.3
-    if tracer.startswith('LRG'):
-        return 0.8, 2.
-    if tracer.startswith('BGS'):
-        return 0.67, 1.5
-
-    return 0.8, 1.2
-
-
-if __name__ == '__main__':
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--tracer', help='tracer to be selected', type=str, default='ELG')
-    parser.add_argument('--indir', help='where to find catalogs', type=str, default='/global/cfs/cdirs/desi/survey/catalogs/')
-    parser.add_argument('--survey', help='e.g., SV3 or main', type=str, choices=['SV3', 'DA02', 'main'], default='DA02')
-    parser.add_argument('--verspec', help='version for redshifts', type=str, default='guadalupe')
-    parser.add_argument('--version', help='catalog version', type=str, default='test')
-    parser.add_argument('--region', help='regions; by default, run on all regions', type=str, nargs='*', choices=['NGC','SGC','N', 'S', 'DN', 'DS', ''], default=None)
-    parser.add_argument('--zlim', help='z-limits, or options for z-limits, e.g. "highz", "lowz"', type=str, nargs='*', default=None)
-    parser.add_argument('--weight_type', help='types of weights to use; "default" just uses WEIGHT column', type=str, default='default')
-    parser.add_argument('--nran', help='number of random files to combine together (1-18 available)', type=int, default=5)
-    parser.add_argument('--nthreads', help='number of threads', type=int, default=64)
-    parser.add_argument('--outdir', help='base directory for output (default: SCRATCH)', type=str, default=None)
-    parser.add_argument('--algorithm', help='reconstruction algorithm', type=str, choices=['MG', 'IFT', 'IFTP'], default='MG')
-    parser.add_argument('--convention', help='reconstruction convention', type=str, choices=['reciso', 'recsym'], default='reciso')
-    parser.add_argument('--f', help='growth rate', type=float, default=None)
-    parser.add_argument('--bias', help='bias', type=float, default=None)
-    parser.add_argument('--boxsize', help='box size', type=float, default=None)
-    parser.add_argument('--nmesh', help='mesh size', type=int, default=None)
-    parser.add_argument('--cellsize', help='cell size', type=float, default=7)
-    parser.add_argument('--smoothing_radius', help='smoothing radius', type=float, default=15)
-    parser.add_argument('--prepare_blinding', help='Use this flag to create a real-space catalog that can be used as input for RSD blinding', type=bool,default=False)#,action='store_true'
-
-    setup_logging()
-    args = parser.parse_args()
-
-    Reconstruction = {'MG': MultiGridReconstruction, 'IFT': IterativeFFTReconstruction, 'IFTP': IterativeFFTParticleReconstruction}[args.algorithm]
-
-    if os.path.normpath(args.indir) == os.path.normpath('/global/cfs/cdirs/desi/survey/catalogs/'):
-        cat_dir = catalog_dir(base_dir=args.indir, survey=args.survey, verspec=args.verspec, version=args.version)
-    elif os.path.normpath(args.indir) == os.path.normpath('/global/project/projectdirs/desi/users/acarnero/mtl_mock000_univ1/'):
-        cat_dir = args.indir
-        
args.region = [''] - else: - cat_dir = args.indir - logger.info('Input directory is {}.'.format(cat_dir)) - - if args.outdir is None: - out_dir = os.path.join(get_scratch_dir(), args.survey) - else: - out_dir = args.outdir - logger.info('Output directory is {}.'.format(out_dir)) - - distance = TabulatedDESI().comoving_radial_distance - - f, bias = get_f_bias(args.tracer) - if args.f is not None: f = args.f - if args.bias is not None: bias = args.bias - - regions = args.region - if regions is None: - regions = get_regions(args.survey, rec=True) - - if args.zlim is None: - zlims = get_zlims(args.tracer) - elif not args.zlim[0].replace('.', '').isdigit(): - zlims = get_zlims(args.tracer, option=args.zlim[0]) - else: - zlims = [float(zlim) for zlim in args.zlim] - zlims = [(zlims[0], zlims[-1])] - - for zmin, zmax in zlims: - for region in regions: - logger.info('Running reconstruction in region {} in redshift range {} with f, bias = {}.'.format(region, (zmin, zmax), (f, bias))) - catalog_kwargs = dict(tracer=args.tracer, region=region, ctype='clustering', nrandoms=args.nran, survey=args.survey) - data_fn = catalog_fn(**catalog_kwargs, cat_dir=cat_dir, name='data') - randoms_fn = catalog_fn(**catalog_kwargs, cat_dir=cat_dir, name='randoms') - data_rec_fn = catalog_fn(**catalog_kwargs, cat_dir=out_dir, rec_type=args.algorithm+args.convention, name='data') - randoms_rec_fn = catalog_fn(**catalog_kwargs, cat_dir=out_dir, rec_type=args.algorithm+args.convention, name='randoms') - data_realspacerec_fn = catalog_fn(**catalog_kwargs, cat_dir=out_dir, rec_type=args.algorithm+'rsd', name='data') - if args.prepare_blinding: - run_realspace_reconstruction(Reconstruction, distance, data_fn, randoms_fn, data_realspacerec_fn, f=f, bias=bias, boxsize=args.boxsize, nmesh=args.nmesh, cellsize=args.cellsize, smoothing_radius=args.smoothing_radius, nthreads=args.nthreads, dtype='f4', zlim=(zmin, zmax), weight_type=args.weight_type) - else: - run_reconstruction(Reconstruction, distance, data_fn, randoms_fn, data_rec_fn, randoms_rec_fn, f=f, bias=bias, boxsize=args.boxsize, nmesh=args.nmesh, cellsize=args.cellsize, smoothing_radius=args.smoothing_radius, nthreads=args.nthreads, convention=args.convention, dtype='f4', zlim=(zmin, zmax), weight_type=args.weight_type) diff --git a/scripts/mock_tools/summary_numbers.py b/scripts/mock_tools/summary_numbers.py deleted file mode 100644 index 3cfd9ae23..000000000 --- a/scripts/mock_tools/summary_numbers.py +++ /dev/null @@ -1,51 +0,0 @@ -import matplotlib.pyplot as plt -import numpy as np -import os -import sys -import argparse - -import fitsio -from astropy.table import join,Table -import healpy as hp - -from LSS.imaging import densvar - -parser = argparse.ArgumentParser() -parser.add_argument("--version", help="catalog version",default='EDAbeta') -parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='SV3') -parser.add_argument("--tracers", help="all runs all for given survey",default='all') -parser.add_argument("--verspec",help="version for redshifts",default='fuji') -args = parser.parse_args() - - -indir = '/global/cfs/cdirs/desi/survey/catalogs/'+args.survey+'/LSS/'+args.verspec+'/LSScats/'+args.version+'/' -zcol = 'Z' -nran = 18 - -tps = [args.tracers] -if args.tracers == 'all': - tps = ['QSO','LRG','BGS_BRIGHT','ELG_LOPnotqso'] - -zdw = ''#'zdone' - -regl = ['_N','_S'] - -if args.survey == 'SV3' and args.tracers == 'all': - tps = ['QSO','LRG','BGS_ANY','ELGnotqso'] - -tot = 0 -for tp in tps: - - for nr in range(0,nran): - rffh = 
fitsio.read_header(indir+tp+zdw+'_'+str(nr)+'_full.ran.fits',ext=1) - print(tp+' area is '+str(rffh['NAXIS2']/2500)+' deg2, using random '+str(nr)) - - tot_tp = 0 - for reg in regl: - dtf = fitsio.read_header(indir+tp+zdw+reg+'_clustering.dat.fits',ext=1) - ncat = dtf['NAXIS2'] - print('number for '+tp+' in '+reg +' is '+str(ncat)) - tot_tp += ncat - print('number for '+tp+' is '+str(tot_tp)) - tot += tot_tp -print('total number for '+args.survey +' is '+str(tot)) \ No newline at end of file diff --git a/scripts/mock_tools/xiruncz.py b/scripts/mock_tools/xiruncz.py deleted file mode 100644 index e3467d588..000000000 --- a/scripts/mock_tools/xiruncz.py +++ /dev/null @@ -1,193 +0,0 @@ -#make sure to type these two commands: -#export OMP_NUM_THREADS=64 -#module load gsl -#python xiruncz.py --type ELG_HIP -import subprocess -import sys -import argparse -import os -#sys.path.append('../py') -#import LSS.mkCat_singletile.xitools as xt -#import LSS.SV3.xitools as xt - - -parser = argparse.ArgumentParser() -parser.add_argument("--type", help="tracer type to be selected") -parser.add_argument("--basedir", help="base directory for output, default is desi catalog directory",default='/global/cfs/cdirs/desi/survey/catalogs') -parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test') -parser.add_argument("--verspec",help="version for redshifts",default='everest') -parser.add_argument("--survey",help="e.g., SV3 or main",default='SV3') -parser.add_argument("--nran",help="number of random files to combine together (1-18 available)",default=10) - -args = parser.parse_args() - -type = args.type -basedir = args.basedir -version = args.version -specrel = args.verspec -survey = args.survey -nran = int(args.nran) - -if survey == 'SV3': - import LSS.SV3.xitools as xt -if survey == 'main': - import LSS.main.xitools as xt - -lssdir = basedir+'/'+survey+'/LSS/'+specrel+'/LSScats/' -#dirout = svdir+'LSScats/'+version+'/' - -zmask = [''] -minn = 0 - -subt = None -if type == 'LRGAlltiles' or type == 'LRGAlltiles_main': - zl = [0.32,0.6,0.8,1.05,1.3] - #minn = 2 - #zmin=0.32 - #zmax=1.05 - -if type == 'LRG': - zl = [0.4,0.6,0.8,1.1] -# minn = 5 - #zmin=0.32 - #zmax=1.05 - - -if type == 'LRG_OPT': - subt = type - zmin=0.6 - zmax=1. - type = 'LRG' - -if type == 'LRG_IR': - subt = type - zmin=0.6 - zmax=1. - type = 'LRG' - - -if type[:3] == 'ELG':# or type == 'ELG_HIP': - #minn = 5 - zl = [0.8,1.1,1.5] - #zmask = ['','_zmask'] - - #zmin = 0.8 - #zmax = 1.6 - -#if type == 'ELG_HIP': -# zmin = 0.8 -# zmax = 1.6 -if type == 'ELG_HIP16': - minn = 5 - zl = [1,1.6] - type = 'ELG_HIP' - -if type == 'ELG16': - minn = 5 - zl = [1,1.6] - type = 'ELG' - - -if type == 'ELGlz': - zmin = 0.6 - zmax = 0.8 - type = 'ELG' - -if type == 'ELGmz': - zmin = 0.8 - zmax = 1.1 - type = 'ELG' - -if type == 'ELGhz': - zmin = 1.1 - zmax = 1.6 - type = 'ELG' - -if type == 'ELGmhz': - zmin = 0.6 - zmax = 1.497 - type = 'ELG' - -if type == 'ELGhz497': - zmin = 1.1 - zmax = 1.497 - type = 'ELG' - -if type == 'QSO': - zl = [0.8,1.1,1.5,2.1] - #zmin = 1. 
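
(Aside on how the zl edge lists set in this script are consumed: the loop at the bottom builds consecutive redshift bins plus one full-range bin. A minimal sketch with a hypothetical helper:)

def zbin_pairs(zl):
    # Consecutive (zmin, zmax) pairs, then the full range as the final pass,
    # mirroring the for-loop over range(0, len(zl)) at the end of this script.
    pairs = [(zl[i], zl[i + 1]) for i in range(len(zl) - 1)]
    pairs.append((zl[0], zl[-1]))
    return pairs

# zbin_pairs([0.8, 1.1, 1.5, 2.1]) -> [(0.8, 1.1), (1.1, 1.5), (1.5, 2.1), (0.8, 2.1)]
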
-    #zmax = 2.1
-
-if type == 'QSOhiz':
-    zmin = 1.6
-    zmax = 2.1
-    type = 'QSO'
-
-if type == 'QSOlya':
-    #zmin = 2.1
-    #zmax = 3.5
-    zl = [2.1,3.5]
-    type = 'QSO'
-
-
-if type == 'QSO_RF_4PASS':
-    subt = type
-    zmin = 1.6
-    zmax = 2.1
-    type = 'QSO'
-
-if type == 'ELG_FDR_GFIB':
-    subt = type
-    zmin = 1.1
-    zmax = 1.6
-    type = 'ELG'
-
-if type[:3] == 'BGS':
-    #minn = 2
-    zl = [0.1,0.3,0.5]
-    #zmin = 0.1
-    #zmax = 0.5
-
-if type == 'BGS_hiz':
-    zmin = 0.3
-    zmax = 0.5
-    type = 'BGS_ANY'
-
-ranwt1=False
-
-regl = ['_N','_S']
-
-if survey == 'main':
-    regl = ['_DN','_DS','_N','_S']
-
-for i in range(0,len(zl)):
-    if i == len(zl)-1:
-        zmin=zl[0]
-        zmax=zl[-1]
-    else:
-        zmin = zl[i]
-        zmax = zl[i+1]
-    print(zmin,zmax)
-    for zma in zmask:
-        for reg in regl:
-            xt.prep4czxi(type,zmin,zmax,nran=nran,indir=lssdir,ver=version,minn=minn,reg=zma+reg,outdir=os.environ['CSCRATCH']+'/cz/',ranwt1=ranwt1,subt=subt)
-            subprocess.run(['chmod','+x','czpc.sh'])
-            subprocess.run('./czpc.sh')
-            fa = ''
-            if ranwt1:
-                fa = 'ranwt1'
-            if subt is not None:
-                fa += subt
-            xt.calcxi_dataCZ(type,zmin,zmax,minn=minn,reg=zma+reg,ver=version,fa=fa)
-
-
-        xt.prep4czxi(type,zmin,zmax,nran=nran,indir=lssdir,ver=version,minn=minn,reg=zma,outdir=os.environ['CSCRATCH']+'/cz/',ranwt1=ranwt1,subt=subt)
-        subprocess.run(['chmod','+x','czpc.sh'])
-        subprocess.run('./czpc.sh')
-        fa = ''
-        if ranwt1:
-            fa = 'ranwt1'
-        if subt is not None:
-            fa += subt
-        xt.calcxi_dataCZ(type,zmin,zmax,minn=minn,ver=version,fa=fa,reg=zma)
-
diff --git a/scripts/mock_tools/xirunpc.py b/scripts/mock_tools/xirunpc.py
deleted file mode 100644
index 548c0e60a..000000000
--- a/scripts/mock_tools/xirunpc.py
+++ /dev/null
@@ -1,672 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-import os
-import argparse
-import logging
-
-import numpy as np
-from astropy.table import Table, vstack
-from matplotlib import pyplot as plt
-
-from pycorr import TwoPointCorrelationFunction, TwoPointEstimator, KMeansSubsampler, utils, setup_logging
-from LSS.tabulated_cosmo import TabulatedDESI
-
-
-logger = logging.getLogger('xirunpc')
-
-
-def get_scratch_dir():
-    if os.environ['NERSC_HOST'] == 'cori':
-        scratch_dir = os.environ['CSCRATCH']
-        os.system('export OMP_NUM_THREADS=64')
-    elif os.environ['NERSC_HOST'] == 'perlmutter':
-        scratch_dir = os.environ['PSCRATCH']
-        os.system('export OMP_NUM_THREADS=128')
-    else:
-        msg = 'NERSC_HOST is not cori or perlmutter but is {};\n'.format(os.environ['NERSC_HOST'])
-        msg += 'NERSC_HOST not known (code only works on NERSC), not proceeding'
-        raise ValueError(msg)
-    return scratch_dir
-
-
-def get_zlims(tracer, tracer2=None, option=None):
-
-    if tracer2 is not None:
-        zlims1 = get_zlims(tracer, option=option)
-        zlims2 = get_zlims(tracer2, option=option)
-        return [zlim for zlim in zlims1 if zlim in zlims2]
-
-    if tracer.startswith('LRG'):
-        zlims = [0.4, 0.6, 0.8, 1.1]
-
-    if tracer.startswith('ELG'):# or type == 'ELG_HIP':
-        zlims = [0.8, 1.1, 1.6]
-        if option:
-            if option == 'safez':
-                zlims = [0.9, 1.48]
-            if 'extended' in option:
-                logger.warning('extended is no longer a meaningful option')
-                #zlims = [0.8, 1.1, 1.6]
-            if 'smallshells' in option:
-                zlims = [0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6]
-
-    if tracer.startswith('QSO'):
-        zlims = [0.8, 1.1, 1.6, 2.1, 3.5]
-        if option == 'highz':
-            zlims = [2.1, 3.5]
-        if option == 'lowz':
-            zlims = [0.8, 2.1]
-
-    if tracer.startswith('BGS'):
-        zlims = [0.1, 0.3, 0.5]
-        if option == 'lowz':
-            zlims = [0.1, 0.3]
-        if option == 'highz':
-            zlims = [0.3, 0.5]
-
-    if option == 'fullonly':
zlims = [zlims[0], zlims[-1]] - - return zlims - - -def get_regions(survey, rec=False): - regions = ['N', 'S']#, ''] - #if survey in ['main', 'DA02']: - # regions = ['DN', 'DS', 'N', 'S'] - # if rec: regions = ['DN', 'N'] - return regions - - -def select_region(ra, dec, region): - mask_ra = (ra > 100 - dec) - mask_ra &= (ra < 280 + dec) - if region == 'DN': - mask = dec < 32.375 - mask &= mask_ra - elif region == 'DS': - mask = dec > -25 - mask &= ~mask_ra - else: - raise ValueError('Input region must be one of ["DN", "DS"].') - return mask - - -def catalog_dir(survey='main', verspec='guadalupe', version='test', base_dir='/global/cfs/cdirs/desi/survey/catalogs'): - return os.path.join(base_dir, survey, 'LSS', verspec, 'LSScats', version) - - - -def catalog_fn(tracer='ELG', region='', ctype='clustering', name='data', ran_sw='',rec_type=False, nrandoms=4, cat_dir=None, survey='main', **kwargs): - if cat_dir is None: - cat_dir = catalog_dir(survey=survey, **kwargs) - #if survey in ['main', 'DA02']: - # tracer += 'zdone' - if 'edav1' in cat_dir: - cat_dir += ctype - - if ctype == 'full': - region = '' - dat_or_ran = name[:3] - if name == 'randoms' and tracer == 'LRG_main' and ctype == 'full': - tracer = 'LRG' - if region: region = '_' + region - if rec_type: - dat_or_ran = '{}.{}'.format(rec_type, dat_or_ran) - if name == 'data': - return os.path.join(cat_dir, '{}{}_{}.{}.fits'.format(tracer, region, ctype, dat_or_ran)) - return [os.path.join(cat_dir, '{}{}{}_{:d}_{}.{}.fits'.format(tracer, ran_sw, region, iran, ctype, dat_or_ran)) for iran in range(nrandoms)] - - -def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim=None, weight_type='default', name='data', return_mask=False, option=None): - - if maglim is None: - mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) - if maglim is not None: - mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) & (catalog['ABSMAG_R'] >= maglim[0]) & (catalog['ABSMAG_R'] < maglim[1]) - - if option: - if 'elgzmask' in option: - zmask = ((catalog['Z'] >= 1.49) & (catalog['Z'] < 1.52)) - mask &= ~zmask - logger.info('Using {:d} rows for {}.'.format(mask.sum(), name)) - positions = [catalog['RA'][mask], catalog['DEC'][mask], distance(catalog['Z'][mask])] - weights = np.ones_like(positions[0]) - - if 'completeness_only' in weight_type and 'bitwise' in weight_type: - raise ValueError('inconsistent choices were put into weight_type') - - if name == 'data': - if 'zfail' in weight_type: - weights *= catalog['WEIGHT_ZFAIL'][mask] - if 'default' in weight_type and 'bitwise' not in weight_type: - weights *= catalog['WEIGHT'][mask] - if 'RF' in weight_type: - weights *= catalog['WEIGHT_RF'][mask]*catalog['WEIGHT_COMP'][mask] - if 'completeness_only' in weight_type: - weights = catalog['WEIGHT_COMP'][mask] - if 'EB' in weight_type: - weights *= catalog['WEIGHT_SYSEB'][mask]*catalog['WEIGHT_COMP'][mask] - if 'FKP' in weight_type: - weights *= catalog['WEIGHT_FKP'][mask] - if 'bitwise' in weight_type: - if catalog['BITWEIGHTS'].ndim == 2: weights = list(catalog['BITWEIGHTS'][mask].T) + [weights] - else: weights = [catalog['BITWEIGHTS'][mask]] + [weights] - - if name == 'randoms': - if 'default' in weight_type: - weights *= catalog['WEIGHT'][mask] - if 'RF' in weight_type: - weights *= catalog['WEIGHT_RF'][mask]*catalog['WEIGHT_COMP'][mask] - if 'zfail' in weight_type: - weights *= catalog['WEIGHT_ZFAIL'][mask] - if 'completeness_only' in weight_type: - weights = catalog['WEIGHT_COMP'][mask] - if 'EB' in weight_type: - weights *= 
catalog['WEIGHT_SYSEB'][mask]*catalog['WEIGHT_COMP'][mask] - if 'FKP' in weight_type: - weights *= catalog['WEIGHT_FKP'][mask] - - if return_mask: - return positions, weights, mask - return positions, weights - - -def _concatenate(arrays): - if isinstance(arrays[0], (tuple, list)): # e.g., list of bitwise weights for first catalog - array = [np.concatenate([arr[iarr] for arr in arrays], axis=0) for iarr in range(len(arrays[0]))] - else: - array = np.concatenate(arrays, axis=0) # e.g. individual weights for first catalog - return array - - -def read_clustering_positions_weights(distance, zlim =(0., np.inf), maglim =None,weight_type='default', name='data', concatenate=False, option=None, region=None, **kwargs): - - if 'GC' in region: - region = [region] - - def read_positions_weights(name): - positions, weights = [], [] - for reg in region: - cat_fns = catalog_fn(ctype='clustering', name=name, region=reg, **kwargs) - logger.info('Loading {}.'.format(cat_fns)) - isscalar = not isinstance(cat_fns, (tuple, list)) - if isscalar: - cat_fns = [cat_fns] - positions_weights = [get_clustering_positions_weights(Table.read(cat_fn), distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns] - if isscalar: - positions.append(positions_weights[0][0]) - weights.append(positions_weights[0][1]) - else: - p, w = [tmp[0] for tmp in positions_weights], [tmp[1] for tmp in positions_weights] - if concatenate: - p, w = _concatenate(p), _concatenate(w) - positions.append(p) - weights.append(w) - return positions, weights - - if isinstance(name, (tuple, list)): - return [read_positions_weights(n) for n in name] - return read_positions_weights(name) - - -def get_full_positions_weights(catalog, name='data', weight_type='default', fibered=False, region='', return_mask=False): - - mask = np.ones(len(catalog), dtype='?') - if region in ['DS', 'DN']: - mask &= select_region(catalog['RA'], catalog['DEC'], region) - elif region: - mask &= catalog['PHOTSYS'] == region.strip('_') - - if fibered: mask &= catalog['LOCATION_ASSIGNED'] - positions = [catalog['RA'][mask], catalog['DEC'][mask], catalog['DEC'][mask]] - if name == 'data' and fibered and 'bitwise' in weight_type: - if catalog['BITWEIGHTS'].ndim == 2: weights = list(catalog['BITWEIGHTS'][mask].T) - else: weights = [catalog['BITWEIGHTS'][mask]] - else: weights = np.ones_like(positions[0]) - if return_mask: - return positions, weights, mask - return positions, weights - - -def read_full_positions_weights(name='data', weight_type='default', fibered=False, region='', **kwargs): - - def read_positions_weights(name): - positions, weights = [], [] - for reg in region: - cat_fn = catalog_fn(ctype='full', name=name, **kwargs) - logger.info('Loading {}.'.format(cat_fn)) - if isinstance(cat_fn, (tuple, list)): - catalog = vstack([Table.read(fn) for fn in cat_fn]) - else: - catalog = Table.read(cat_fn) - p, w = get_full_positions_weights(catalog, name=name, weight_type=weight_type, fibered=fibered, region=reg) - positions.append(p) - weights.append(w) - return positions, weights - - if isinstance(name, (tuple, list)): - return [read_positions_weights(n) for n in name] - return read_positions_weights(name) - - -def normalize_data_randoms_weights(data_weights, randoms_weights, weight_attrs=None): - # Renormalize randoms / data for each input catalogs - # data_weights should be a list (for each N/S catalogs) of weights - import inspect - from pycorr.twopoint_counter import _format_weights, get_inverse_probability_weight - if 
weight_attrs is None: weight_attrs = {} - weight_attrs = {k: v for k, v in weight_attrs.items() if k in inspect.getargspec(get_inverse_probability_weight).args} - wsums, weights = {}, {} - for name, catalog_weights in zip(['data', 'randoms'], [data_weights, randoms_weights]): - wsums[name], weights[name] = [], [] - for w in catalog_weights: - w, nbits = _format_weights(w, copy=True) # this will sort bitwise weights first, then single individual weight - iip = get_inverse_probability_weight(w[:nbits], **weight_attrs) if nbits else 1. - iip = iip * w[nbits] - wsums[name].append(iip.sum()) - weights[name].append(w) - wsum_data, wsum_randoms = sum(wsums['data']), sum(wsums['randoms']) - for icat, w in enumerate(weights['randoms']): - factor = wsums['data'][icat] / wsums['randoms'][icat] * wsum_randoms / wsum_data - w[-1] *= factor - logger.info('Rescaling randoms weights of catalog {:d} by {:.4f}.'.format(icat, factor)) - return weights['data'], weights['randoms'] - - -def concatenate_data_randoms(data, randoms=None, **kwargs): - - if randoms is None: - positions, weights = data - return _concatenate(positions), _concatenate(weights) - - positions, weights = {}, {} - for name in ['data', 'randoms']: - positions[name], weights[name] = locals()[name] - for name in positions: - concatenated = not isinstance(positions[name][0][0], (tuple, list)) # first catalog, unconcatenated [RA, DEC, distance] (False) or concatenated RA (True)? - if concatenated: - positions[name] = _concatenate(positions[name]) - else: - positions[name] = [_concatenate([p[i] for p in positions[name]]) for i in range(len(positions['randoms'][0]))] - data_weights, randoms_weights = [], [] - if concatenated: - wd, wr = normalize_data_randoms_weights(weights['data'], weights['randoms'], weight_attrs=kwargs.get('weight_attrs', None)) - weights['data'], weights['randoms'] = _concatenate(wd), _concatenate(wr) - else: - for i in range(len(weights['randoms'][0])): - wd, wr = normalize_data_randoms_weights(weights['data'], [w[i] for w in weights['randoms']], weight_attrs=kwargs.get('weight_attrs', None)) - data_weights.append(_concatenate(wd)) - randoms_weights.append(_concatenate(wr)) - weights['data'] = data_weights[0] - for wd in data_weights[1:]: - for w0, w in zip(weights['data'], wd): assert np.all(w == w0) - weights['randoms'] = randoms_weights - return [(positions[name], weights[name]) for name in ['data', 'randoms']] - - -def compute_angular_weights(nthreads=8, dtype='f8', tracer='ELG', tracer2=None, mpicomm=None, mpiroot=None, **kwargs): - - autocorr = tracer2 is None - catalog_kwargs = kwargs - - fibered_data_positions1, fibered_data_weights1, fibered_data_positions2, fibered_data_weights2 = None, None, None, None - parent_data_positions1, parent_data_weights1, parent_data_positions2, parent_data_weights2 = None, None, None, None - parent_randoms_positions1, parent_randoms_weights1, parent_randoms_positions2, parent_randoms_weights2 = None, None, None, None - - if mpicomm is None or mpicomm.rank == mpiroot: - - fibered_data = read_full_positions_weights(name='data', fibered=True, tracer=tracer, **catalog_kwargs) - parent_data, parent_randoms = read_full_positions_weights(name=['data', 'randoms'], fibered=False, tracer=tracer, **catalog_kwargs) - fibered_data_positions1, fibered_data_weights1 = concatenate_data_randoms(fibered_data) - (parent_data_positions1, parent_data_weights1), (parent_randoms_positions1, parent_randoms_weights1) = concatenate_data_randoms(parent_data, parent_randoms, **catalog_kwargs) - if not autocorr: - 
fibered_data = read_full_positions_weights(name='data', fibered=True, tracer=tracer2, **catalog_kwargs) - parent_data, parent_randoms = read_full_positions_weights(name=['data', 'randoms'], fibered=False, tracer=tracer2, **catalog_kwargs) - fibered_data_positions2, fibered_data_weights2 = concatenate_data_randoms(fibered_data) - (parent_data_positions2, parent_data_weights2), (parent_randoms_positions2, parent_randoms_weights2) = concatenate_data_randoms(parent_data, parent_randoms, **catalog_kwargs) - - tedges = np.logspace(-4., 0.5, 41) - # First D1D2_parent/D1D2_PIP angular weight - wangD1D2 = TwoPointCorrelationFunction('theta', tedges, data_positions1=fibered_data_positions1, data_weights1=fibered_data_weights1, - data_positions2=fibered_data_positions2, data_weights2=fibered_data_weights2, - randoms_positions1=parent_data_positions1, randoms_weights1=parent_data_weights1, - randoms_positions2=parent_data_positions2, randoms_weights2=parent_data_weights2, - estimator='weight', engine='corrfunc', position_type='rdd', nthreads=nthreads, - dtype=dtype, mpicomm=mpicomm, mpiroot=mpiroot) - - # First D1R2_parent/D1R2_IIP angular weight - # Input bitwise weights are automatically turned into IIP - if autocorr: - parent_randoms_positions2, parent_randoms_weights2 = parent_randoms_positions1, parent_randoms_weights1 - wangD1R2 = TwoPointCorrelationFunction('theta', tedges, data_positions1=fibered_data_positions1, data_weights1=fibered_data_weights1, - data_positions2=parent_randoms_positions2, data_weights2=parent_randoms_weights2, - randoms_positions1=parent_data_positions1, randoms_weights1=parent_data_weights1, - randoms_positions2=parent_randoms_positions2, randoms_weights2=parent_randoms_weights2, - estimator='weight', engine='corrfunc', position_type='rdd', nthreads=nthreads, - dtype=dtype, mpicomm=mpicomm, mpiroot=mpiroot) - wangR1D2 = None - if not autocorr: - wangR1D2 = TwoPointCorrelationFunction('theta', tedges, data_positions1=parent_randoms_positions1, data_weights1=parent_randoms_weights1, - data_positions2=fibered_data_positions2, data_weights2=fibered_data_weights2, - randoms_positions1=parent_randoms_positions1, randoms_weights1=parent_randoms_weights1, - randoms_positions2=parent_data_positions2, randoms_weights2=parent_data_weights2, - estimator='weight', engine='corrfunc', position_type='rdd', nthreads=nthreads, - dtype=dtype, mpicomm=mpicomm, mpiroot=mpiroot) - - wang = {} - wang['D1D2_twopoint_weights'] = wangD1D2 - wang['D1R2_twopoint_weights'] = wangD1R2 - wang['R1D2_twopoint_weights'] = wangR1D2 - - return wang - - -def compute_correlation_function(corr_type, edges, distance, nthreads=8, dtype='f8', wang=None, split_randoms_above=30., weight_type='default', tracer='ELG', tracer2=None, rec_type=None, njack=120, option=None, mpicomm=None, mpiroot=None, **kwargs): - - autocorr = tracer2 is None - catalog_kwargs = kwargs.copy() - catalog_kwargs['weight_type'] = weight_type - with_shifted = rec_type is not None - - if 'angular' in weight_type and wang is None: - - wang = compute_angular_weights(nthreads=nthreads, dtype=dtype, weight_type=weight_type, tracer=tracer, tracer2=tracer2, mpicomm=mpicomm, mpiroot=mpiroot, **kwargs) - - data_positions1, data_weights1, data_samples1, data_positions2, data_weights2, data_samples2 = None, None, None, None, None, None - randoms_positions1, randoms_weights1, randoms_samples1, randoms_positions2, randoms_weights2, randoms_samples2 = None, None, None, None, None, None - shifted_positions1, shifted_weights1, shifted_samples1, 
shifted_positions2, shifted_weights2, shifted_samples2 = None, None, None, None, None, None - jack_positions = None - - if mpicomm is None or mpicomm.rank == mpiroot: - - data, randoms = read_clustering_positions_weights(distance, name=['data', 'randoms'], rec_type=rec_type, tracer=tracer, option=option, **catalog_kwargs) - if with_shifted: - shifted = randoms # above returned shifted randoms - randoms = read_clustering_positions_weights(distance, name='randoms', rec_type=False, tracer=tracer, option=option, **catalog_kwargs) - (data_positions1, data_weights1), (randoms_positions1, randoms_weights1) = concatenate_data_randoms(data, randoms, **catalog_kwargs) - if with_shifted: - shifted_positions1, shifted_weights1 = concatenate_data_randoms(data, shifted, **catalog_kwargs)[1] - jack_positions = data_positions1 - - if not autocorr: - data, randoms = read_clustering_positions_weights(distance, name=['data', 'randoms'], rec_type=rec_type, tracer=tracer2, option=option, **catalog_kwargs) - if with_shifted: - shifted = randoms - randoms = read_clustering_positions_weights(distance, name='randoms', rec_type=False, tracer=tracer2, option=option, **catalog_kwargs) - (data_positions2, data_weights2), (randoms_positions2, randoms_weights2) = concatenate_data_randoms(data, randoms, **catalog_kwargs) - if with_shifted: - shifted_positions2, shifted_weights2 = concatenate_data_randoms(data, shifted, **catalog_kwargs)[1] - jack_positions = [np.concatenate([p1, p2], axis=0) for p1, p2 in zip(jack_positions, data_positions2)] - - if njack >= 2: - subsampler = KMeansSubsampler('angular', positions=jack_positions, nsamples=njack, nside=512, random_state=42, position_type='rdd', - dtype=dtype, mpicomm=mpicomm, mpiroot=mpiroot) - - if mpicomm is None or mpicomm.rank == mpiroot: - data_samples1 = subsampler.label(data_positions1) - randoms_samples1 = [subsampler.label(p) for p in randoms_positions1] - if with_shifted: - shifted_samples1 = [subsampler.label(p) for p in shifted_positions1] - if not autocorr: - data_samples2 = subsampler.label(data_positions2) - randoms_samples2 = [subsampler.label(p) for p in randoms_positions2] - if with_shifted: - shifted_samples2 = [subsampler.label(p) for p in shifted_positions2] - - kwargs = {} - kwargs.update(wang or {}) - randoms_kwargs = dict(randoms_positions1=randoms_positions1, randoms_weights1=randoms_weights1, randoms_samples1=randoms_samples1, - randoms_positions2=randoms_positions2, randoms_weights2=randoms_weights2, randoms_samples2=randoms_samples2, - shifted_positions1=shifted_positions1, shifted_weights1=shifted_weights1, shifted_samples1=shifted_samples1, - shifted_positions2=shifted_positions2, shifted_weights2=shifted_weights2, shifted_samples2=shifted_samples2) - - zedges = np.array(list(zip(edges[0][:-1], edges[0][1:]))) - mask = zedges[:,0] >= split_randoms_above - zedges = [zedges[~mask], zedges[mask]] - split_edges, split_randoms = [], [] - for ii, zedge in enumerate(zedges): - if zedge.size: - split_edges.append([np.append(zedge[:,0], zedge[-1,-1])] + list(edges[1:])) - split_randoms.append(ii > 0) - - results = [] - if mpicomm is None: - nran = len(randoms_positions1) - else: - nran = mpicomm.bcast(len(randoms_positions1) if mpicomm.rank == mpiroot else None, root=mpiroot) - for i_split_randoms, edges in zip(split_randoms, split_edges): - result = 0 - D1D2 = None - for iran in range(nran if i_split_randoms else 1): - tmp_randoms_kwargs = {} - if i_split_randoms: - # On scales above split_randoms_above, sum correlation function over multiple randoms 
- for name, arrays in randoms_kwargs.items(): - if arrays is None: - continue - else: - tmp_randoms_kwargs[name] = arrays[iran] - else: - # On scales below split_randoms_above, concatenate randoms - for name, arrays in randoms_kwargs.items(): - if arrays is None: - continue - elif isinstance(arrays[0], (tuple, list)): # e.g., list of bitwise weights - array = [np.concatenate([arr[iarr] for arr in arrays], axis=0) for iarr in range(len(arrays[0]))] - else: - array = np.concatenate(arrays, axis=0) - tmp_randoms_kwargs[name] = array - tmp = TwoPointCorrelationFunction(corr_type, edges, data_positions1=data_positions1, data_weights1=data_weights1, data_samples1=data_samples1, - data_positions2=data_positions2, data_weights2=data_weights2, data_samples2=data_samples2, - engine='corrfunc', position_type='rdd', nthreads=nthreads, dtype=dtype, **tmp_randoms_kwargs, **kwargs, - D1D2=D1D2, mpicomm=mpicomm, mpiroot=mpiroot) - D1D2 = tmp.D1D2 - result += tmp - results.append(result) - return results[0].concatenate_x(*results), wang - - -def get_edges(corr_type='smu', bin_type='lin'): - - if bin_type == 'log': - sedges = np.geomspace(0.01, 100., 49) - elif bin_type == 'lin': - sedges = np.linspace(0., 200, 201) - else: - raise ValueError('bin_type must be one of ["log", "lin"]') - if corr_type == 'smu': - edges = (sedges, np.linspace(-1., 1., 201)) #s is input edges and mu evenly spaced between -1 and 1 - elif corr_type == 'rppi': - if bin_type == 'lin': - edges = (sedges, np.linspace(-200., 200, 401)) #transverse and radial separations are coded to be the same here - else: - edges = (sedges, np.linspace(0., 40., 41)) - elif corr_type == 'theta': - edges = np.linspace(0., 4., 101) - else: - raise ValueError('corr_type must be one of ["smu", "rppi", "theta"]') - return edges - - -def corr_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax=np.inf, rec_type=False, weight_type='default', bin_type='lin', njack=0, nrandoms=8, split_randoms_above=10, out_dir='.', option=None, wang=None): - if tracer2: tracer += '_' + tracer2 - if rec_type: tracer += '_' + rec_type - if region: tracer += '_' + region - if option: - zmax = str(zmax) + option - split = '_split{:.0f}'.format(split_randoms_above) if split_randoms_above < np.inf else '' - wang = '{}_'.format(wang) if wang is not None else '' - root = '{}{}_{}_{}_{}_{}_njack{:d}_nran{:d}{}'.format(wang, tracer, zmin, zmax, weight_type, bin_type, njack, nrandoms, split) - if file_type == 'npy': - return os.path.join(out_dir, 'allcounts_{}.npy'.format(root)) - return os.path.join(out_dir, '{}_{}.txt'.format(file_type, root)) - - -if __name__ == '__main__': - - parser = argparse.ArgumentParser() - parser.add_argument('--tracer', help='tracer(s) to be selected - 2 for cross-correlation', type=str, nargs='+', default=['ELG']) - parser.add_argument('--basedir', help='where to find catalogs', type=str, default='/global/cfs/cdirs/desi/survey/catalogs/') - parser.add_argument('--survey', help='e.g., SV3, DA02, etc.', type=str, default='SV3') - parser.add_argument('--verspec', help='version for redshifts', type=str, default='guadalupe') - parser.add_argument('--version', help='catalog version', type=str, default='test') - parser.add_argument('--region', help='regions; by default, run on N, S; pass NS to run on concatenated N + S', type=str, nargs='*', choices=['N', 'S', 'NS','NGC','SGC'], default=None) - parser.add_argument('--zlim', help='z-limits, or options for z-limits, e.g. 
"highz", "lowz", "fullonly"', type=str, nargs='*', default=None) - parser.add_argument('--maglim', help='absolute r-band magnitude limits', type=str, nargs='*', default=None) - parser.add_argument('--corr_type', help='correlation type', type=str, nargs='*', choices=['smu', 'rppi', 'theta'], default=['smu', 'rppi']) - parser.add_argument('--weight_type', help='types of weights to use; use "default_angular_bitwise" for PIP with angular upweighting; "default" just uses WEIGHT column', type=str, default='default') - parser.add_argument('--bin_type', help='binning type', type=str, choices=['log', 'lin'], default='lin') - parser.add_argument('--nran', help='number of random files to combine together (1-18 available)', type=int, default=4) - parser.add_argument('--split_ran_above', help='separation scale above which RR are summed over each random file;\ - typically, most efficient for xi < 1, i.e. sep > 10 Mpc/h;\ - see https://arxiv.org/pdf/1905.01133.pdf', type=float, default=20) - parser.add_argument('--njack', help='number of jack-knife subsamples; 0 for no jack-knife error estimates', type=int, default=60) - parser.add_argument('--nthreads', help='number of threads', type=int, default=64) - parser.add_argument('--outdir', help='base directory for output (default: SCRATCH)', type=str, default=None) - #parser.add_argument('--mpi', help='whether to use MPI', action='store_true', default=False) - parser.add_argument('--vis', help='show plot of each xi?', action='store_true', default=False) - parser.add_argument('--rebinning', help='whether to rebin the xi or just keep the original .npy file', default='y') - - #only relevant for reconstruction - parser.add_argument('--rec_type', help='reconstruction algorithm + reconstruction convention', choices=['IFTPrecsym', 'IFTPreciso','IFTrecsym', 'IFTreciso', 'MGrecsym', 'MGreciso'], type=str, default=None) - - setup_logging() - args = parser.parse_args() - - if args.rebinning == 'n': - args.rebinning = False - if args.rebinning == 'y': - args.rebinning = True - - mpicomm, mpiroot = None, None - if True:#args.mpi: - from pycorr import mpi - mpicomm = mpi.COMM_WORLD - mpiroot = 0 - - if os.path.normpath(args.basedir) == os.path.normpath('/global/cfs/cdirs/desi/survey/catalogs/'): - cat_dir = catalog_dir(base_dir=args.basedir, survey=args.survey, verspec=args.verspec, version=args.version) - elif os.path.normpath(args.basedir) == os.path.normpath('/global/project/projectdirs/desi/users/acarnero/mtl_mock000_univ1/'): - cat_dir = args.basedir - args.region = [''] - else: - cat_dir = args.basedir - if mpicomm is None or mpicomm.rank == mpiroot: - logger.info('Catalog directory is {}.'.format(cat_dir)) - - if args.outdir is None: - out_dir = os.path.join(get_scratch_dir(), args.survey) - else: - out_dir = args.outdir - if mpicomm is None or mpicomm.rank == mpiroot: - logger.info('Output directory is {}.'.format(out_dir)) - - tracer, tracer2 = args.tracer[0], None - if len(args.tracer) > 1: - tracer2 = args.tracer[1] - if len(args.tracer) > 2: - raise ValueError('Provide <= 2 tracers!') - if tracer2 == tracer: - tracer2 = None # otherwise counting of self-pairs - catalog_kwargs = dict(tracer=tracer, tracer2=tracer2, survey=args.survey, cat_dir=cat_dir, rec_type=args.rec_type) # survey required for zdone - distance = TabulatedDESI().comoving_radial_distance - - regions = args.region - if regions is None: - regions = get_regions(args.survey, rec=bool(args.rec_type)) - - option = None - if args.zlim is None: - zlims = get_zlims(tracer, tracer2=tracer2) - elif not 
args.zlim[0].replace('.', '').isdigit(): - option = args.zlim[0] - zlims = get_zlims(tracer, tracer2=tracer2, option=option) - else: - zlims = [float(zlim) for zlim in args.zlim] - - - if args.maglim is not None: - magmin = float(args.maglim[0]) - magmax = float(args.maglim[1]) - maglims = (magmin,magmax) - else: - maglims = None - - zlims = list(zip(zlims[:-1], zlims[1:])) + ([(zlims[0], zlims[-1])] if len(zlims) > 2 else []) # len(zlims) == 2 == single redshift range - rebinning_factors = [1, 4, 5, 10] if 'lin' in args.bin_type else [1, 2, 4] - pi_rebinning_factors = [1, 4, 5, 10] if 'log' in args.bin_type else [1] - if mpicomm is None or mpicomm.rank == mpiroot: - logger.info('Computing correlation functions {} in regions {} in redshift ranges {}.'.format(args.corr_type, regions, zlims)) - - for zmin, zmax in zlims: - base_file_kwargs = dict(tracer=tracer, tracer2=tracer2, zmin=zmin, zmax=zmax, rec_type=args.rec_type, weight_type=args.weight_type, bin_type=args.bin_type, njack=args.njack, nrandoms=args.nran, split_randoms_above=args.split_ran_above, option=option) - for region in regions: - wang = None - for corr_type in args.corr_type: - if mpicomm is None or mpicomm.rank == mpiroot: - logger.info('Computing correlation function {} in region {} in redshift range {}.'.format(corr_type, region, (zmin, zmax))) - edges = get_edges(corr_type=corr_type, bin_type=args.bin_type) - result, wang = compute_correlation_function(corr_type, edges=edges, distance=distance, nrandoms=args.nran, split_randoms_above=args.split_ran_above, nthreads=args.nthreads, region=region, zlim=(zmin, zmax), maglim=maglims, weight_type=args.weight_type, njack=args.njack, wang=wang, mpicomm=mpicomm, mpiroot=mpiroot, option=option, **catalog_kwargs) - # Save pair counts - if mpicomm is None or mpicomm.rank == mpiroot: - result.save(corr_fn(file_type='npy', region=region, out_dir=os.path.join(out_dir, corr_type), **base_file_kwargs)) - if mpicomm is None or mpicomm.rank == mpiroot: - if wang is not None: - for name in wang: - if wang[name] is not None: - wang[name].save(corr_fn(file_type='npy', region=region, out_dir=os.path.join(out_dir, 'wang'), **base_file_kwargs, wang=name)) - - # Save combination and .txt files - for corr_type in args.corr_type: - all_regions = regions.copy() - if mpicomm is None or mpicomm.rank == mpiroot: - if 'N' in regions and 'S' in regions: # let's combine - result = sum([TwoPointCorrelationFunction.load( - corr_fn(file_type='npy', region=region, out_dir=os.path.join(out_dir, corr_type), **base_file_kwargs)).normalize() for region in ['N', 'S']]) - result.save(corr_fn(file_type='npy', region='NScomb', out_dir=os.path.join(out_dir, corr_type), **base_file_kwargs)) - all_regions.append('NScomb') - if args.rebinning: - for region in all_regions: - txt_kwargs = base_file_kwargs.copy() - txt_kwargs.update(region=region, out_dir=os.path.join(out_dir, corr_type)) - result = TwoPointCorrelationFunction.load(corr_fn(file_type='npy', **txt_kwargs)) - for factor in rebinning_factors: - #result = TwoPointEstimator.load(fn) - rebinned = result[:(result.shape[0] // factor) * factor:factor] - txt_kwargs.update(bin_type=args.bin_type+str(factor)) - if corr_type == 'smu': - fn_txt = corr_fn(file_type='xismu', **txt_kwargs) - rebinned.save_txt(fn_txt) - fn_txt = corr_fn(file_type='xipoles', **txt_kwargs) - rebinned.save_txt(fn_txt, ells=(0, 2, 4)) - fn_txt = corr_fn(file_type='xiwedges', **txt_kwargs) - rebinned.save_txt(fn_txt, wedges=(-1., -2./3, -1./3, 0., 1./3, 2./3, 1.)) - elif corr_type == 'rppi': - 
fn_txt = corr_fn(file_type='wp', **txt_kwargs) - rebinned.save_txt(fn_txt, pimax=40.) - for pifac in pi_rebinning_factors: - rebinned = result[:(result.shape[0]//factor)*factor:factor,:(result.shape[1]//pifac)*pifac:pifac] - txt_kwargs.update(bin_type=args.bin_type+str(factor)+'_'+str(pifac)) - fn_txt = corr_fn(file_type='xirppi', **txt_kwargs) - rebinned.save_txt(fn_txt) - elif corr_type == 'theta': - fn_txt = corr_fn(file_type='theta', **txt_kwargs) - rebinned.save_txt(fn_txt) - - if args.vis: - if corr_type == 'smu': - sep, xis = rebinned(ells=(0, 2, 4), return_sep=True, return_std=False) - elif corr_type == 'rppi': - sep, xis = rebinned(pimax=40, return_sep=True, return_std=False) - else: - sep, xis = rebinned(return_sep=True, return_std=False) - if args.bin_type == 'log': - for xi in xis: plt.loglog(sep, xi) - if args.bin_type == 'lin': - for xi in xis: plt.plot(sep, sep**2 * xi) - tracers = tracer - if tracer2 is not None: tracers += ' x ' + tracer2 - plt.title('{} {:.2f} < z {:.2f} in {}'.format(tracers, zmin, zmax, region)) - plt.show() diff --git a/scripts/xirunpc.py b/scripts/xirunpc.py index 39aca790d..dd0e173f5 100644 --- a/scripts/xirunpc.py +++ b/scripts/xirunpc.py @@ -455,8 +455,6 @@ def compute_correlation_function(corr_type, edges, distance, nthreads=8, dtype=' jack_positions = [np.concatenate([p1, p2], axis=0) for p1, p2 in zip(jack_positions, data_positions2)] if njack >= 2: - print('jack_positions') - print(jack_positions) subsampler = KMeansSubsampler('angular', positions=jack_positions, nsamples=njack, nside=512, random_state=42, position_type='rdd', dtype=dtype, mpicomm=mpicomm, mpiroot=mpiroot) From 96707fc7adf323d98d1ba1d34f52dd496ef3f7a4 Mon Sep 17 00:00:00 2001 From: jalasker Date: Tue, 21 Nov 2023 12:06:18 -0800 Subject: [PATCH 009/297] removed comments and print statements from cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index f4b69b52d..54b7d9bfb 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -63,9 +63,7 @@ def get_zlims(tracer, tracer2=None, option=None): logger.warning('extended is no longer a meaningful option') #zlims = [0.8, 1.1, 1.6] if 'smallshells' in option: - zlims = [0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6] - if 'uchuu' in option.lower(): - zlims = [0.88, 1.00, 1.16, 1.34] + zlims = [0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6] if tracer.startswith('QSO'): zlims = [0.8, 1.1, 1.6, 2.1] @@ -146,24 +144,15 @@ def _format_bitweights(bitweights): def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim=None, weight_type='default', name='data', return_mask=False, option=None): - try: - logger.info(catalog.shape) - except: - try: - logger.info(len(catalog)) - except: - logger.info('catalog has neither shape nor len') if maglim is None: mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) if maglim is not None: mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) & (catalog['ABSMAG_R'] >= maglim[0]) & (catalog['ABSMAG_R'] < maglim[1]) - logger.info('np.sum(mask) = {0:d}'.format(np.sum(mask))) if option: if 'elgzmask' in option: zmask = ((catalog['Z'] >= 1.49) & (catalog['Z'] < 1.52)) mask &= ~zmask - logger.info('np.sum(mask) = {0:d}'.format(np.sum(mask))) logger.info('Using {:d} rows for {}.'.format(mask.sum(), name)) positions = [catalog['RA'][mask], catalog['DEC'][mask], distance(catalog['Z'][mask])] From 
2cfaafc467c7866d7a465c86fc797018878a0c2f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 10 Jan 2024 14:58:03 -0500 Subject: [PATCH 010/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index cb24564c2..dd077235c 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -566,7 +566,18 @@ def _wrapper(N): logf.write('added bitweights to data catalogs for '+tp+' '+str(datetime.now())) fn = dirout+type+notqso+'_full'+args.use_map_veto+'.dat.fits' print(fn) - ff = fitsio.read(fn) + ff = Table(fitsio.read(fn)) + try: + ff.remove_columns(['BITWEIGHTS_1','PROBOBS_1','BITWEIGHTS_2','PROBOBS_2']) + print('removed ','BITWEIGHTS_1','PROBOBS_1','BITWEIGHTS_2','PROBOBS_2') + except: + pass + try: + ff.remove_columns(['BITWEIGHTS','PROBOBS']) + print('removed ','BITWEIGHTS','PROBOBS') + except: + pass + if type[:3] != 'BGS': bitf = fitsio.read(mainp.darkbitweightfile) else: From 4671091e2fab69f2c16b3b81bf2e0d5a2efe0be5 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 10 Jan 2024 15:08:40 -0500 Subject: [PATCH 011/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index dd077235c..529ec86b1 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -567,16 +567,16 @@ def _wrapper(N): fn = dirout+type+notqso+'_full'+args.use_map_veto+'.dat.fits' print(fn) ff = Table(fitsio.read(fn)) - try: - ff.remove_columns(['BITWEIGHTS_1','PROBOBS_1','BITWEIGHTS_2','PROBOBS_2']) - print('removed ','BITWEIGHTS_1','PROBOBS_1','BITWEIGHTS_2','PROBOBS_2') - except: - pass - try: - ff.remove_columns(['BITWEIGHTS','PROBOBS']) - print('removed ','BITWEIGHTS','PROBOBS') - except: - pass + #try: + ff.remove_columns(['BITWEIGHTS_1','PROBOBS_1','BITWEIGHTS_2','PROBOBS_2']) + print('removed ','BITWEIGHTS_1','PROBOBS_1','BITWEIGHTS_2','PROBOBS_2') + #except: + # print('not removing 1/2 bitweights') + #try: + ff.remove_columns(['BITWEIGHTS','PROBOBS']) + print('removed ','BITWEIGHTS','PROBOBS') + #except: + # print('not removing bitweights') if type[:3] != 'BGS': bitf = fitsio.read(mainp.darkbitweightfile) From 501f0c1319f38e3af061c5ce1a1a6939099e5ddb Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 10 Jan 2024 15:10:00 -0500 Subject: [PATCH 012/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index 529ec86b1..476fe0e78 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -568,13 +568,13 @@ def _wrapper(N): print(fn) ff = Table(fitsio.read(fn)) #try: - ff.remove_columns(['BITWEIGHTS_1','PROBOBS_1','BITWEIGHTS_2','PROBOBS_2']) + ff.remove_columns(['BITWEIGHTS_1','PROB_OBS_1','BITWEIGHTS_2','PROB_OBS_2']) print('removed ','BITWEIGHTS_1','PROBOBS_1','BITWEIGHTS_2','PROBOBS_2') #except: # print('not removing 1/2 bitweights') #try: - ff.remove_columns(['BITWEIGHTS','PROBOBS']) - print('removed ','BITWEIGHTS','PROBOBS') + ff.remove_columns(['BITWEIGHTS','PROB_OBS']) + print('removed ','BITWEIGHTS','PROB_OBS') #except: # print('not removing bitweights') From e54f94f321ea165c34dcfc9275c3b4c60747be79 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 10 Jan 2024 15:15:52 -0500 Subject: [PATCH 013/297] Update mkCat_main.py --- 
scripts/main/mkCat_main.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index 476fe0e78..974623641 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -567,16 +567,16 @@ def _wrapper(N): fn = dirout+type+notqso+'_full'+args.use_map_veto+'.dat.fits' print(fn) ff = Table(fitsio.read(fn)) - #try: - ff.remove_columns(['BITWEIGHTS_1','PROB_OBS_1','BITWEIGHTS_2','PROB_OBS_2']) - print('removed ','BITWEIGHTS_1','PROBOBS_1','BITWEIGHTS_2','PROBOBS_2') - #except: - # print('not removing 1/2 bitweights') - #try: - ff.remove_columns(['BITWEIGHTS','PROB_OBS']) - print('removed ','BITWEIGHTS','PROB_OBS') - #except: - # print('not removing bitweights') + try: + ff.remove_columns(['BITWEIGHTS_1','PROB_OBS_1','BITWEIGHTS_2','PROB_OBS_2']) + print('removed ','BITWEIGHTS_1','PROBOBS_1','BITWEIGHTS_2','PROBOBS_2') + except: + print('not removing 1/2 bitweights') + try: + ff.remove_columns(['BITWEIGHTS','PROB_OBS']) + print('removed ','BITWEIGHTS','PROB_OBS') + except: + print('not removing bitweights') if type[:3] != 'BGS': bitf = fitsio.read(mainp.darkbitweightfile) From e6f4303e60faa79691f975f31033eb15b6d846c1 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 10 Jan 2024 15:26:56 -0500 Subject: [PATCH 014/297] Update LSSpipe_Y1.txt --- Sandbox/LSSpipe_Y1.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt index d9c2148e5..0248651ef 100644 --- a/Sandbox/LSSpipe_Y1.txt +++ b/Sandbox/LSSpipe_Y1.txt @@ -218,3 +218,11 @@ srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/mai srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --fulld n --survey Y1 --verspec iron --version v1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_IMLIN --basedir /global/cfs/cdirs/desi/survey/catalogs/ +#jan 10th re-add bitweights +python scripts/main/mkCat_main.py --type BGS_BRIGHT --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_bitweight y --verspec iron --survey Y1 --version v1 + +python scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_bitweight y --verspec iron --survey Y1 --version v1 + +python scripts/main/mkCat_main.py --type ELG_LOP --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_bitweight y --verspec iron --survey Y1 --version v1 + +python scripts/main/mkCat_main.py --type QSO --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_bitweight y --verspec iron --survey Y1 --version v1 From 9cf668613686fba9a28174de377c0535a0cd95b2 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 11 Jan 2024 15:14:45 -0500 Subject: [PATCH 015/297] v3_1 complete --- scripts/mock_tools/pota2clus_fast.py | 4 ++-- scripts/mock_tools/process2genabv3_1_pota2clus.sh | 12 ++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) create mode 100755 scripts/mock_tools/process2genabv3_1_pota2clus.sh diff --git a/scripts/mock_tools/pota2clus_fast.py b/scripts/mock_tools/pota2clus_fast.py index 3915106ff..007e24150 100644 --- a/scripts/mock_tools/pota2clus_fast.py +++ b/scripts/mock_tools/pota2clus_fast.py @@ -63,7 +63,7 @@ parser.add_argument("--specdata_dir",help="where to find the spec data ",default='/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/') parser.add_argument("--minr", help="minimum number for random files",default=0,type=int) 
parser.add_argument("--maxr", help="maximum for random files, default is all 18)",default=18,type=int) -parser.add_argument("--mockver", default='AbacusSummit_v3', help = "which mocks to use") +parser.add_argument("--mockver", default='AbacusSummit_v3_1', help = "which mocks to use") parser.add_argument("--mockcatver", default=None, help = "if not None, gets added to the output path") parser.add_argument("--tracer", default = 'all') @@ -98,7 +98,7 @@ if args.tracer == 'all': - tracers = ['LRG','ELG_LOP','QSO'] + tracers = ['QSO','LRG','ELG_LOP'] else: tracers = [args.tracer] diff --git a/scripts/mock_tools/process2genabv3_1_pota2clus.sh b/scripts/mock_tools/process2genabv3_1_pota2clus.sh new file mode 100755 index 000000000..ef50eddf5 --- /dev/null +++ b/scripts/mock_tools/process2genabv3_1_pota2clus.sh @@ -0,0 +1,12 @@ +#!/bin/bash +source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main +PYTHONPATH=$PYTHONPATH:$HOME/LSS/py +for (( i=$1;i<=$2;i++ )) +do + srun -N 1 -C cpu -t 00:45:00 --qos interactive --account desi python scripts/mock_tools/pota2clus_fast.py --realization $i + mv $SCRATCH/AbacusSummit_v3_1/mock$i/*GC* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/mock$i/ + mv $SCRATCH/AbacusSummit_v3_1/mock$i/*nz* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/mock$i/ + rm $SCRATCH/AbacusSummit_v3_1/mock$i/* + chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/mock$i/*clustering* + chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/mock$i/*nz* +done \ No newline at end of file From 660ac8b50f05e449115aad7891e5398a81fed000 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 11 Jan 2024 15:20:47 -0500 Subject: [PATCH 016/297] Update getpotaY1_mock.py --- scripts/getpotaY1_mock.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/getpotaY1_mock.py b/scripts/getpotaY1_mock.py index b389ca6cd..ef3a12137 100644 --- a/scripts/getpotaY1_mock.py +++ b/scripts/getpotaY1_mock.py @@ -291,7 +291,7 @@ def parse_datetime(s): try: return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S%z") except ValueError: - d = datetime.strptime(rundate, "%Y-%m-%dT%H:%M:%S") + d = datetime.strptime(s, "%Y-%m-%dT%H:%M:%S") # msg = "Requested run date '{}' is not timezone-aware. 
Assuming UTC.".format(runtime) d = d.replace(tzinfo=timezone.utc) From a4f175ed2c2f56cb3ac1ff635bc09e1e52270c07 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sat, 13 Jan 2024 12:02:18 -0500 Subject: [PATCH 017/297] angular clustering validation plots --- scripts/validation/validation_cl_cluszbin.py | 257 +++++++++++++++++ scripts/validation/validation_clandwtheta.py | 279 +++++++++++++++++++ 2 files changed, 536 insertions(+) create mode 100644 scripts/validation/validation_cl_cluszbin.py create mode 100644 scripts/validation/validation_clandwtheta.py diff --git a/scripts/validation/validation_cl_cluszbin.py b/scripts/validation/validation_cl_cluszbin.py new file mode 100644 index 000000000..1efb9a4b5 --- /dev/null +++ b/scripts/validation/validation_cl_cluszbin.py @@ -0,0 +1,257 @@ +import matplotlib.pyplot as plt +import numpy as np +import os +import sys +import argparse + +import fitsio +from astropy.table import join,Table +import healpy as hp + +from LSS.imaging import densvar +import LSS.common_tools as common + + +parser = argparse.ArgumentParser() +parser.add_argument("--version", help="catalog version",default='v1/unblinded') +parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1') +parser.add_argument("--tracers", help="all runs all for given survey",default='all') +parser.add_argument("--verspec",help="version for redshifts",default='iron') +parser.add_argument("--data",help="LSS or mock directory",default='LSS') +parser.add_argument("--ps",help="point size for density map",default=1,type=float) +parser.add_argument("--dpi",help="resolution in saved density map in dots per inch",default=90,type=int) +args = parser.parse_args() + + +indir = '/global/cfs/cdirs/desi/survey/catalogs/'+args.survey+'/'+args.data+'/'+args.verspec+'/LSScats/'+args.version+'/' +outdir = indir+'plots/angular_power/' +if not os.path.exists(outdir): + os.makedirs(outdir) + +randir = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/' +ranall = fitsio.read(randir+'randoms-allsky-1-0.fits',columns=['RA','DEC']) +th,phi = densvar.radec2thphi(ranall['RA'],ranall['DEC']) +ranpix = hp.ang2pix(256,th,phi) +ranpall = np.zeros(12*256*256) +for pix in ranpix: + ranpall[pix] += 1. + +tps = [args.tracers] +if args.tracers == 'all': + tps = ['LRG','ELG_LOPnotqso','QSO','BGS_BRIGHT'] + +pix_list = np.arange(12*256*256) +th,phi = hp.pix2ang(256,pix_list) +ra,dec = densvar.thphi2radec(th,phi) + + +def get_delta(tp,zmin,zmax,reg,racol='RA',decol='DEC',wts='default',thresh=0,nest=False,appfrac=True,maskreg=None):#,ranpall=None + dat = fitsio.read(indir+tp+reg+'_clustering.dat.fits') + sel = dat['Z'] > zmin + sel &= dat['Z'] < zmax + dat = dat[sel] + ran = fitsio.read(indir+tp+reg+'_0_clustering.ran.fits') + sel = ran['Z'] > zmin + sel &= ran['Z'] < zmax + ran = ran[sel] + + th,phi = densvar.radec2thphi(dat[racol],dat[decol]) + datpix = hp.ang2pix(256,th,phi,nest=nest) + datp = np.zeros(12*256*256) + for i in range(0,len(datpix)): + pix = datpix[i] + if wts == 'default': + datp[pix] += dat[i]['WEIGHT'] + #else: + # datp[pix] += 1. 
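+    # The loop above is just a weighted histogram over HEALPix pixels; a
+    # vectorized sketch (assuming the wts == 'default' branch, so the
+    # 'WEIGHT' column exists) would be
+    #   datp = np.bincount(datpix, weights=dat['WEIGHT'], minlength=12*256*256)
+    # which avoids the per-object Python iteration for large catalogs.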
+ th,phi = densvar.radec2thphi(ran[racol],ran[decol]) + ranpix = hp.ang2pix(256,th,phi,nest=nest) + ranp = np.zeros(12*256*256) + for i in range(0,len(ranpix)): + pix = ranpix[i] + if wts == 'default': + ranp[pix] += ran[i]['WEIGHT'] + if nest: + frac = ranp/ranpall_nest + else: + frac = ranp/ranpall + seltest = (frac*0 == 0) + print('the range for pixel fraction is '+str(min(frac[seltest])),str(max(frac[seltest]))) + sel_thresh = frac > thresh + sel_thresh &= seltest + + mnr = np.sum(datp[sel_thresh])/np.sum(ranp[sel_thresh]) + print('mean data/random is '+str(mnr)) + delta = (datp/ranp/mnr -1) + delta *= frac + delta[~sel_thresh] = hp.UNSEEN + fsky = np.sum(ranp[sel_thresh])/np.sum(ranpall) + Ngal = np.sum(datp[sel_thresh]) + return delta,fsky,frac,Ngal + +zdw = '' + +regl = ['_NGC','_SGC'] + +tpl = ['LRG','LRG','LRG','ELG_LOPnotqso','ELG_LOPnotqso','QSO','BGS_BRIGHT-21.5'] +zrl = [(0.4,0.6),(0.6,0.8),(0.8,1.1),(0.8,1.1),(1.1,1.6),(0.8,2.1),(0.1,0.4)] + +for reg in regl: + for i in range(0,len(tpl)): + tpi = tpl[i] + zri = zrl[i] + delta_i,fsky_i,frac_i,Ngal_i = get_delta(tpi,zri[0],zri[1],reg) + for j in range(i,len(tpl)): + tpj = tpl[j] + zrj = zrl[j] + fname_out = outdir + tpi+'zr'+str(zri[0])+'-'+str(zri[1])+'_cross_'+tpj+'zr'+str(zrj[0])+'-'+str(zrj[1])+reg + delta_j,fsky_j,frac_j,Ngal_j = get_delta(tpj,zrj[0],zrj[1],reg) + cl_ij = hp.anafast(delta_i,delta_j) + lmax = -300 + fsky_eff = np.sqrt(fsky_i*fsky_j) #I doubt this is actually correct...should somehow be cross-correlation of mask? + Neff = np.sqrt(Ngal_i*Ngal_j) #I also doubt this is correct... + plt.loglog(ell[1:lmax],cl_zr[1:lmax]/fsky_eff-4.*np.pi*fsky/Neff) + plt.title(reg.strip('_')+' ' tpi+' '+str(zri[0])+' zmin +# sel_zr &= dtfoz['Z_not4clus'] < zmax +# delta_raw,fsky,frac = get_delta(dtf,ran,maskreg=maskreg) +# cl_raw = hp.anafast(delta_raw) +# ell = np.arange(len(cl_raw)) +# delta_allz,_,_ = get_delta(dtfoz,ran,wts=wt,maskreg=maskreg) +# cl_allz = hp.anafast(delta_allz) +# delta_zr,_,_ = get_delta(dtfoz[sel_zr],ran,wts=wt[sel_zr],maskreg=maskreg) +# cl_zr = hp.anafast(delta_zr) +# print(len(dtf),np.sum(wt),np.sum(wt[sel_zr])) +# neff_oz = (np.sum(wt)+len(dtfoz))/2. +# neff_zr = (np.sum(wt[sel_zr])+len(dtfoz[sel_zr]))/2. 
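+# (This commented-out block mirrors the live per-tracer C_ell and w(theta)
+# loop in validation_clandwtheta.py, added in the same commit; presumably
+# retained here as a reference for a per-zbin variant.)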
+# lmax = -300 +# plt.loglog(ell[1:lmax],cl_raw[1:lmax]/fsky-4.*np.pi*fsky/len(dtf),label='targets in Y1 area') +# plt.loglog(ell[1:lmax],cl_allz[1:lmax]/fsky-4.*np.pi*fsky/neff_oz,label='all z') +# plt.loglog(ell[1:lmax],cl_zr[1:lmax]/fsky-4.*np.pi*fsky/neff_zr,label=str(zmin)+' < z < '+str(zmax)) +# plt.title(tp) +# plt.legend() +# plt.xlabel(r'$\ell$') +# plt.ylabel(r'$C_{\ell}$') +# plt.savefig(outdir+tp+'_cell.png') +# plt.clf() +# print('doing w(theta)') +# sel = delta_raw != hp.UNSEEN +# angl,wth_raw = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_raw[sel],frac[sel]) +# _,wth_allz = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_allz[sel],frac[sel]) +# _,wth_zr = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_zr[sel],frac[sel]) +# +# plt.plot(angl[:-1],1000*angl[:-1]*wth_raw[:-1],label='targets in Y1 area') +# plt.plot(angl[:-1],1000*angl[:-1]*wth_allz[:-1],label='all z') +# plt.plot(angl[:-1],1000*angl[:-1]*wth_zr[:-1],label=str(zmin)+' < z < '+str(zmax)) +# plt.grid() +# plt.title(tp) +# plt.legend() +# plt.xlabel(r'$\theta$') +# plt.ylabel(r'$\theta\times w(\theta)\times 10^3$') +# plt.savefig(outdir+tp+'_wth.png') +# plt.clf() +# +# +# regl = list(maskreg.keys()) +# cls = [] +# cls_raw = [] +# wths = [] +# wths_raw = [] +# fskys = [] +# for reg in regl: +# maskr = maskreg[reg] +# delta_reg,fsky_reg,frac = get_delta(dtfoz[sel_zr],ran,wts=wt[sel_zr],maskreg=maskr) +# delta_reg_raw,_,_ = get_delta(dtf,ran,maskreg=maskr) +# cl_reg = hp.anafast(delta_reg) +# cls.append(cl_reg) +# cl_reg_raw = hp.anafast(delta_reg_raw) +# cls_raw.append(cl_reg_raw) +# +# fskys.append(fsky_reg) +# sel = delta_reg != hp.UNSEEN +# _,wth_reg = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_reg[sel],frac[sel]) +# wths.append(wth_reg) +# _,wth_reg_raw = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_reg_raw[sel],frac[sel]) +# wths_raw.append(wth_reg_raw) +# +# for cl,reg,fsky in zip(cls,regl,fskys): +# plt.loglog(ell[1:],cl[1:]/fsky,label=reg) +# plt.title(tp+' '+str(zmin)+' < z < '+str(zmax)) +# plt.legend() +# plt.xlabel(r'$\ell$') +# plt.ylabel(r'$C_{\ell}$') +# plt.savefig(outdir+tp+'_cell_reg.png') +# plt.clf() +# +# for cl,reg,fsky in zip(cls_raw,regl,fskys): +# plt.loglog(ell[1:],cl[1:]/fsky,label=reg) +# plt.title(tp+' targets in Y1') +# plt.legend() +# plt.xlabel(r'$\ell$') +# plt.ylabel(r'$C_{\ell}$') +# plt.savefig(outdir+tp+'_cell_regtar.png') +# plt.clf() +# +# +# for wth,reg in zip(wths,regl): +# plt.plot(angl[:-1],1000*angl[:-1]*wth[:-1],label=reg) +# plt.title(tp+' '+str(zmin)+' < z < '+str(zmax)) +# plt.legend() +# plt.xlabel(r'$\theta$') +# plt.ylabel(r'$\theta\times w(\theta)\times 10^3$') +# plt.savefig(outdir+tp+'_wth_reg.png') +# plt.clf() +# +# for wth,reg in zip(wths_raw,regl): +# plt.plot(angl[:-1],1000*angl[:-1]*wth[:-1],label=reg) +# plt.title(tp+' targets in Y1') +# plt.legend() +# plt.xlabel(r'$\theta$') +# plt.ylabel(r'$\theta\times w(\theta)\times 10^3$') +# plt.savefig(outdir+tp+'_wth_regtar.png') +# plt.clf() +# +# +# +# \ No newline at end of file diff --git a/scripts/validation/validation_clandwtheta.py b/scripts/validation/validation_clandwtheta.py new file mode 100644 index 000000000..739cca0b8 --- /dev/null +++ b/scripts/validation/validation_clandwtheta.py @@ -0,0 +1,279 @@ +import matplotlib.pyplot as plt +import numpy as np +import os +import sys +import argparse + +import fitsio +from astropy.table import join,Table +import healpy as hp + +from LSS.imaging 
import densvar +import LSS.common_tools as common + + +parser = argparse.ArgumentParser() +parser.add_argument("--version", help="catalog version",default='test') +parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1') +parser.add_argument("--tracers", help="all runs all for given survey",default='all') +parser.add_argument("--verspec",help="version for redshifts",default='iron') +parser.add_argument("--data",help="LSS or mock directory",default='LSS') +parser.add_argument("--ps",help="point size for density map",default=1,type=float) +parser.add_argument("--dpi",help="resolution in saved density map in dots per inch",default=90,type=int) +args = parser.parse_args() + + +indir = '/global/cfs/cdirs/desi/survey/catalogs/'+args.survey+'/'+args.data+'/'+args.verspec+'/LSScats/'+args.version+'/' +outdir = indir+'plots/sky/' + +randir = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/' +ranall = fitsio.read(randir+'randoms-allsky-1-0.fits',columns=['RA','DEC']) +th,phi = densvar.radec2thphi(ranall['RA'],ranall['DEC']) +ranpix = hp.ang2pix(256,th,phi) +ranpall = np.zeros(12*256*256) +for pix in ranpix: + ranpall[pix] += 1. + +tps = [args.tracers] +if args.tracers == 'all': + tps = ['LRG','ELG_LOPnotqso','QSO','BGS_BRIGHT'] + +pix_list = np.arange(12*256*256) +th,phi = hp.pix2ang(256,pix_list) +ra,dec = densvar.thphi2radec(th,phi) + +sindec = np.sin(dec*np.pi/180.) +cosdec = np.cos(dec*np.pi/180.) + +sinra = np.sin(ra*np.pi/180.) +cosra = np.cos(ra*np.pi/180.) + +def get_wtheta_auto(sindec,cosdec,sinra,cosra,odens,frac,thmin=0,thmax=10,bs=.1): + ''' + sines and cosines of ra,dec coordinates, already cut by whatever masking + overdensity (in same pixels) + fractional area of same pixels + ''' + odens /= frac #because it got multiplied by frac for cl + fo = open('tempodenspczw.dat','w') + for i in range(len(sindec)): + fo.write(str(sinra[i])+' '+str(cosra[i])+' '+str(sindec[i])+' '+str(cosdec[i])+' '+str(odens[i])+' '+str(frac[i])+'\n ') + fo.close() + os.system('/global/homes/a/ajross/code/LSSanalysis/pix2p_linbin_test temp 1 '+str(bs)+' '+str(thmax)) + res = np.loadtxt('temp2ptPixclb.dat').transpose() +# nbin = int((thmax-thmin)/bs) +# odl = np.zeros(nbin) +# fracl = np.zeros(nbin) +# binedges = []#np.zeros(nbin+1) +# th = thmax +# while th > thmin: +# be = np.cos(th*np.pi/180.) +# binedges.append(be) +# th -= 0.1 +# print(len(binedges),len(odl)) +# bin_angs = np.flip(np.arange(thmin+bs/2.,thmax,bs)) +# for ii in range(0,len(sindec)): +# for jj in range(ii+1,len(cosdec)): +# cosang = cosdec[ii]*cosdec[jj]*(cosra[ii]*cosra[jj] + sinra[ii]*sinra[jj]) + sindec[ii]*sindec[jj] +# be = binedges[0] +# ba = -1 #start at -1 because of condition below +# while cosang > be: +# ba += 1 +# be = binedges[ba+1] +# +# if ba > -1 and ba < nbin: +# odl[ba] += odens[ii]*odens[jj] #note, frac was already applied to odl +# fracl[ba] += frac[ii]*frac[jj] + return res[0],res[1]#bin_angs,odl/fracl + +def get_delta(dat,ran,racol='RA',decol='DEC',wts=None,wtspix=None,thresh=0,nest=False,appfrac=True,maskreg=None):#,ranpall=None + th,phi = densvar.radec2thphi(dat[racol],dat[decol]) + datpix = hp.ang2pix(256,th,phi,nest=nest) + datp = np.zeros(12*256*256) + for i in range(0,len(datpix)): + pix = datpix[i] + if wts is not None: + datp[pix] += wts[i] + else: + datp[pix] += 1. 
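+    # As in validation_cl_cluszbin.py, this loop is a (weighted) pixel
+    # histogram; a vectorized sketch is
+    #   datp = np.bincount(datpix, weights=wts, minlength=12*256*256)
+    # where weights=None (the wts is None case) reproduces the plain count.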
+ if wtspix is not None: + datp *= wtspix + th,phi = densvar.radec2thphi(ran[racol],ran[decol]) + ranpix = hp.ang2pix(256,th,phi,nest=nest) + ranp = np.zeros(12*256*256) + for pix in ranpix: + ranp[pix] += 1. + #ranp /= rannorm + + sel = ranp > thresh + if maskreg is None: + mnr = np.sum(datp[sel])/np.sum(ranp[sel]) + print(mnr) + delta = (datp/ranp/mnr -1) + elif len(maskreg)==len(datp): + if nest == False: + maskreg = hp.reorder(maskreg,n2r=True) + + sel &= maskreg + mnr = np.sum(datp[sel])/np.sum(ranp[sel]) + delta = (datp/ranp/mnr -1) + + else: + regl = list(maskreg.keys())#['South','North','Des'] + delta = np.zeros(len(datp)) + for reg in regl: + mr = maskreg[reg] + if nest == False: + mr = hp.reorder(mr,n2r=True) + + mnr = np.sum(datp[sel&mr])/np.sum(ranp[sel&mr]) + print(reg,mnr) + delta[mr] = (datp[mr]/ranp[mr]/mnr -1) + #if ranpall is not None: + #if appfrac: + if nest: + frac = ranp/ranpall_nest + else: + frac = ranp/ranpall + delta *= frac + delta[~sel] = hp.UNSEEN + fsky = np.sum(ranp[sel])/np.sum(ranpall) + return delta,fsky,frac + +zdw = '' + +for tp in tps: + print('doing '+tp) + dtf = fitsio.read(indir+tp+zdw+'_full.dat.fits') + ran = fitsio.read(indir+tp+zdw+'_0_full.ran.fits') + fnreg = indir+'/regressis_data/main_'+tp+'_256/RF/main_'+tp+'_imaging_weight_256.npy' + rfw = np.load(fnreg,allow_pickle=True) + maskreg = rfw.item()['mask_region'] + + #seld = dtf['PHOTSYS'] == reg + #dtf = dtf[seld] + sel_gz = common.goodz_infull(tp[:3],dtf) + sel_obs = dtf['ZWARN'] != 999999 + dtfoz = dtf[sel_obs&sel_gz] + wt = 1./dtfoz['FRACZ_TILELOCID']*dtfoz['WEIGHT_ZFAIL']*dtfoz['WEIGHT_SYS'] + if 'FRAC_TLOBS_TILES' in list(dtfoz.dtype.names): + print('using FRAC_TLOBS_TILES') + wt *= 1/dtfoz['FRAC_TLOBS_TILES'] + + sel_nan = wt*0 != 0 + wt[sel_nan] = 1. + if tp[:3] == 'LRG': + zmin = 0.4 + zmax = 1.1 + if tp[:3] == 'BGS': + zmin = 0.1 + zmax = 0.4 + if tp[:3] == 'ELG': + zmin = 0.8 + zmax = 1.6 + if tp[:3] == 'QSO': + zmin = 0.8 + zmax = 2.1 + + sel_zr = dtfoz['Z_not4clus'] > zmin + sel_zr &= dtfoz['Z_not4clus'] < zmax + delta_raw,fsky,frac = get_delta(dtf,ran,maskreg=maskreg) + cl_raw = hp.anafast(delta_raw) + ell = np.arange(len(cl_raw)) + delta_allz,_,_ = get_delta(dtfoz,ran,wts=wt,maskreg=maskreg) + cl_allz = hp.anafast(delta_allz) + delta_zr,_,_ = get_delta(dtfoz[sel_zr],ran,wts=wt[sel_zr],maskreg=maskreg) + cl_zr = hp.anafast(delta_zr) + print(len(dtf),np.sum(wt),np.sum(wt[sel_zr])) + neff_oz = (np.sum(wt)+len(dtfoz))/2. + neff_zr = (np.sum(wt[sel_zr])+len(dtfoz[sel_zr]))/2. 
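+    # The 4*pi*fsky/N terms subtracted below approximate the shot-noise
+    # level 1/nbar (N objects over ~4*pi*fsky sr of sky), appropriate once
+    # the pseudo-C_ell has been divided by fsky; neff_oz and neff_zr take the
+    # mean of the weighted and raw object counts as a rough effective N.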
+ lmax = -300 + plt.loglog(ell[1:lmax],cl_raw[1:lmax]/fsky-4.*np.pi*fsky/len(dtf),label='targets in Y1 area') + plt.loglog(ell[1:lmax],cl_allz[1:lmax]/fsky-4.*np.pi*fsky/neff_oz,label='all z') + plt.loglog(ell[1:lmax],cl_zr[1:lmax]/fsky-4.*np.pi*fsky/neff_zr,label=str(zmin)+' < z < '+str(zmax)) + plt.title(tp) + plt.legend() + plt.xlabel(r'$\ell$') + plt.ylabel(r'$C_{\ell}$') + plt.savefig(outdir+tp+'_cell.png') + plt.clf() + print('doing w(theta)') + sel = delta_raw != hp.UNSEEN + angl,wth_raw = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_raw[sel],frac[sel]) + _,wth_allz = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_allz[sel],frac[sel]) + _,wth_zr = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_zr[sel],frac[sel]) + + plt.plot(angl[:-1],1000*angl[:-1]*wth_raw[:-1],label='targets in Y1 area') + plt.plot(angl[:-1],1000*angl[:-1]*wth_allz[:-1],label='all z') + plt.plot(angl[:-1],1000*angl[:-1]*wth_zr[:-1],label=str(zmin)+' < z < '+str(zmax)) + plt.grid() + plt.title(tp) + plt.legend() + plt.xlabel(r'$\theta$') + plt.ylabel(r'$\theta\times w(\theta)\times 10^3$') + plt.savefig(outdir+tp+'_wth.png') + plt.clf() + + + regl = list(maskreg.keys()) + cls = [] + cls_raw = [] + wths = [] + wths_raw = [] + fskys = [] + for reg in regl: + maskr = maskreg[reg] + delta_reg,fsky_reg,frac = get_delta(dtfoz[sel_zr],ran,wts=wt[sel_zr],maskreg=maskr) + delta_reg_raw,_,_ = get_delta(dtf,ran,maskreg=maskr) + cl_reg = hp.anafast(delta_reg) + cls.append(cl_reg) + cl_reg_raw = hp.anafast(delta_reg_raw) + cls_raw.append(cl_reg_raw) + + fskys.append(fsky_reg) + sel = delta_reg != hp.UNSEEN + _,wth_reg = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_reg[sel],frac[sel]) + wths.append(wth_reg) + _,wth_reg_raw = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_reg_raw[sel],frac[sel]) + wths_raw.append(wth_reg_raw) + + for cl,reg,fsky in zip(cls,regl,fskys): + plt.loglog(ell[1:],cl[1:]/fsky,label=reg) + plt.title(tp+' '+str(zmin)+' < z < '+str(zmax)) + plt.legend() + plt.xlabel(r'$\ell$') + plt.ylabel(r'$C_{\ell}$') + plt.savefig(outdir+tp+'_cell_reg.png') + plt.clf() + + for cl,reg,fsky in zip(cls_raw,regl,fskys): + plt.loglog(ell[1:],cl[1:]/fsky,label=reg) + plt.title(tp+' targets in Y1') + plt.legend() + plt.xlabel(r'$\ell$') + plt.ylabel(r'$C_{\ell}$') + plt.savefig(outdir+tp+'_cell_regtar.png') + plt.clf() + + + for wth,reg in zip(wths,regl): + plt.plot(angl[:-1],1000*angl[:-1]*wth[:-1],label=reg) + plt.title(tp+' '+str(zmin)+' < z < '+str(zmax)) + plt.legend() + plt.xlabel(r'$\theta$') + plt.ylabel(r'$\theta\times w(\theta)\times 10^3$') + plt.savefig(outdir+tp+'_wth_reg.png') + plt.clf() + + for wth,reg in zip(wths_raw,regl): + plt.plot(angl[:-1],1000*angl[:-1]*wth[:-1],label=reg) + plt.title(tp+' targets in Y1') + plt.legend() + plt.xlabel(r'$\theta$') + plt.ylabel(r'$\theta\times w(\theta)\times 10^3$') + plt.savefig(outdir+tp+'_wth_regtar.png') + plt.clf() + + + + \ No newline at end of file From dfe2ddf0c07d4b8902ec042f501a76aee4b934b3 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sat, 13 Jan 2024 12:04:25 -0500 Subject: [PATCH 018/297] Update validation_cl_cluszbin.py --- scripts/validation/validation_cl_cluszbin.py | 44 ++++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/scripts/validation/validation_cl_cluszbin.py b/scripts/validation/validation_cl_cluszbin.py index 1efb9a4b5..9bbc9271a 100644 --- a/scripts/validation/validation_cl_cluszbin.py +++ 
b/scripts/validation/validation_cl_cluszbin.py @@ -97,28 +97,28 @@ def get_delta(tp,zmin,zmax,reg,racol='RA',decol='DEC',wts='default',thresh=0,nes zrl = [(0.4,0.6),(0.6,0.8),(0.8,1.1),(0.8,1.1),(1.1,1.6),(0.8,2.1),(0.1,0.4)] for reg in regl: - for i in range(0,len(tpl)): - tpi = tpl[i] - zri = zrl[i] - delta_i,fsky_i,frac_i,Ngal_i = get_delta(tpi,zri[0],zri[1],reg) - for j in range(i,len(tpl)): - tpj = tpl[j] - zrj = zrl[j] - fname_out = outdir + tpi+'zr'+str(zri[0])+'-'+str(zri[1])+'_cross_'+tpj+'zr'+str(zrj[0])+'-'+str(zrj[1])+reg - delta_j,fsky_j,frac_j,Ngal_j = get_delta(tpj,zrj[0],zrj[1],reg) - cl_ij = hp.anafast(delta_i,delta_j) - lmax = -300 - fsky_eff = np.sqrt(fsky_i*fsky_j) #I doubt this is actually correct...should somehow be cross-correlation of mask? - Neff = np.sqrt(Ngal_i*Ngal_j) #I also doubt this is correct... - plt.loglog(ell[1:lmax],cl_zr[1:lmax]/fsky_eff-4.*np.pi*fsky/Neff) - plt.title(reg.strip('_')+' ' tpi+' '+str(zri[0])+' Date: Sat, 13 Jan 2024 12:05:00 -0500 Subject: [PATCH 019/297] Update validation_cl_cluszbin.py --- scripts/validation/validation_cl_cluszbin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validation/validation_cl_cluszbin.py b/scripts/validation/validation_cl_cluszbin.py index 9bbc9271a..ea738ab4a 100644 --- a/scripts/validation/validation_cl_cluszbin.py +++ b/scripts/validation/validation_cl_cluszbin.py @@ -111,7 +111,7 @@ def get_delta(tp,zmin,zmax,reg,racol='RA',decol='DEC',wts='default',thresh=0,nes fsky_eff = np.sqrt(fsky_i*fsky_j) #I doubt this is actually correct...should somehow be cross-correlation of mask? Neff = np.sqrt(Ngal_i*Ngal_j) #I also doubt this is correct... plt.loglog(ell[1:lmax],cl_zr[1:lmax]/fsky_eff-4.*np.pi*fsky/Neff) - plt.title(reg.strip('_')+' ' tpi+' '+str(zri[0])+' Date: Sat, 13 Jan 2024 12:06:56 -0500 Subject: [PATCH 020/297] Update validation_cl_cluszbin.py --- scripts/validation/validation_cl_cluszbin.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/validation/validation_cl_cluszbin.py b/scripts/validation/validation_cl_cluszbin.py index ea738ab4a..d2677c23d 100644 --- a/scripts/validation/validation_cl_cluszbin.py +++ b/scripts/validation/validation_cl_cluszbin.py @@ -107,6 +107,7 @@ def get_delta(tp,zmin,zmax,reg,racol='RA',decol='DEC',wts='default',thresh=0,nes fname_out = outdir + tpi+'zr'+str(zri[0])+'-'+str(zri[1])+'_cross_'+tpj+'zr'+str(zrj[0])+'-'+str(zrj[1])+reg delta_j,fsky_j,frac_j,Ngal_j = get_delta(tpj,zrj[0],zrj[1],reg) cl_ij = hp.anafast(delta_i,delta_j) + ell = np.arange(len(cl_ij)) lmax = -300 fsky_eff = np.sqrt(fsky_i*fsky_j) #I doubt this is actually correct...should somehow be cross-correlation of mask? Neff = np.sqrt(Ngal_i*Ngal_j) #I also doubt this is correct... From 36fd00ac642e060094b57bb162491baab31df4e0 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sat, 13 Jan 2024 12:10:06 -0500 Subject: [PATCH 021/297] Update validation_cl_cluszbin.py --- scripts/validation/validation_cl_cluszbin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validation/validation_cl_cluszbin.py b/scripts/validation/validation_cl_cluszbin.py index d2677c23d..e90c17492 100644 --- a/scripts/validation/validation_cl_cluszbin.py +++ b/scripts/validation/validation_cl_cluszbin.py @@ -111,7 +111,7 @@ def get_delta(tp,zmin,zmax,reg,racol='RA',decol='DEC',wts='default',thresh=0,nes lmax = -300 fsky_eff = np.sqrt(fsky_i*fsky_j) #I doubt this is actually correct...should somehow be cross-correlation of mask? 
Neff = np.sqrt(Ngal_i*Ngal_j) #I also doubt this is correct... - plt.loglog(ell[1:lmax],cl_zr[1:lmax]/fsky_eff-4.*np.pi*fsky/Neff) + plt.loglog(ell[1:lmax],cl_ij[1:lmax]/fsky_eff-4.*np.pi*fsky/Neff) plt.title(reg.strip('_')+' ' +tpi+' '+str(zri[0])+' Date: Sat, 13 Jan 2024 12:10:58 -0500 Subject: [PATCH 022/297] Update validation_cl_cluszbin.py --- scripts/validation/validation_cl_cluszbin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validation/validation_cl_cluszbin.py b/scripts/validation/validation_cl_cluszbin.py index e90c17492..d7f91bc0b 100644 --- a/scripts/validation/validation_cl_cluszbin.py +++ b/scripts/validation/validation_cl_cluszbin.py @@ -45,7 +45,7 @@ ra,dec = densvar.thphi2radec(th,phi) -def get_delta(tp,zmin,zmax,reg,racol='RA',decol='DEC',wts='default',thresh=0,nest=False,appfrac=True,maskreg=None):#,ranpall=None +def get_delta(tp,zmin,zmax,reg,racol='RA',decol='DEC',wts='default',thresh=0.1,nest=False,appfrac=True,maskreg=None):#,ranpall=None dat = fitsio.read(indir+tp+reg+'_clustering.dat.fits') sel = dat['Z'] > zmin sel &= dat['Z'] < zmax From a4127610880a3f140d72ecc52d18372269462075 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sat, 13 Jan 2024 12:13:19 -0500 Subject: [PATCH 023/297] Update validation_cl_cluszbin.py --- scripts/validation/validation_cl_cluszbin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validation/validation_cl_cluszbin.py b/scripts/validation/validation_cl_cluszbin.py index d7f91bc0b..5474cdee7 100644 --- a/scripts/validation/validation_cl_cluszbin.py +++ b/scripts/validation/validation_cl_cluszbin.py @@ -111,7 +111,7 @@ def get_delta(tp,zmin,zmax,reg,racol='RA',decol='DEC',wts='default',thresh=0.1,n lmax = -300 fsky_eff = np.sqrt(fsky_i*fsky_j) #I doubt this is actually correct...should somehow be cross-correlation of mask? Neff = np.sqrt(Ngal_i*Ngal_j) #I also doubt this is correct... - plt.loglog(ell[1:lmax],cl_ij[1:lmax]/fsky_eff-4.*np.pi*fsky/Neff) + plt.loglog(ell[1:lmax],cl_ij[1:lmax]/fsky_eff-4.*np.pi*fsky_eff/Neff) plt.title(reg.strip('_')+' ' +tpi+' '+str(zri[0])+' Date: Sat, 13 Jan 2024 12:39:23 -0500 Subject: [PATCH 024/297] Update validation_cl_cluszbin.py --- scripts/validation/validation_cl_cluszbin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validation/validation_cl_cluszbin.py b/scripts/validation/validation_cl_cluszbin.py index 5474cdee7..955d3dad5 100644 --- a/scripts/validation/validation_cl_cluszbin.py +++ b/scripts/validation/validation_cl_cluszbin.py @@ -111,7 +111,7 @@ def get_delta(tp,zmin,zmax,reg,racol='RA',decol='DEC',wts='default',thresh=0.1,n lmax = -300 fsky_eff = np.sqrt(fsky_i*fsky_j) #I doubt this is actually correct...should somehow be cross-correlation of mask? Neff = np.sqrt(Ngal_i*Ngal_j) #I also doubt this is correct... 
- plt.loglog(ell[1:lmax],cl_ij[1:lmax]/fsky_eff-4.*np.pi*fsky_eff/Neff) + plt.loglog(ell[1:lmax],cl_ij[1:lmax]/fsky_eff)#-4.*np.pi*fsky_eff/Neff) plt.title(reg.strip('_')+' ' +tpi+' '+str(zri[0])+' Date: Tue, 16 Jan 2024 16:00:23 -0500 Subject: [PATCH 025/297] fix bug that left out TARGETID in randoms and add script for cov matrix test --- scripts/mock_tools/ffa2clus_fast.py | 2 +- scripts/mock_tools/testEZcov.py | 183 +++++++++++++++++++ scripts/validation/validation_cl_cluszbin.py | 2 +- 3 files changed, 185 insertions(+), 2 deletions(-) create mode 100644 scripts/mock_tools/testEZcov.py diff --git a/scripts/mock_tools/ffa2clus_fast.py b/scripts/mock_tools/ffa2clus_fast.py index 451667952..e29ebef91 100644 --- a/scripts/mock_tools/ffa2clus_fast.py +++ b/scripts/mock_tools/ffa2clus_fast.py @@ -106,7 +106,7 @@ def splitGC(flroot,datran='.dat',rann=0): fn = Table(fitsio.read(flroot.replace('global','dvs_ro') +app)) if datran == '.ran': - fn.keep_columns(['RA', 'DEC', 'Z', 'WEIGHT', 'WEIGHT_FKP', 'TARGETID_DATA']) + fn.keep_columns(['RA', 'DEC', 'Z', 'WEIGHT', 'WEIGHT_FKP', 'TARGETID_DATA','TARGETID']) #c = SkyCoord(fn['RA']* u.deg,fn['DEC']* u.deg,frame='icrs') #gc = c.transform_to('galactic') sel_ngc = common.splitGC(fn)#gc.b > 0 diff --git a/scripts/mock_tools/testEZcov.py b/scripts/mock_tools/testEZcov.py new file mode 100644 index 000000000..ae62a261c --- /dev/null +++ b/scripts/mock_tools/testEZcov.py @@ -0,0 +1,183 @@ +import numpy as np +import matplotlib as mpl +from matplotlib import pyplot as plt +import sys +import fitsio +import os + +def cat_ells(xis,indrange=None): + if indrange is None: + indrange = [0,len(xis[0])] + xil = [xis[0][indrange[0]:indrange[1]]] + for i in range(1,len(xis)): + xil.append(xis[i][indrange[0]:indrange[1]]) + xic = np.concatenate(xil) + return xic + +def get_xi_cov_desipipe_baseline_txt(smin=20,smax=200,zr='0.4-0.6',tp='LRG',rec='recon_recsym',ells=[0,2,4],Nmock=1000,flavor='ffa',mockversion='v1',thetacut='',mocktype='EZmock'): + from pycorr import TwoPointCorrelationFunction + dirm = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/'+mocktype+'/desipipe/'+mockversion+'/'+flavor+'/baseline_2pt/mock' + xil = [] + sepl = [] + start = 1 + + fnm = dirm +'1/'+rec+'/xi/smu/allcounts_'+tp+'_GCcomb_z'+zr+thetacut+'_d4_poles.txt' #loading first to get binning setup + + rbs = 4 + indmin = smin//rbs + indmax = smax//rbs + rebinned = np.loadtxt(fnm).transpose() + + xis = [] + for ell in ells: + xis.append(rebinned[2+ell//2]) + xin0 = cat_ells(xis,indrange=[indmin,indmax]) + + nbin = len(xin0) + print(nbin) + xiave = np.zeros((nbin)) + cov = np.zeros((nbin,nbin)) + + Ntot = 0 + fac = 1. + for i in range(start,start+Nmock): + nr = str(i) + xinpy = dirm +str(i)+'/'+rec+'/xi/smu/allcounts_'+tp+'_GCcomb_z'+zr+thetacut+'_d4_poles.txt' + if os.path.isfile(xinpy): + rebinned = np.loadtxt(xinpy).transpose() + xis = [] + for ell in ells: + xis.append(rebinned[2+ell//2]) + + xic = cat_ells(xis,indrange=[indmin,indmax]) + xiave += xic + Ntot += 1. 
+ print( Ntot) + xiave = xiave/float(Ntot) + for i in range(1,Nmock+1): + nr = str(i) + xinpy = dirm +str(i)+'/'+rec+'/xi/smu/allcounts_'+tp+'_GCcomb_z'+zr+thetacut+'_d4_poles.txt' + if os.path.isfile(xinpy): + rebinned = np.loadtxt(xinpy).transpose() + xis = [] + for ell in ells: + xis.append(rebinned[2+ell//2]) + + xic = cat_ells(xis,indrange=[indmin,indmax]) + for j in range(0,nbin): + xij = xic[j] + for k in range(0,nbin): + xik = xic[k] + cov[j][k] += (xij-xiave[j])*(xik-xiave[k]) + + cov = cov/float(Ntot) + + return xiave,cov + +def get_xiave_desipipe_ab_baseline(zr='0.4-0.6',tp='LRG',rec='recon_recsym',nmock=25,flavor='complete',mockversion='v3',reg='GCcomb',thetacut=''): + from pycorr import TwoPointCorrelationFunction + dirr = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/desipipe/'+mockversion+'/'+flavor+'/baseline_2pt/mock' + xil = [] + sepl = [] + + for i in range(0,nmock): + fn = dirr + str(i)+'/'+rec+'/xi/smu/allcounts_'+tp+'_'+reg+'_z'+zr+'_d4_poles.txt' + result = np.loadtxt(fn).transpose() + sep, xis = result[0],result[2:5] + xil.append(xis) + sepl.append(sep) + xi = sum(xil)/nmock + sep = sum(sepl)/nmock + return sep,xi + +def get_xi_desipipe_ab_baseline(mockn,zr='0.4-0.6',tp='LRG',rec='recon_recsym',nmock=25,flavor='complete',mockversion='v3',reg='GCcomb',thetacut=''): + from pycorr import TwoPointCorrelationFunction + dirr = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/desipipe/'+mockversion+'/'+flavor+'/baseline_2pt/mock' + fn = dirr + str(mockn)+'/'+rec+'/xi/smu/allcounts_'+tp+'_'+reg+'_z'+zr+'_d4_poles.txt' + result = np.loadtxt(fn).transpose() + return result[0],result[2:5] + +def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): + indmin = smin//4 + indmax = smax//4 + zr = str(zmin)+'-'+str(zmax) + twa = '' + if tracer == 'ELG_LOP': + twa = 'notqso' + xiave,cov = get_xi_cov_desipipe_baseline_txt(zr=zr,tp=tracer,smin=smin,smax=smax,rec=rec,ells=ells) + sep,xiave_abamtl = get_xiave_desipipe_ab_baseline(zr=zr,tp=tracer+twa,rec=rec,flavor='altmtl',mockversion='v3_1') + _,xiave_abffa = get_xiave_desipipe_ab_baseline(zr=zr,tp=tracer,rec=rec,flavor='ffa',mockversion='v3') + xiave_abamtl_cut = cat_ells(xiave_abamtl[:len(ells)],indrange=[indmin,indmax]) + xiave_abffa_cut = cat_ells(xiave_abffa[:len(ells)],indrange=[indmin,indmax]) + icov = np.linalg.inv(cov) + chi2la = [] + chi2lf = [] + for i in range(0,25): + _,xi_amtl = get_xi_desipipe_ab_baseline(i,zr=zr,tp=tracer+twa,rec=rec,flavor='altmtl',mockversion='v3_1') + xi_amtl_cut = cat_ells(xi_amtl[:len(ells)],indrange=[indmin,indmax]) + damtl = xi_amtl_cut-xiave_abamtl_cut + chi2_amtl = np.dot(damtl,np.dot(damtl,icov)) + chi2la.append(chi2_amtl) + _,xi_ffa = get_xi_desipipe_ab_baseline(i,zr=zr,tp=tracer,rec=rec,flavor='ffa',mockversion='v3') + xi_ffa_cut = cat_ells(xi_ffa[:len(ells)],indrange=[indmin,indmax]) + dffa = xi_ffa_cut-xiave_abffa_cut + chi2_ffa = np.dot(dffa,np.dot(dffa,icov)) + chi2lf.append(chi2_ffa) + #print(i,xi_amtl) + #print(chi2_ffa,chi2_amtl) + meana = np.mean(chi2la) + meanf = np.mean(chi2lf) + fig = plt.figure() + a = plt.hist(chi2lf,histtype='step',label=r'ffa,$\bar{\chi}^2=$'+str(round(meanf,3)),lw=3,color='b') + b = plt.hist(chi2la,histtype='step',label=r'altml,$\bar{\chi}^2=$'+str(round(meana,3)),lw=3,color='r') + plt.plot([meana,meana],[0,max(max(a[0]),max(b[0]))],'r:') + + plt.plot([meanf,meanf],[0,max(max(a[0]),max(b[0]))],'b:') + + titl = tracer+' '+str(zmin)+' Date: Tue, 16 Jan 2024 16:02:45 -0500 Subject: [PATCH 
026/297] Update testEZcov.py --- scripts/mock_tools/testEZcov.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/mock_tools/testEZcov.py b/scripts/mock_tools/testEZcov.py index ae62a261c..7d3ff6167 100644 --- a/scripts/mock_tools/testEZcov.py +++ b/scripts/mock_tools/testEZcov.py @@ -155,7 +155,7 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): zrl = [(0.8,2.1)] for rec in recl: for zr in zrl: - for ells in ellsl + for ells in ellsl: fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) figs.append(fig) @@ -163,7 +163,7 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): zrl = [(0.8,1.1),(1.1,1.6)] for rec in recl: for zr in zrl: - for ells in ellsl + for ells in ellsl: fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) figs.append(fig) @@ -171,7 +171,7 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): zrl = [(0.4,0.6),(0.6,0.8),(0.8,1.1)] for rec in recl: for zr in zrl: - for ells in ellsl + for ells in ellsl: fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) figs.append(fig) From 5647eb0579bf6deb2c1bb890e8af935a5037e13f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 16 Jan 2024 16:03:24 -0500 Subject: [PATCH 027/297] Update testEZcov.py --- scripts/mock_tools/testEZcov.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/scripts/mock_tools/testEZcov.py b/scripts/mock_tools/testEZcov.py index 7d3ff6167..99a493d41 100644 --- a/scripts/mock_tools/testEZcov.py +++ b/scripts/mock_tools/testEZcov.py @@ -156,28 +156,28 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) - figs.append(fig) + fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) + figs.append(fig) tp = 'ELG_LOP' zrl = [(0.8,1.1),(1.1,1.6)] for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) - figs.append(fig) + fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) + figs.append(fig) tp = 'LRG' zrl = [(0.4,0.6),(0.6,0.8),(0.8,1.1)] for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) - figs.append(fig) + fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) + figs.append(fig) outdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/'' with PdfPages(outdir+'testEZmockcov_smin20smax200.pdf') as pdf: - for fig in figs: - pdf.savefig(fig) - plt.close() + for fig in figs: + pdf.savefig(fig) + plt.close() From afcadd42da471fdb9729e3b1dea2cb0c64ec9b30 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 16 Jan 2024 16:03:45 -0500 Subject: [PATCH 028/297] Update testEZcov.py --- scripts/mock_tools/testEZcov.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/testEZcov.py b/scripts/mock_tools/testEZcov.py index 99a493d41..c05d7376b 100644 --- a/scripts/mock_tools/testEZcov.py +++ b/scripts/mock_tools/testEZcov.py @@ -175,7 +175,7 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) figs.append(fig) -outdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/'' +outdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/' with PdfPages(outdir+'testEZmockcov_smin20smax200.pdf') as pdf: for fig in figs: pdf.savefig(fig) From 
a240c8ea4ba82735524e7c3c3d487256fd8196d2 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 16 Jan 2024 16:15:50 -0500 Subject: [PATCH 029/297] Update testEZcov.py --- scripts/mock_tools/testEZcov.py | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/scripts/mock_tools/testEZcov.py b/scripts/mock_tools/testEZcov.py index c05d7376b..9e8a4fa21 100644 --- a/scripts/mock_tools/testEZcov.py +++ b/scripts/mock_tools/testEZcov.py @@ -1,10 +1,14 @@ import numpy as np import matplotlib as mpl from matplotlib import pyplot as plt +from matplotlib.backends.backend_pdf import PdfPages import sys import fitsio import os + + + def cat_ells(xis,indrange=None): if indrange is None: indrange = [0,len(xis[0])] @@ -147,7 +151,8 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): figs = [] - +smin=20 +smax=200 recl = ['recon_recsym',''] ellsl = [[0,2,4],[0,2],[0]] @@ -156,27 +161,38 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) + fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) figs.append(fig) +outdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/' +with PdfPages(outdir+'testEZmockcov_'+tp+_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: + for fig in figs: + pdf.savefig(fig) + plt.close() + +figs = [] tp = 'ELG_LOP' zrl = [(0.8,1.1),(1.1,1.6)] for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) + fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) figs.append(fig) - +with PdfPages(outdir+'testEZmockcov_'+tp+_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: + for fig in figs: + pdf.savefig(fig) + plt.close() + +figs = [] tp = 'LRG' zrl = [(0.4,0.6),(0.6,0.8),(0.8,1.1)] for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],20,200,rec=rec,ells=ells) + fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) figs.append(fig) -outdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/' -with PdfPages(outdir+'testEZmockcov_smin20smax200.pdf') as pdf: +with PdfPages(outdir+'testEZmockcov_'+tp+_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: for fig in figs: pdf.savefig(fig) plt.close() From 1b722becfd4458b374651bc54dc11ccf9dc44e2e Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 16 Jan 2024 16:16:35 -0500 Subject: [PATCH 030/297] Update testEZcov.py --- scripts/mock_tools/testEZcov.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/mock_tools/testEZcov.py b/scripts/mock_tools/testEZcov.py index 9e8a4fa21..eff652c83 100644 --- a/scripts/mock_tools/testEZcov.py +++ b/scripts/mock_tools/testEZcov.py @@ -165,7 +165,7 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): figs.append(fig) outdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/' -with PdfPages(outdir+'testEZmockcov_'+tp+_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: +with PdfPages(outdir+'testEZmockcov_'+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: for fig in figs: pdf.savefig(fig) plt.close() @@ -178,7 +178,7 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): for ells in ellsl: fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) figs.append(fig) -with PdfPages(outdir+'testEZmockcov_'+tp+_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: +with 
PdfPages(outdir+'testEZmockcov_'+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: for fig in figs: pdf.savefig(fig) plt.close() @@ -192,7 +192,7 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) figs.append(fig) -with PdfPages(outdir+'testEZmockcov_'+tp+_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: +with PdfPages(outdir+'testEZmockcov_'+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: for fig in figs: pdf.savefig(fig) plt.close() From 48106f41f62cb1a7b6f0b86efc565f1d96bedaa7 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 16 Jan 2024 16:22:24 -0500 Subject: [PATCH 031/297] Update testEZcov.py --- scripts/mock_tools/testEZcov.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock_tools/testEZcov.py b/scripts/mock_tools/testEZcov.py index eff652c83..43ab0183b 100644 --- a/scripts/mock_tools/testEZcov.py +++ b/scripts/mock_tools/testEZcov.py @@ -151,8 +151,8 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): figs = [] -smin=20 -smax=200 +smin=50 +smax=150 recl = ['recon_recsym',''] ellsl = [[0,2,4],[0,2],[0]] From 87c5e4aee182f38e04fc1aebb4557da4d881a638 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Thu, 23 Nov 2023 01:18:51 -0800 Subject: [PATCH 032/297] Running altmtl mocks on v3 --- scripts/getpotaY1_mock.py | 1 + scripts/mock_tools/getpota_Y1_script.sh | 25 ++ scripts/mock_tools/prepare_mocks_Y1.py | 347 ++++++++++++++------ scripts/mock_tools/prepare_script.sh | 12 + scripts/mock_tools/run_Y1SecondGen_batch.sh | 7 + scripts/mock_tools/run_mtl_ledger.py | 37 +++ scripts/mock_tools/script_lrgmask_Y1.sh | 25 ++ scripts/readwrite_pixel_bitmask.py | 6 +- 8 files changed, 365 insertions(+), 95 deletions(-) create mode 100755 scripts/mock_tools/getpota_Y1_script.sh create mode 100755 scripts/mock_tools/prepare_script.sh create mode 100755 scripts/mock_tools/run_Y1SecondGen_batch.sh create mode 100644 scripts/mock_tools/run_mtl_ledger.py create mode 100755 scripts/mock_tools/script_lrgmask_Y1.sh diff --git a/scripts/getpotaY1_mock.py b/scripts/getpotaY1_mock.py index ef3a12137..97901135a 100644 --- a/scripts/getpotaY1_mock.py +++ b/scripts/getpotaY1_mock.py @@ -50,6 +50,7 @@ parser.add_argument("--tile-temp-dir", help="Directory for temp tile files, default %(default)s", default=os.path.join(os.environ['SCRATCH'], 'rantiles')) parser.add_argument("--counttiles", default = 'n') +parser.add_argument("--secgen_ver", default = None) parser.add_argument("--nprocs", help="Number of multiprocessing processes to use, default %(default)i", default=multiprocessing.cpu_count()//2, type=int) diff --git a/scripts/mock_tools/getpota_Y1_script.sh b/scripts/mock_tools/getpota_Y1_script.sh new file mode 100755 index 000000000..2bb9c60f1 --- /dev/null +++ b/scripts/mock_tools/getpota_Y1_script.sh @@ -0,0 +1,25 @@ +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 0 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 1 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 2 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py 
--realization 3 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive 
--account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --secgen_ver AbacusSummit_v3

diff --git a/scripts/mock_tools/prepare_mocks_Y1.py b/scripts/mock_tools/prepare_mocks_Y1.py
index d8daf4939..9fc71fd38 100644
--- a/scripts/mock_tools/prepare_mocks_Y1.py
+++ b/scripts/mock_tools/prepare_mocks_Y1.py
@@ -2,12 +2,10 @@ from astropy.table import Table, hstack, vstack, Column # A class to represent tables of heterogeneous data.
 import fitsio
 import numpy as np
-import glob
 import os
-import h5py
 import argparse
 import sys
-
+import json
 from desitarget.targetmask import obsconditions
 from desimodel.footprint import is_point_in_desi
@@ -17,6 +15,33 @@ from LSS.globals import main
+
+def create_dir(value):
+    if not os.path.exists(value):
+        try:
+            os.makedirs(value, 0o755)
+            print('Check directories', value)
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+def mask_firstgen(main=0, nz=0, Y5=0, sv3=0):
+    return main * (2**3) + sv3 * (2**2) + Y5 * (2**1) + nz * (2**0)
+
+def mask_secondgen(nz=0, foot=None, nz_lop=0):
+    if foot == 'Y1':
+        Y5 = 0
+        Y1 = 1
+    elif foot == 'Y5':
+        Y5 = 1
+        Y1 = 0
+    else:
+        Y5 = 0
+        Y1 = 0
+    return nz * (2**0) + Y5 * (2**1) + nz_lop * (2**2) + Y1 * (2**3)
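mask_secondgen packs the n(z), Y5/Y1 footprint, and ELG-LOP flags into single STATUS bits; a row is kept when all requested bits are set, which is what the target loop later does with (status & mask) == mask. A standalone sketch of the same bit logic, with made-up STATUS values (the compact ternary form below is equivalent to the if/elif/else above):

import numpy as np

def mask_secondgen(nz=0, foot=None, nz_lop=0):
    # same bit layout as the patch: nz -> bit 0, Y5 -> bit 1, nz_lop -> bit 2, Y1 -> bit 3
    Y5 = 1 if foot == 'Y5' else 0
    Y1 = 1 if foot == 'Y1' else 0
    return nz * (2**0) + Y5 * (2**1) + nz_lop * (2**2) + Y1 * (2**3)

status = np.array([0b1001, 0b0001, 0b1101, 0b1111])   # made-up STATUS values
mask_main = mask_secondgen(nz=1, foot='Y1')           # 0b1001 = 9
idx = np.arange(len(status))
idx_main = idx[(status & mask_main) == mask_main]     # keep rows with both bits set
print(mask_main, idx_main)                            # -> 9 [0 2 3]

One thing to watch further down in this patch: in the isProduction branch, the else arm of the new_version check is a bare 'AbacusSummit' expression rather than an assignment, so Abacus_dir stays undefined on that path.

+
+
+
+
 if os.environ['NERSC_HOST'] == 'cori':
     scratch = 'CSCRATCH'
 elif os.environ['NERSC_HOST'] == 'perlmutter':
@@ -27,102 +52,233 @@
 parser = argparse.ArgumentParser()
 parser.add_argument("--mockver", help="type of mock to use",default=None)
-parser.add_argument("--mockpath", help="Location of mock file(s)",default='/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/AbacusSummit/CutSky/')
+parser.add_argument("--mockpath", help="Location of mock file(s)",default='/global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky')
 parser.add_argument("--mockfile", help="formattable name of mock file(s). e.g. cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits. TYPE will be replaced with tracer type. PH will be replaced with realization number for simulation of mock.",default='cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits')
-#parser.add_argument("--realization", help="number for the realization",default=1,type=int)
-parser.add_argument("--realmin", help="number for the realization",default=1,type=int)
-parser.add_argument("--realmax", help="number for the realization",default=2,type=int)
+parser.add_argument("--realmin", help="number for the realization",default=0,type=int)
+parser.add_argument("--realmax", help="number for the realization",default=1,type=int)
 parser.add_argument("--prog", help="dark or bright",default='dark')
 parser.add_argument("--base_output", help="base directory for output",default='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/')
-parser.add_argument("--prep", help="prepare file for fiberassign?",default='y')
 parser.add_argument("--apply_mask", help="apply the same mask as applied to desi targets?",default='y')
-parser.add_argument("--par", help="running in parallel?",default='n')
+parser.add_argument("--downsampling", help="downsample to Y1 target density in SecondGen Abacus mocks?",default='n')
+parser.add_argument("--isProduction", help="Say yes if you want to save in main production directory",default='n')
+parser.add_argument("--overwrite", help="Overwrite. if it is in production, this always will be no. 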
You must delete by hand first", default=0, type=bool) +parser.add_argument("--split_snapshot", help="apply different snapshots to different redshift ranges?",default='n') +parser.add_argument("--new_version", help="If production, and this is a new version, set to name, for example, AbacusSummit_v3",default=None) args = parser.parse_args() - -tiletab = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/tiles-'+args.prog.upper()+'.fits') +tiletab = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/tiles-{PROG}.fits'.format(PROG = args.prog.upper())) if args.prog == 'dark': types = ['ELG', 'LRG', 'QSO'] - desitar = {'ELG':34,'LRG':1,'QSO':4} - priority = {'ELG':3000,'LRG':3200,'QSO':3400} - mainp = main(tp='QSO',specver='iron') + priority = {'ELG':3000, 'LRG':3200, 'QSO':3400} + mainp = main(tp = 'QSO', specver = 'iron') + desitar = {'ELG':34, 'LRG':1, 'QSO':4} + numobs = {'ELG':2, 'LRG':2, 'QSO':4} + + if args.split_snapshot == 'y': + zs = {'ELG':{'z0.950':[0.,1.1], 'z1.325':[1.1,99.]}, 'LRG':{'z0.500':[0.,0.6], 'z0.800':[0.6,99.]}, 'QSO':{'z1.400':[0.,99.]}} + else: + zs = {'ELG':'z1.100', 'LRG':'z0.800', 'QSO':'z1.400'} + + + if args.mockver == 'ab_secondgen': + desitar = {'ELG':2**1, 'LRG':2**0, 'QSO':2**2} + downsampling = {'ELG':0.7345658717688022, 'LRG':0.708798313382828, 'QSO':0.39728966594530174} + percentage_elg_hip = 0.1 + -for real in range(args.realmin,args.realmax): +if args.isProduction == 'y': + args.base_output = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks' + args.overwrite = False + if args.new_version is not None: + Abacus_dir = args.new_version + else: + 'AbacusSummit' +else: + if args.base_output == '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks' or args.base_output == '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/': + args.base_output = scratch + print('This is not production, run on user scratch', scratch) + else: + print('Saving to path', args.base_output) + + + +for real in range(args.realmin, args.realmax): if not (args.mockver is None): if args.mockver == 'ab_firstgen': mockpath = '/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/AbacusSummit/CutSky/' - file_name = 'cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits' - out_file_name = args.base_output+'/FirstGenMocks/AbacusSummit/forFA'+str(real)+'.fits' - if not os.path.exists(args.base_output+'/FirstGenMocks'): - os.mkdir(args.base_output+'/FirstGenMocks') - print('made '+args.base_output+'/FirstGenMocks') - if not os.path.exists(args.base_output+'/FirstGenMocks/AbacusSummit'): - os.mkdir(args.base_output+'/FirstGenMocks/AbacusSummit') - print('made '+args.base_output+'/FirstGenMocks/AbacusSummit') - mockdir = args.base_output+'/FirstGenMocks/AbacusSummit/' + mockdir = os.path.join(args.base_output, 'FirstGenMocks', 'AbacusSummit') + + out_file_name = os.path.join(mockdir, 'forFA{real}.fits'.format(real=real)) + - if args.mockver == 'ezmocks6': - out_file_name = args.base_output + '/EZMocks_6Gpc/EZMocks_6Gpc_' + str(real) + '.fits' - if not os.path.exists(args.base_output + '/EZMocks_6Gpc'): - os.makedirs(args.base_output + '/EZMocks_6Gpc') - print('made ' + args.base_output + '/EZMocks_6Gpc') - mockdir = args.base_output + '/EZMocks_6Gpc/' + elif args.mockver == 'ezmocks6': + mockdir = os.path.join(args.base_output, 'EZMocks_6Gpc') + mockpath = '/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc' + out_file_name = os.path.join(mockdir, 'EZMocks_6Gpc_{real}.fits'.format(real=real)) + + elif args.mockver == 'ab_secondgen': + mockpath = args.mockpath + file_name = 
'cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits' + + mockdir = os.path.join(args.base_output, 'SecondGenMocks', Abacus_dir) + #if args.split_snapshot == 'y': + create_dir(mockdir) + if not os.path.isfile(os.path.join(mockdir, 'prepare_mock_arguments.txt')): + with open(os.path.join(mockdir, 'prepare_mock_arguments.txt'), 'w') as f: + json.dump(args.__dict__, f, indent=2) + + out_file_name = os.path.join(mockdir, 'forFA{real}.fits'.format(real=real)) + #else: + # out_file_name = os.path.join(mockdir, 'forFA{real}.fits'.format(real=real)) else: raise ValueError(args.mockver+' not supported with legacy mockver argument. Use mockpath/mockfilename arguments instead.') else: mockpath = args.mockpath file_name = args.mockfile - out_file_name = args.base_output + '/forFA_Real{0}.fits'.format(real) + mockdir = args.base_output + out_file_name = os.path.join(mockdir, 'forFA{0}.fits'.format(real)) + print('generic mock, it needs a mock generation to continue, it will select mockver = ab_secondgen') + args.mockver = 'ab_secondgen' + - print('will write to '+out_file_name) - if not os.path.exists(args.base_output): - os.makedirs(args.base_output) - print('made '+args.base_output) + print('testing and creating output directory', mockdir) + create_dir(mockdir) + print('will write outputs to ', out_file_name) + mockdir = args.base_output - zs = {'ELG':'z1.100','LRG':'z0.800','QSO':'z1.400'} - def mask(main=0, nz=0, Y5=0, sv3=0): - return main * (2**3) + sv3 * (2**2) + Y5 * (2**1) + nz * (2**0) - if args.prep == 'y': - datat = [] - for type_ in types: - if args.mockver == 'ab_firstgen': + datat = [] + for type_ in types: + if args.mockver == 'ab_firstgen' or args.mockver == 'ab_secondgen': + if args.split_snapshot == 'y': + datas = [] + + for bins in zs[type_]: + print(bins) + thepath = os.path.join(mockpath, type_, bins, file_name.format(TYPE = type_, Z = bins, PH = "%03d" % real)) + print('thepath') + print(thepath) + dat = fitsio.read(thepath, columns=['RA','DEC','Z','Z_COSMO','STATUS'])#f[1].data + mask = (dat['Z']>= zs[type_][bins][0])&(dat['Z']< zs[type_][bins][1]) + datas.append(Table(dat[mask])) + data = vstack(datas) + del datas + del dat + else: thepath = os.path.join(mockpath, type_, zs[type_], file_name.format(TYPE = type_, Z = zs[type_], PH = "%03d" % real)) - print('thepath') - print(thepath) - data = fitsio.read(thepath,columns=['RA','DEC','Z','Z_COSMO','STATUS'])#f[1].data - elif args.mockver == 'ezmocks6': - if type_ == "LRG": - infn1 = "/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc/LRG/z0.800/cutsky_LRG_z0.800_EZmock_B6000G1536Z0.8N216424548_b0.385d4r169c0.3_seed%s_NGC.fits"%real - infn2 = "/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc/LRG/z0.800/cutsky_LRG_z0.800_EZmock_B6000G1536Z0.8N216424548_b0.385d4r169c0.3_seed%s_SGC.fits"%real - elif type_ == "ELG": - infn1 = "/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc/ELG/z1.100/cutsky_ELG_z1.100_EZmock_B6000G1536Z1.1N648012690_b0.345d1.45r40c0.05_seed%s_NGC.fits"%real - infn2 = "/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc/ELG/z1.100/cutsky_ELG_z1.100_EZmock_B6000G1536Z1.1N648012690_b0.345d1.45r40c0.05_seed%s_SGC.fits"%real - elif type_ == "QSO": - infn1 = "/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc/QSO/z1.400/cutsky_QSO_z1.400_EZmock_B6000G1536Z1.4N27395172_b0.053d1.13r0c0.6_seed%s_NGC.fits"%real - infn2 = 
"/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc/QSO/z1.400/cutsky_QSO_z1.400_EZmock_B6000G1536Z1.4N27395172_b0.053d1.13r0c0.6_seed%s_SGC.fits"%real - # infn1 = "/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc/LRG/z0.800/cutsky_LRG_z0.800_EZmock_B6000G1536Z0.8N216424548_b0.385d4r169c0.3_seed1_NGC.fits" - # infn2 = "/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc/LRG/z0.800/cutsky_LRG_z0.800_EZmock_B6000G1536Z0.8N216424548_b0.385d4r169c0.3_seed1_SGC.fits" - tars1 = Table.read(infn1)#fitsio.read(infn1) - tars2 = Table.read(infn2)#fitsio.read(infn2) - tars1["GALCAP"] = "N" - tars2["GALCAP"] = "S" - tars = vstack([tars1, tars2]) - data = tars - #tars['TARGETID'] = np.arange(len(tars)) - #f = fits.open(thepath) - print(data.dtype.names) - print(type_,len(data)) - status = data['STATUS'][()] - idx = np.arange(len(status)) - mask_main = mask(main=0, nz=1, Y5=0, sv3=0) #no longer cutting to Y5 footprint because it doesn't actually cover Y1 + data = fitsio.read(thepath, columns=['RA','DEC','Z','Z_COSMO','STATUS'])#f[1].data + + elif args.mockver == 'ezmocks6': + path_ezmock = os.path.join(mockpath, type_, zs[type_]) + if type_ == "LRG": + infn1 = os.path.join(path_ezmock, "cutsky_LRG_z0.800_EZmock_B6000G1536Z0.8N216424548_b0.385d4r169c0.3_seed{real}_NGC.fits".format(real = real)) + infn2 = os.path.join(path_ezmock, "cutsky_LRG_z0.800_EZmock_B6000G1536Z0.8N216424548_b0.385d4r169c0.3_seed{real}_SGC.fits".format(real = real)) + elif type_ == "ELG": + infn1 = os.path.join(path_ezmock, "cutsky_ELG_z1.100_EZmock_B6000G1536Z1.1N648012690_b0.345d1.45r40c0.05_seed{real}_NGC.fits".format(real = real)) + infn2 = os.path.join(path_ezmock, "cutsky_ELG_z1.100_EZmock_B6000G1536Z1.1N648012690_b0.345d1.45r40c0.05_seed{real}_SGC.fits".format(real = real)) + elif type_ == "QSO": + infn1 = os.path.join(path_ezmock, "cutsky_QSO_z1.400_EZmock_B6000G1536Z1.4N27395172_b0.053d1.13r0c0.6_seed{real}_NGC.fits".format(real = real)) + infn2 = os.path.join(path_ezmock, "cutsky_QSO_z1.400_EZmock_B6000G1536Z1.4N27395172_b0.053d1.13r0c0.6_seed{real}_SGC.fits".format(real = real)) + tars1 = Table.read(infn1) + tars2 = Table.read(infn2) + tars1["GALCAP"] = "N" + tars2["GALCAP"] = "S" + data = vstack([tars1, tars2]) + + + print(data.dtype.names) + print(type_, len(data)) + status = data['STATUS'][()] + idx = np.arange(len(status)) + + if args.mockver == 'ab_secondgen': + + mask_main = mask_secondgen(nz=1, foot='Y1') + idx_main = idx[(status & (mask_main))==mask_main] + + if type_ == 'LRG' or type_ == 'QSO': + if args.downsampling == 'y': + ran_tot = np.random.uniform(size = len(idx_main)) + idx_main = idx_main[(ran_tot<=downsampling[type_])] + data = data[idx_main] + data = Table(data) + + data['DESI_TARGET'] = desitar[type_] + data['PRIORITY_INIT'] = priority[type_] + data['PRIORITY'] = priority[type_] + data['NUMOBS_MORE'] = numobs[type_] + data['NUMOBS_INIT'] = numobs[type_] + datat.append(data) + + else: + + mask_LOP = mask_secondgen(nz=1, foot='Y1', nz_lop=1) + idx_LOP = idx[(status & (mask_LOP))==mask_LOP] + + + idx_VLO = np.setdiff1d(idx_main, idx_LOP) + + if args.downsampling == 'y': + ran_lop = np.random.uniform(size = len(idx_LOP)) + idx_LOP = idx_LOP[(ran_lop<=downsampling[type_])] + ran_vlo = np.random.uniform(size = len(idx_VLO)) + idx_VLO = idx_VLO[(ran_vlo<=downsampling[type_])] + + data_lop = Table(data[idx_LOP]) + data_vlo = Table(data[idx_VLO]) + + df_lop=data_lop.to_pandas() + df_vlo=data_vlo.to_pandas() + num_HIP_LOP = int(len(df_lop) * percentage_elg_hip) + 
df_HIP_LOP = df_lop.sample(n=num_HIP_LOP) + remaining_LOP = df_lop.drop(df_HIP_LOP.index) + df_HIP_LOP.reset_index(drop=True, inplace=True) + remaining_LOP.reset_index(drop=True, inplace=True) + + num_HIP_VLO = int(len(df_vlo) * percentage_elg_hip) + df_HIP_VLO = df_vlo.sample(n=num_HIP_VLO) + remaining_VLO = df_vlo.drop(df_HIP_VLO.index) + df_HIP_VLO.reset_index(drop=True, inplace=True) + remaining_VLO.reset_index(drop=True, inplace=True) + + remaining_LOP['PRIORITY_INIT'] = 3100 + remaining_LOP['PRIORITY'] = 3100 + remaining_LOP['DESI_TARGET'] = 2**5 + 2**1 + remaining_VLO['PRIORITY_INIT'] = 3000 + remaining_VLO['PRIORITY'] = 3000 + remaining_VLO['DESI_TARGET'] = 2**7 + 2**1 + + df_HIP_LOP['PRIORITY_INIT'] = 3200 + df_HIP_LOP['PRIORITY'] = 3200 + df_HIP_LOP['DESI_TARGET'] = 2**6 + 2**1 + 2**5 + + df_HIP_VLO['PRIORITY_INIT'] = 3200 + df_HIP_VLO['PRIORITY'] = 3200 + df_HIP_VLO['DESI_TARGET'] = 2**6 + 2**1 + 2**5 + + remaining_LOP['NUMOBS_MORE'] = numobs[type_] + remaining_LOP['NUMOBS_INIT'] = numobs[type_] + remaining_VLO['NUMOBS_MORE'] = numobs[type_] + remaining_VLO['NUMOBS_INIT'] = numobs[type_] + df_HIP_LOP['NUMOBS_MORE'] = numobs[type_] + df_HIP_LOP['NUMOBS_INIT'] = numobs[type_] + df_HIP_VLO['NUMOBS_MORE'] = numobs[type_] + df_HIP_VLO['NUMOBS_INIT'] = numobs[type_] + + datat.append(Table.from_pandas(remaining_LOP)) + datat.append(Table.from_pandas(remaining_VLO)) + datat.append(Table.from_pandas(df_HIP_LOP)) + datat.append(Table.from_pandas(df_HIP_VLO)) + + else: + mask_main = mask_firstgen(main=0, nz=1, Y5=0, sv3=0) #no longer cutting to Y5 footprint because it doesn't actually cover Y1 if type_ == 'LRG': - mask_main = mask(main=1, nz=1, Y5=0, sv3=0) + mask_main = mask_firstgen(main=1, nz=1, Y5=0, sv3=0) idx_main = idx[(status & (mask_main))==mask_main] data = data[idx_main] print(len(data)) @@ -130,13 +286,18 @@ def mask(main=0, nz=0, Y5=0, sv3=0): data['DESI_TARGET'] = desitar[type_] data['PRIORITY_INIT'] = priority[type_] data['PRIORITY'] = priority[type_] + data['NUMOBS_MORE'] = numobs[type_] + data['NUMOBS_INIT'] = numobs[type_] + datat.append(data) - targets = vstack(datat) + + targets = vstack(datat) + del datat + if args.mockver != 'ab_secondgen': print(len(targets),' in Y5 area') - del datat selY1 = is_point_in_desi(tiletab,targets['RA'],targets['DEC']) targets = targets[selY1] - print(len(targets),' in Y1 area') + print(len(targets),' in Y1 area') if args.apply_mask == 'y': print('getting nobs and mask bits') @@ -146,28 +307,26 @@ def mask(main=0, nz=0, Y5=0, sv3=0): for col in maskcols: targets[col] = maskv[col] del maskv - targets = common.cutphotmask(targets,bits=mainp.imbits) + targets = common.cutphotmask(targets, bits=mainp.imbits) - if args.prep == 'y': - n=len(targets) - targets.rename_column('Z_COSMO', 'TRUEZ') - targets.rename_column('Z', 'RSDZ') - targets['BGS_TARGET'] = np.zeros(n, dtype='i8') - targets['MWS_TARGET'] = np.zeros(n, dtype='i8') - targets['SUBPRIORITY'] = np.random.uniform(0, 1, n) - targets['BRICKNAME'] = np.full(n, '000p0000') #- required !?! 
- targets['OBSCONDITIONS'] = obsconditions.mask(args.prog.upper()) #np.zeros(n, dtype='i8')+int(3) - targets['NUMOBS_MORE'] = np.zeros(n, dtype='i8')+int(1) - targets['NUMOBS_INIT'] = np.zeros(n, dtype='i8')+int(1) - targets['SCND_TARGET'] = np.zeros(n, dtype='i8')+int(0) - targets['ZWARN'] = np.zeros(n, dtype='i8')+int(0) - targets['TARGETID'] = np.arange(1,n+1) - - targets.write(out_file_name, overwrite = True) - - fits.setval(out_file_name, 'EXTNAME', value='TARGETS', ext=1) - fits.setval(out_file_name, 'OBSCON', value=args.prog.upper(), ext=1) + + n=len(targets) + targets.rename_column('Z_COSMO', 'TRUEZ') + targets.rename_column('Z', 'RSDZ') + targets['BGS_TARGET'] = np.zeros(n, dtype='i8') + targets['MWS_TARGET'] = np.zeros(n, dtype='i8') + targets['SUBPRIORITY'] = np.random.uniform(0, 1, n) + targets['BRICKNAME'] = np.full(n, '000p0000') #- required !?! + targets['OBSCONDITIONS'] = obsconditions.mask(args.prog.upper()) #np.zeros(n, dtype='i8')+int(3) + targets['SCND_TARGET'] = np.zeros(n, dtype='i8')+int(0) + targets['ZWARN'] = np.zeros(n, dtype='i8')+int(0) + targets['TARGETID'] = np.arange(1,n+1) + + targets.write(out_file_name, overwrite = args.overwrite) + + fits.setval(out_file_name, 'EXTNAME', value='TARGETS', ext=1) + fits.setval(out_file_name, 'OBSCON', value=args.prog.upper(), ext=1) diff --git a/scripts/mock_tools/prepare_script.sh b/scripts/mock_tools/prepare_script.sh new file mode 100755 index 000000000..0ead070bf --- /dev/null +++ b/scripts/mock_tools/prepare_script.sh @@ -0,0 +1,12 @@ +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 0 --realmax 2 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 2 --realmax 4 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 4 --realmax 6 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 6 --realmax 8 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 8 --realmax 10 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 10 --realmax 12 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 12 --realmax 14 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 +srun -N 1 -C cpu -t 04:00:00 -q interactive 
--account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 14 --realmax 16 --isProduction y --split_snapshot y --new_version AbacusSummit_v3
+srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 16 --realmax 18 --isProduction y --split_snapshot y --new_version AbacusSummit_v3
+srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 18 --realmax 20 --isProduction y --split_snapshot y --new_version AbacusSummit_v3
+srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 20 --realmax 22 --isProduction y --split_snapshot y --new_version AbacusSummit_v3
+srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 22 --realmax 25 --isProduction y --split_snapshot y --new_version AbacusSummit_v3

diff --git a/scripts/mock_tools/run_Y1SecondGen_batch.sh b/scripts/mock_tools/run_Y1SecondGen_batch.sh
new file mode 100755
index 000000000..023d86020
--- /dev/null
+++ b/scripts/mock_tools/run_Y1SecondGen_batch.sh
@@ -0,0 +1,7 @@
+SeconGenVer=AbacusSummit_v3 #AbacusSummit
+for j in {15..24}
+do
+    echo $j
+    echo $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled
+    python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled DARK
+done

diff --git a/scripts/mock_tools/run_mtl_ledger.py b/scripts/mock_tools/run_mtl_ledger.py
new file mode 100644
index 000000000..2b5965780
--- /dev/null
+++ b/scripts/mock_tools/run_mtl_ledger.py
@@ -0,0 +1,37 @@
+from desitarget import mtl
+import sys
+import glob
+import os
+
+par=True
+
+arg1 = sys.argv[1] #Input mock
+arg2 = sys.argv[2] #Output path
+obscon = sys.argv[3] #DARK or BRIGHT
+
+print('Running initial ledgers')
+if par:
+    mtl.make_ledger(arg1, arg2, obscon=obscon.upper(), numproc=12)
+else:
+    mtl.make_ledger(arg1, arg2, obscon=obscon.upper())
+
+
+print('Creating list of tiles to be processed by AltMTL mock production')
+
+path = os.path.join(arg2, 'main', obscon.lower())
+
+ff = glob.glob(os.path.join(path, 'mtl-{obscon}-hp-*.ecsv'.format(obscon=obscon.lower())))
+
+dd=[]
+
+for f in ff:
+    dd.append(int(f.split('hp-')[-1].split('.ecsv')[0]))
+tosave = ','.join(map(str, sorted(dd)))
+
+savepath = os.path.join(arg2, 'hpxlist_{obscon}.txt'.format(obscon = obscon.lower()))
+
+ff = open(savepath, 'w')
+ff.write(tosave)
+ff.close()
+
+print('saving list of HP ledgers in '+savepath)
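run_mtl_ledger.py wraps desitarget's mtl.make_ledger and then recovers each healpix number from the ledger file names it produced, saving them as a comma-separated list. The parsing step reduces to this standalone sketch (the file names here are made up for illustration):

# standalone sketch of the 'mtl-*-hp-N.ecsv' name parsing above; file names are made up
ff = ['mtl-dark-hp-100.ecsv', 'mtl-dark-hp-7.ecsv', 'mtl-dark-hp-42.ecsv']
dd = [int(f.split('hp-')[-1].split('.ecsv')[0]) for f in ff]
print(','.join(map(str, sorted(dd))))   # -> 7,42,100

Sorting the integers (rather than the strings) keeps the healpix list in numeric order, which is the format the downstream hpxlist files expect.

diff --git a/scripts/mock_tools/script_lrgmask_Y1.sh b/scripts/mock_tools/script_lrgmask_Y1.sh
new file mode 100755
index 000000000..fc55a8bf0
--- /dev/null
+++ b/scripts/mock_tools/script_lrgmask_Y1.sh
@@ -0,0 +1,25 @@
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py 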
--tracer lrg -i 0 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 1 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 2 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 3 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 4 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 5 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 6 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 7 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 8 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 9 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 10 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 11 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 12 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 13 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 14 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 15 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 16 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 17 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C 
cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 18 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 19 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 20 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 21 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 22 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 23 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 24 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 diff --git a/scripts/readwrite_pixel_bitmask.py b/scripts/readwrite_pixel_bitmask.py index 3f5b3290b..d1bcb482d 100644 --- a/scripts/readwrite_pixel_bitmask.py +++ b/scripts/readwrite_pixel_bitmask.py @@ -30,6 +30,7 @@ parser.add_argument('-rv', '--tarver', default='targetsDR9v1.1.1', required=False) parser.add_argument( '--cat_type', default='targets')#, choices=['targets','ran','obielg','Ab2ndgen'],required=False) parser.add_argument( '--reg', default='north', choices=['north','south'],required=False) +parser.add_argument( '--secgen_ver', default=None, required=False) args = parser.parse_args() @@ -43,7 +44,10 @@ input_path = '/global/cfs/cdirs/desi/survey/catalogs/image_simulations/ELG/dr9/Y1/'+args.reg+'/file0_rs0_skip0/merged/matched_input_full.fits' output_path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/elg_obiwan_'+args.reg+'_matched_input_full_'+args.tracer+'_imask.fits' if args.cat_type == 'Ab2ndgen': - mockdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/' + if args.secgen_ver is None: + mockdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/' + else: + mockdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/%s/' % args.secgen_ver input_path = mockdir+'forFA'+args.input+'.fits' output_path = mockdir+'forFA'+args.input+'_matched_input_full_'+args.tracer+'_imask.fits' if args.cat_type == 'Y1EZmock': From 126278646dc5848f06b82244b94b4746a4f8c48e Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Wed, 29 Nov 2023 03:07:12 -0800 Subject: [PATCH 033/297] changes --- scripts/mock_tools/abamtl_combd_cat_sbatch.sh | 12 +++++++ scripts/mock_tools/mkCat_SecondGen_amtl.py | 31 +++++++++++++------ scripts/mock_tools/run1_AMTLmock_combd_LSS.sh | 7 +++++ ...=> run_Y1SecondGen_initialledger_batch.sh} | 0 4 files changed, 40 insertions(+), 10 deletions(-) create mode 100755 scripts/mock_tools/abamtl_combd_cat_sbatch.sh create mode 100755 scripts/mock_tools/run1_AMTLmock_combd_LSS.sh rename scripts/mock_tools/{run_Y1SecondGen_batch.sh => run_Y1SecondGen_initialledger_batch.sh} (100%) diff --git 
a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh new file mode 100755 index 000000000..4d147da9d --- /dev/null +++ b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh @@ -0,0 +1,12 @@ +#!/bin/bash +#SBATCH --time=03:00:00 +#SBATCH --qos=regular +#SBATCH --nodes=1 +#SBATCH --constraint=cpu +#SBATCH --array=0-24 +#SBATCH --account=desi + +source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main +PYTHONPATH=$PYTHONPATH:/pscratch/sd/a/acarnero/codes/LSS/py + +srun /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh $SLURM_ARRAY_TASK_ID diff --git a/scripts/mock_tools/mkCat_SecondGen_amtl.py b/scripts/mock_tools/mkCat_SecondGen_amtl.py index d58d7da60..63e22812f 100644 --- a/scripts/mock_tools/mkCat_SecondGen_amtl.py +++ b/scripts/mock_tools/mkCat_SecondGen_amtl.py @@ -23,6 +23,7 @@ import LSS.mocktools as mocktools #import LSS.mkCat_singletile.fa4lsscat as fa from LSS.globals import main +import errno if os.environ['NERSC_HOST'] == 'cori': scratch = 'CSCRATCH' @@ -32,6 +33,15 @@ print('NERSC_HOST is not cori or permutter but is '+os.environ['NERSC_HOST']) sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding') +def test_dir(value): + if not os.path.exists(value): + try: + os.makedirs(value, 0o755) + print('made ' + value) + except OSError as e: + if e.errno != errno.EEXIST: + raise + parser = argparse.ArgumentParser() parser.add_argument("--tracer", help="tracer type to be selected") @@ -154,18 +164,20 @@ lssdir = os.path.join(maindir, 'mock'+str(mocknum)).format(MOCKNUM=mocknum) -if not os.path.exists(lssdir): - os.mkdir(lssdir) - print('made '+lssdir) +test_dir(lssdir) +#if not os.path.exists(lssdir): +# os.mkdir(lssdir) +# print('made '+lssdir) dirout = os.path.join(lssdir, 'LSScats') dirfinal = dirout if args.outmd == 'scratch': dirout = dirout.replace('/global/cfs/cdirs/desi/survey/catalogs/',os.getenv('SCRATCH')+'/') +test_dir(dirout) -if not os.path.exists(dirout): - os.makedirs(dirout) - print('made '+dirout) +#if not os.path.exists(dirout): +# os.makedirs(dirout) +# print('made '+dirout) if args.tracer != 'dark' and args.tracer != 'bright': @@ -181,16 +193,15 @@ asn = None pa = None outdir = os.path.join(maindir, 'fba' + str(mocknum)).format(MOCKNUM=mocknum) +test_dir(outdir) + if args.mockver == 'ab_secondgen' and args.combd == 'y': print('--- START COMBD ---') print('entering altmtl') tarf = os.path.join(args.targDir, 'forFA%d.fits' % mocknum) ##tarf = '/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/forFA%d.fits' % mocknum #os.path.join(maindir, 'forFA_Real%d.fits' % mocknum) - fbadir = os.path.join(args.simName, 'Univ000', 'fa', 'MAIN').format(MOCKNUM = mocknum) + fbadir = os.path.join(maindir, 'Univ000', 'fa', 'MAIN').format(MOCKNUM = mocknum) #fbadir = os.path.join(args.simName, 'Univ000', 'fa', 'MAIN').format(MOCKNUM = str(mocknum).zfill(3)) - - if not os.path.exists(outdir): - os.mkdir(outdir) print('entering common.combtiles_wdup_altmtl for FASSIGN') asn = common.combtiles_wdup_altmtl('FASSIGN', tiles, fbadir, os.path.join(outdir, 'datcomb_' + pdir + 'assignwdup.fits'), tarf, addcols=['TARGETID','RSDZ','TRUEZ','ZWARN']) diff --git a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh new file mode 100755 index 000000000..6b2621e11 --- /dev/null +++ b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh @@ -0,0 +1,7 @@ +#!/bin/bash +python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 + +mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/ +mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/ +chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* +chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* diff --git a/scripts/mock_tools/run_Y1SecondGen_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh similarity index 100% rename from scripts/mock_tools/run_Y1SecondGen_batch.sh rename to scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh From 3860aef33e1fc482d281565995ff5877b724f6f7 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Wed, 29 Nov 2023 03:09:32 -0800 Subject: [PATCH 034/297] Adding combtiles for altmlt mocks --- py/LSS/common_tools.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/py/LSS/common_tools.py b/py/LSS/common_tools.py index f552d61e3..017f087c5 100644 --- a/py/LSS/common_tools.py +++ b/py/LSS/common_tools.py @@ -1425,7 +1425,6 @@ def combtiles_wdup_altmtl(pa_hdu, tiles, fbadir, outf, tarf, addcols=['TARGETID' fa = join(fa,ft,keys=['TARGETID']) if len(fa) != lb4join: print(tile,lb4join,len(fa)) - sel = fa['TARGETID'] >= 0 fa = fa[sel] td += 1 @@ -1505,4 +1504,4 @@ def return_altmtl_fba_fadate(tileid): FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz' fhtOrig = fitsio.read_header(FAOrigName) fadate = fhtOrig['RUNDATE'] - return ''.join(fadate.split('T')[0].split('-')) \ No newline at end of file + return ''.join(fadate.split('T')[0].split('-')) From 0aef2a45c2ffdfeeaeb4608ffb9c68d6c57cde71 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Fri, 1 Dec 2023 05:36:54 -0800 Subject: [PATCH 035/297] changes --- scripts/mock_tools/abamtl_cat_sbatch.sh | 4 ++-- scripts/mock_tools/run1_AMTLmock_LSS.sh | 10 +++++----- scripts/mock_tools/run1_AMTLmock_combd_LSS.sh | 10 +++++----- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/scripts/mock_tools/abamtl_cat_sbatch.sh b/scripts/mock_tools/abamtl_cat_sbatch.sh index e5175b4fb..52d356a40 100755 --- a/scripts/mock_tools/abamtl_cat_sbatch.sh +++ b/scripts/mock_tools/abamtl_cat_sbatch.sh @@ -3,10 +3,10 @@ #SBATCH --qos=regular #SBATCH --nodes=1 #SBATCH --constraint=cpu -#SBATCH --array=0-24 +#SBATCH --array=10-24 #SBATCH --account=desi source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main PYTHONPATH=$PYTHONPATH:$HOME/LSS/py -srun scripts/mock_tools/run1_AMTLmock_LSS.sh $SLURM_ARRAY_TASK_ID \ No newline at end of file +srun /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run1_AMTLmock_LSS.sh $SLURM_ARRAY_TASK_ID diff --git a/scripts/mock_tools/run1_AMTLmock_LSS.sh b/scripts/mock_tools/run1_AMTLmock_LSS.sh index 8c03897c3..b66908724 100755 --- a/scripts/mock_tools/run1_AMTLmock_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_LSS.sh @@ -1,7 +1,7 @@ #!/bin/bash -python 
scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit -python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit -python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 -mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit/altmtl$1/mock$1/LSScats/ -chmod 775 /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit/altmtl$1/mock$1/LSScats/*clustering* \ No newline at end of file +mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/ +chmod 775 
/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/*clustering* diff --git a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh index 6b2621e11..ca97e7c5d 100755 --- a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh @@ -1,7 +1,7 @@ #!/bin/bash -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 --combd y -mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/ -mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/ -chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* -chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* +#mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/ +#mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/ +#chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* +#chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* From 9b729904fdc034d9bb6791c0a815df3e83fbc381 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Fri, 1 Dec 2023 06:06:12 -0800 Subject: [PATCH 036/297] prepare_mocks_Y1_dark.py --- scripts/mock_tools/prepare_mocks_Y1_dark.py | 335 ++++++++++++++++++++ 1 file changed, 335 insertions(+) create mode 100644 scripts/mock_tools/prepare_mocks_Y1_dark.py diff --git a/scripts/mock_tools/prepare_mocks_Y1_dark.py b/scripts/mock_tools/prepare_mocks_Y1_dark.py new file mode 100644 index 000000000..9fc71fd38 --- /dev/null +++ b/scripts/mock_tools/prepare_mocks_Y1_dark.py @@ -0,0 +1,335 @@ +from astropy.io import fits # Access to FITS (Flexible Image Transport System) files. +from astropy.table import Table, hstack, vstack, Column # A class to represent tables of heterogeneous data. 
+import fitsio +import numpy as np +import os +import argparse +import sys +import json +from desitarget.targetmask import obsconditions +from desimodel.footprint import is_point_in_desi + +import LSS.common_tools as common +from LSS.imaging import get_pixel_bitmasknobs as bitmask #get_nobsandmask +from LSS.main.cattools import count_tiles_better +from LSS.globals import main + + +def create_dir(value): + if not os.path.exists(value): + try: + os.makedirs(value, 0o755) + print('Check directories', value) + except OSError as e: + if e.errno != errno.EEXIST: + raise + +def mask_firstgen(main=0, nz=0, Y5=0, sv3=0): + return main * (2**3) + sv3 * (2**2) + Y5 * (2**1) + nz * (2**0) + +def mask_secondgen(nz=0, foot=None, nz_lop=0): + if foot == 'Y1': + Y5 = 0 + Y1 = 1 + elif foot == 'Y5': + Y5 = 1 + Y1 = 0 + else: + Y5 = 0 + Y1 = 0 + return nz * (2**0) + Y5 * (2**1) + nz_lop * (2**2) + Y1 * (2**3) + + + + +if os.environ['NERSC_HOST'] == 'cori': + scratch = 'CSCRATCH' +elif os.environ['NERSC_HOST'] == 'perlmutter': + scratch = 'PSCRATCH' +else: + print('NERSC_HOST is not cori or permutter but is '+os.environ['NERSC_HOST']) + sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding') + +parser = argparse.ArgumentParser() +parser.add_argument("--mockver", help="type of mock to use",default=None) +parser.add_argument("--mockpath", help="Location of mock file(s)",default='/global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky') +parser.add_argument("--mockfile", help="formattable name of mock file(s). e.g. cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits. TYPE will be replaced with tracer type. PH will be replaced with realization number for simulation of mock.",default='cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits') +parser.add_argument("--realmin", help="number for the realization",default=0,type=int) +parser.add_argument("--realmax", help="number for the realization",default=1,type=int) +parser.add_argument("--prog", help="dark or bright",default='dark') +parser.add_argument("--base_output", help="base directory for output",default='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/') +parser.add_argument("--apply_mask", help="apply the same mask as applied to desi targets?",default='y') +parser.add_argument("--downsampling", help="downsample to Y1 target density in SecondGen Abacus mocks?",default='n') +parser.add_argument("--isProduction", help="Say yes if you want to save in main production directory",default='n') +parser.add_argument("--overwrite", help="Overwrite. if it is in production, this always will be no. 
You must delete by hand first", default=0, type=bool) +parser.add_argument("--split_snapshot", help="apply different snapshots to different redshift ranges?",default='n') +parser.add_argument("--new_version", help="If production, and this is a new version, set to name, for example, AbacusSummit_v3",default=None) + +args = parser.parse_args() +tiletab = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/tiles-{PROG}.fits'.format(PROG = args.prog.upper())) + +if args.prog == 'dark': + types = ['ELG', 'LRG', 'QSO'] + priority = {'ELG':3000, 'LRG':3200, 'QSO':3400} + mainp = main(tp = 'QSO', specver = 'iron') + desitar = {'ELG':34, 'LRG':1, 'QSO':4} + numobs = {'ELG':2, 'LRG':2, 'QSO':4} + + if args.split_snapshot == 'y': + zs = {'ELG':{'z0.950':[0.,1.1], 'z1.325':[1.1,99.]}, 'LRG':{'z0.500':[0.,0.6], 'z0.800':[0.6,99.]}, 'QSO':{'z1.400':[0.,99.]}} + else: + zs = {'ELG':'z1.100', 'LRG':'z0.800', 'QSO':'z1.400'} + + + if args.mockver == 'ab_secondgen': + desitar = {'ELG':2**1, 'LRG':2**0, 'QSO':2**2} + downsampling = {'ELG':0.7345658717688022, 'LRG':0.708798313382828, 'QSO':0.39728966594530174} + percentage_elg_hip = 0.1 + + +if args.isProduction == 'y': + args.base_output = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks' + args.overwrite = False + if args.new_version is not None: + Abacus_dir = args.new_version + else: + 'AbacusSummit' +else: + if args.base_output == '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks' or args.base_output == '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/': + args.base_output = scratch + print('This is not production, run on user scratch', scratch) + else: + print('Saving to path', args.base_output) + + + +for real in range(args.realmin, args.realmax): + if not (args.mockver is None): + if args.mockver == 'ab_firstgen': + mockpath = '/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/AbacusSummit/CutSky/' + file_name = 'cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits' + mockdir = os.path.join(args.base_output, 'FirstGenMocks', 'AbacusSummit') + + out_file_name = os.path.join(mockdir, 'forFA{real}.fits'.format(real=real)) + + + elif args.mockver == 'ezmocks6': + mockdir = os.path.join(args.base_output, 'EZMocks_6Gpc') + mockpath = '/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc' + out_file_name = os.path.join(mockdir, 'EZMocks_6Gpc_{real}.fits'.format(real=real)) + + elif args.mockver == 'ab_secondgen': + mockpath = args.mockpath + file_name = 'cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits' + + mockdir = os.path.join(args.base_output, 'SecondGenMocks', Abacus_dir) + #if args.split_snapshot == 'y': + create_dir(mockdir) + if not os.path.isfile(os.path.join(mockdir, 'prepare_mock_arguments.txt')): + with open(os.path.join(mockdir, 'prepare_mock_arguments.txt'), 'w') as f: + json.dump(args.__dict__, f, indent=2) + + out_file_name = os.path.join(mockdir, 'forFA{real}.fits'.format(real=real)) + #else: + # out_file_name = os.path.join(mockdir, 'forFA{real}.fits'.format(real=real)) + + else: + raise ValueError(args.mockver+' not supported with legacy mockver argument. 
Use mockpath/mockfilename arguments instead.') + else: + mockpath = args.mockpath + file_name = args.mockfile + mockdir = args.base_output + out_file_name = os.path.join(mockdir, 'forFA{0}.fits'.format(real)) + print('generic mock, it needs a mock generation to continue, it will select mockver = ab_secondgen') + args.mockver = 'ab_secondgen' + + + print('testing and creating output directory', mockdir) + create_dir(mockdir) + print('will write outputs to ', out_file_name) + + + mockdir = args.base_output + + + datat = [] + for type_ in types: + if args.mockver == 'ab_firstgen' or args.mockver == 'ab_secondgen': + if args.split_snapshot == 'y': + datas = [] + + for bins in zs[type_]: + print(bins) + thepath = os.path.join(mockpath, type_, bins, file_name.format(TYPE = type_, Z = bins, PH = "%03d" % real)) + print('thepath') + print(thepath) + dat = fitsio.read(thepath, columns=['RA','DEC','Z','Z_COSMO','STATUS'])#f[1].data + mask = (dat['Z']>= zs[type_][bins][0])&(dat['Z']< zs[type_][bins][1]) + datas.append(Table(dat[mask])) + data = vstack(datas) + del datas + del dat + else: + thepath = os.path.join(mockpath, type_, zs[type_], file_name.format(TYPE = type_, Z = zs[type_], PH = "%03d" % real)) + data = fitsio.read(thepath, columns=['RA','DEC','Z','Z_COSMO','STATUS'])#f[1].data + + elif args.mockver == 'ezmocks6': + path_ezmock = os.path.join(mockpath, type_, zs[type_]) + if type_ == "LRG": + infn1 = os.path.join(path_ezmock, "cutsky_LRG_z0.800_EZmock_B6000G1536Z0.8N216424548_b0.385d4r169c0.3_seed{real}_NGC.fits".format(real = real)) + infn2 = os.path.join(path_ezmock, "cutsky_LRG_z0.800_EZmock_B6000G1536Z0.8N216424548_b0.385d4r169c0.3_seed{real}_SGC.fits".format(real = real)) + elif type_ == "ELG": + infn1 = os.path.join(path_ezmock, "cutsky_ELG_z1.100_EZmock_B6000G1536Z1.1N648012690_b0.345d1.45r40c0.05_seed{real}_NGC.fits".format(real = real)) + infn2 = os.path.join(path_ezmock, "cutsky_ELG_z1.100_EZmock_B6000G1536Z1.1N648012690_b0.345d1.45r40c0.05_seed{real}_SGC.fits".format(real = real)) + elif type_ == "QSO": + infn1 = os.path.join(path_ezmock, "cutsky_QSO_z1.400_EZmock_B6000G1536Z1.4N27395172_b0.053d1.13r0c0.6_seed{real}_NGC.fits".format(real = real)) + infn2 = os.path.join(path_ezmock, "cutsky_QSO_z1.400_EZmock_B6000G1536Z1.4N27395172_b0.053d1.13r0c0.6_seed{real}_SGC.fits".format(real = real)) + tars1 = Table.read(infn1) + tars2 = Table.read(infn2) + tars1["GALCAP"] = "N" + tars2["GALCAP"] = "S" + data = vstack([tars1, tars2]) + + + print(data.dtype.names) + print(type_, len(data)) + status = data['STATUS'][()] + idx = np.arange(len(status)) + + if args.mockver == 'ab_secondgen': + + mask_main = mask_secondgen(nz=1, foot='Y1') + idx_main = idx[(status & (mask_main))==mask_main] + + if type_ == 'LRG' or type_ == 'QSO': + if args.downsampling == 'y': + ran_tot = np.random.uniform(size = len(idx_main)) + idx_main = idx_main[(ran_tot<=downsampling[type_])] + data = data[idx_main] + data = Table(data) + + data['DESI_TARGET'] = desitar[type_] + data['PRIORITY_INIT'] = priority[type_] + data['PRIORITY'] = priority[type_] + data['NUMOBS_MORE'] = numobs[type_] + data['NUMOBS_INIT'] = numobs[type_] + datat.append(data) + + else: + + mask_LOP = mask_secondgen(nz=1, foot='Y1', nz_lop=1) + idx_LOP = idx[(status & (mask_LOP))==mask_LOP] + + + idx_VLO = np.setdiff1d(idx_main, idx_LOP) + + if args.downsampling == 'y': + ran_lop = np.random.uniform(size = len(idx_LOP)) + idx_LOP = idx_LOP[(ran_lop<=downsampling[type_])] + ran_vlo = np.random.uniform(size = len(idx_VLO)) + idx_VLO = 
idx_VLO[(ran_vlo<=downsampling[type_])] + + data_lop = Table(data[idx_LOP]) + data_vlo = Table(data[idx_VLO]) + + df_lop=data_lop.to_pandas() + df_vlo=data_vlo.to_pandas() + num_HIP_LOP = int(len(df_lop) * percentage_elg_hip) + df_HIP_LOP = df_lop.sample(n=num_HIP_LOP) + remaining_LOP = df_lop.drop(df_HIP_LOP.index) + df_HIP_LOP.reset_index(drop=True, inplace=True) + remaining_LOP.reset_index(drop=True, inplace=True) + + num_HIP_VLO = int(len(df_vlo) * percentage_elg_hip) + df_HIP_VLO = df_vlo.sample(n=num_HIP_VLO) + remaining_VLO = df_vlo.drop(df_HIP_VLO.index) + df_HIP_VLO.reset_index(drop=True, inplace=True) + remaining_VLO.reset_index(drop=True, inplace=True) + + remaining_LOP['PRIORITY_INIT'] = 3100 + remaining_LOP['PRIORITY'] = 3100 + remaining_LOP['DESI_TARGET'] = 2**5 + 2**1 + remaining_VLO['PRIORITY_INIT'] = 3000 + remaining_VLO['PRIORITY'] = 3000 + remaining_VLO['DESI_TARGET'] = 2**7 + 2**1 + + df_HIP_LOP['PRIORITY_INIT'] = 3200 + df_HIP_LOP['PRIORITY'] = 3200 + df_HIP_LOP['DESI_TARGET'] = 2**6 + 2**1 + 2**5 + + df_HIP_VLO['PRIORITY_INIT'] = 3200 + df_HIP_VLO['PRIORITY'] = 3200 + df_HIP_VLO['DESI_TARGET'] = 2**6 + 2**1 + 2**5 + + remaining_LOP['NUMOBS_MORE'] = numobs[type_] + remaining_LOP['NUMOBS_INIT'] = numobs[type_] + remaining_VLO['NUMOBS_MORE'] = numobs[type_] + remaining_VLO['NUMOBS_INIT'] = numobs[type_] + df_HIP_LOP['NUMOBS_MORE'] = numobs[type_] + df_HIP_LOP['NUMOBS_INIT'] = numobs[type_] + df_HIP_VLO['NUMOBS_MORE'] = numobs[type_] + df_HIP_VLO['NUMOBS_INIT'] = numobs[type_] + + datat.append(Table.from_pandas(remaining_LOP)) + datat.append(Table.from_pandas(remaining_VLO)) + datat.append(Table.from_pandas(df_HIP_LOP)) + datat.append(Table.from_pandas(df_HIP_VLO)) + + else: + mask_main = mask_firstgen(main=0, nz=1, Y5=0, sv3=0) #no longer cutting to Y5 footprint because it doesn't actually cover Y1 + if type_ == 'LRG': + mask_main = mask_firstgen(main=1, nz=1, Y5=0, sv3=0) + idx_main = idx[(status & (mask_main))==mask_main] + data = data[idx_main] + print(len(data)) + data = Table(data) + data['DESI_TARGET'] = desitar[type_] + data['PRIORITY_INIT'] = priority[type_] + data['PRIORITY'] = priority[type_] + data['NUMOBS_MORE'] = numobs[type_] + data['NUMOBS_INIT'] = numobs[type_] + + datat.append(data) + + targets = vstack(datat) + del datat + if args.mockver != 'ab_secondgen': + print(len(targets),' in Y5 area') + selY1 = is_point_in_desi(tiletab,targets['RA'],targets['DEC']) + targets = targets[selY1] + print(len(targets),' in Y1 area') + + if args.apply_mask == 'y': + print('getting nobs and mask bits') + mask = bitmask.get_nobsandmask(targets) + maskv = mask.get_nobsandmask() + maskcols = ['NOBS_G','NOBS_R','NOBS_Z','MASKBITS'] + for col in maskcols: + targets[col] = maskv[col] + del maskv + targets = common.cutphotmask(targets, bits=mainp.imbits) + + + + n=len(targets) + targets.rename_column('Z_COSMO', 'TRUEZ') + targets.rename_column('Z', 'RSDZ') + targets['BGS_TARGET'] = np.zeros(n, dtype='i8') + targets['MWS_TARGET'] = np.zeros(n, dtype='i8') + targets['SUBPRIORITY'] = np.random.uniform(0, 1, n) + targets['BRICKNAME'] = np.full(n, '000p0000') #- required !?! 
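+    # OBSCONDITIONS below encodes the observing program (args.prog, DARK here) as the
+    # desitarget obsconditions bitmask; SCND_TARGET and ZWARN are zeroed placeholders;
+    # TARGETID is a unique positive integer per mock target.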
+ targets['OBSCONDITIONS'] = obsconditions.mask(args.prog.upper()) #np.zeros(n, dtype='i8')+int(3) + targets['SCND_TARGET'] = np.zeros(n, dtype='i8')+int(0) + targets['ZWARN'] = np.zeros(n, dtype='i8')+int(0) + targets['TARGETID'] = np.arange(1,n+1) + + targets.write(out_file_name, overwrite = args.overwrite) + + fits.setval(out_file_name, 'EXTNAME', value='TARGETS', ext=1) + fits.setval(out_file_name, 'OBSCON', value=args.prog.upper(), ext=1) + + + + +sys.exit() + From 6cd2ac4749535c3f9df82c1abe4bb321eddf6dcb Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Mon, 4 Dec 2023 04:21:28 -0800 Subject: [PATCH 037/297] adding prepare mock Y1 bright --- scripts/mock_tools/prepare_mocks_Y1_bright.py | 401 ++++++++++++++++++ 1 file changed, 401 insertions(+) create mode 100644 scripts/mock_tools/prepare_mocks_Y1_bright.py diff --git a/scripts/mock_tools/prepare_mocks_Y1_bright.py b/scripts/mock_tools/prepare_mocks_Y1_bright.py new file mode 100644 index 000000000..e86ca1b4c --- /dev/null +++ b/scripts/mock_tools/prepare_mocks_Y1_bright.py @@ -0,0 +1,401 @@ +from astropy.io import fits # Access to FITS (Flexible Image Transport System) files. +from astropy.table import Table, hstack, vstack, Column # A class to represent tables of heterogeneous data. +import fitsio +import numpy as np +import os +import argparse +import sys + +from desitarget.targetmask import obsconditions +from desimodel.footprint import is_point_in_desi + +import LSS.common_tools as common +from LSS.imaging import get_pixel_bitmasknobs as bitmask #get_nobsandmask +from LSS.main.cattools import count_tiles_better +from LSS.globals import main +from datetime import datetime +startTime = datetime.now() + + +def create_dir(value): + if not os.path.exists(value): + try: + os.makedirs(value, 0o755) + print('Check directories', value) + except OSError as e: + if e.errno != errno.EEXIST: + raise + +def mask_firstgen(main=0, nz=0, Y5=0, sv3=0): + return main * (2**3) + sv3 * (2**2) + Y5 * (2**1) + nz * (2**0) + +def mask_secondgen(nz=0, foot=None, nz_lop=0): + if foot == 'Y1': + Y5 = 0 + Y1 = 1 + elif foot == 'Y5': + Y5 = 1 + Y1 = 0 + else: + Y5 = 0 + Y1 = 0 + return nz * (2**0) + Y5 * (2**1) + nz_lop * (2**2) + Y1 * (2**3) + + + + +if os.environ['NERSC_HOST'] == 'cori': + scratch = 'CSCRATCH' +elif os.environ['NERSC_HOST'] == 'perlmutter': + scratch = 'PSCRATCH' +else: + print('NERSC_HOST is not cori or permutter but is '+os.environ['NERSC_HOST']) + sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding') + +parser = argparse.ArgumentParser() +parser.add_argument("--mockver", help="type of mock to use",default=None) +parser.add_argument("--mockpath", help="Location of mock file(s)",default='/global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky') +parser.add_argument("--mockfile", help="formattable name of mock file(s). e.g. cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits. TYPE will be replaced with tracer type. 
PH will be replaced with realization number for simulation of mock.",default='cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits') +parser.add_argument("--realmin", help="number for the realization",default=0,type=int) +parser.add_argument("--realmax", help="number for the realization",default=1,type=int) +parser.add_argument("--prog", help="dark or bright",default='dark') +parser.add_argument("--base_output", help="base directory for output",default='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/') +parser.add_argument("--apply_mask", help="apply the same mask as applied to desi targets?",default='y') +parser.add_argument("--downsampling", help="downsample to Y1 target density in SecondGen Abacus mocks?",default='y') +parser.add_argument("--isProduction", help="Say yes if you want to save in main production directory",default='n') +parser.add_argument("--overwrite", help="Overwrite. if it is in production, this always will be no. You must delete by hand first", default=0, type=bool) +parser.add_argument("--rbandcut", help = "bgs bright cut", type=float) +args = parser.parse_args() + +tiletab = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/tiles-{PROG}.fits'.format(PROG = args.prog.upper())) + +if args.prog == 'dark': + types = ['ELG', 'LRG', 'QSO'] + priority = {'ELG':3000, 'LRG':3200, 'QSO':3400} + mainp = main(tp = 'QSO', specver = 'iron') + desitar = {'ELG':34, 'LRG':1, 'QSO':4} + numobs = {'ELG':2, 'LRG':2, 'QSO':4} + zs = {'ELG':'z1.100','LRG':'z0.800','QSO':'z1.400'} + + if args.mockver == 'ab_secondgen': + desitar = {'ELG':2**1, 'LRG':2**0, 'QSO':2**2} + downsampling = {'ELG':0.7345658717688022, 'LRG':0.708798313382828, 'QSO':0.39728966594530174} + percentage_elg_hip = 0.1 + +if args.prog == 'bright': + types = ['BGS'] + priority = {'BGS': 2100} + mainp = main(tp = 'BGS_BRIGHT', specver = 'iron') + desitar = {'BGS': 2**60} + zs = {'BGS': 'z0.200'} + numobs = {'BGS': 2} + + +if args.isProduction == 'y': + args.base_output = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks' + args.overwrite = False +else: + if args.base_output == '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks' or args.base_output == '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/': + args.base_output = scratch + print('This is not production, run on user scratch', scratch) + else: + print('Saving to path', args.base_output) + + + +for real in range(args.realmin, args.realmax): + if not (args.mockver is None): + if args.mockver == 'ab_firstgen': + mockpath = '/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/AbacusSummit/CutSky/' + file_name = 'cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits' + mockdir = os.path.join(args.base_output, 'FirstGenMocks', 'AbacusSummit') + + out_file_name = os.path.join(mockdir, 'forFA{real}.fits'.format(real=real)) + + + elif args.mockver == 'ezmocks6': + mockdir = os.path.join(args.base_output, 'EZMocks_6Gpc') + mockpath = '/global/cfs/cdirs/desi/cosmosim/FirstGenMocks/EZmock/CutSky_6Gpc' + out_file_name = os.path.join(mockdir, 'EZMocks_6Gpc_{real}.fits'.format(real=real)) + + elif args.mockver == 'ab_secondgen' or args.mockver == 'ab_secondgen_cosmosim': + if args.isProduction == 'y': + mockpath = '/global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky/' + else: + mockpath = args.mockpath + file_name = 'cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits' + + if args.prog == 'dark': + mockdir = os.path.join(args.base_output, 'SecondGenMocks', 'AbacusSummit') + elif args.prog == 'bright': + + mockdir = os.path.join(args.base_output, 'SecondGenMocks', 
'AbacusSummitBGS') + + out_file_name = os.path.join(mockdir, 'forFA{real}.fits'.format(real=real)) + +# elif args.mockver == 'ab_secondgen_cosmosim': +# mockpath = '/global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky/' +# file_name = 'cutsky_{TYPE}_{Z}_AbacusSummit_base_c000_ph{PH}.fits' +# mockdir = os.path.join(args.base_output, 'SecondGenMocks', 'AbacusSummit') +# out_file_name = os.path.join(mockdir, 'forFA{real}.fits'.format(real=real)) + + + + else: + raise ValueError(args.mockver+' not supported with legacy mockver argument. Use mockpath/mockfilename arguments instead.') + else: + mockpath = args.mockpath + file_name = args.mockfile + mockdir = args.base_output + out_file_name = os.path.join(mockdir, 'forFA{0}.fits'.format(real)) + print('generic mock, it needs a mock generation to continue, it will select mockver = ab_secondgen') + args.mockver = 'ab_secondgen' + + + print('testing and creating output directory', mockdir) + create_dir(mockdir) + print('will write outputs to ', out_file_name) + + + mockdir = args.base_output + + + datat = [] + for type_ in types: + if args.mockver == 'ab_firstgen' or args.mockver == 'ab_secondgen': + thepath = os.path.join(mockpath, type_, zs[type_], file_name.format(TYPE = type_, Z = zs[type_], PH = "%03d" % real)) + print('thepath') + print(thepath) + data = Table(fitsio.read(thepath, columns=['RA', 'DEC', 'Z', 'Z_COSMO', 'STATUS'])) + + if args.mockver == 'ab_secondgen_cosmosim': + thepath = os.path.join(mockpath, type_, 'v0.1', zs[type_], file_name.format(TYPE = type_, Z = zs[type_], PH = "%03d" % real)) + print('thepath') + print(thepath) + data = Table(fitsio.read(thepath, columns=['RA', 'DEC', 'Z', 'Z_COSMO', 'R_MAG_APP', 'R_MAG_ABS', 'IN_Y1'])) + print("Length before rbandcut") + print(len(data)) + + ''' + data = data[data["R_MAG_APP"] Date: Tue, 12 Dec 2023 02:12:00 -0800 Subject: [PATCH 038/297] more changes --- py/LSS/main/cattools.py | 2 +- scripts/mock_tools/abamtl_cat_sbatch.sh | 2 +- scripts/mock_tools/abamtl_combd_cat_sbatch.sh | 2 +- scripts/mock_tools/mkCat_SecondGen_amtl.py | 3 +- scripts/mock_tools/prepare_mocks_Y1_bright.py | 54 ++++++++++++------- scripts/mock_tools/run1_AMTLmock_LSS.sh | 11 ++-- scripts/mock_tools/run1_AMTLmock_combd_LSS.sh | 2 +- 7 files changed, 46 insertions(+), 30 deletions(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index f27876ab3..e44467da6 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2427,7 +2427,7 @@ def mkfulldat_mock(zf,imbits,ftar,tp,bit,outf,ftiles,maxp=3400,azf='',azfm='cumu ''' FOR MOCKS with fiberassign, PUT IN SOMETHING TO READ FROM MOCK FIBERASSIGN INFO''' if mockz: - assignf = os.path.join(mockassigndir, 'datcomb_darkassignwdup.fits') + assignf = os.path.join(mockassigndir, 'datcomb_{PROG}assignwdup.fits').format(PROG=prog) fs = fitsio.read(assignf.replace('global', 'dvs_ro')) fs = Table(fs) fs['TILELOCID'] = 10000*fs['TILEID'] +fs['LOCATION'] diff --git a/scripts/mock_tools/abamtl_cat_sbatch.sh b/scripts/mock_tools/abamtl_cat_sbatch.sh index 52d356a40..7c5d7d46e 100755 --- a/scripts/mock_tools/abamtl_cat_sbatch.sh +++ b/scripts/mock_tools/abamtl_cat_sbatch.sh @@ -3,7 +3,7 @@ #SBATCH --qos=regular #SBATCH --nodes=1 #SBATCH --constraint=cpu -#SBATCH --array=10-24 +#SBATCH --array=0 #SBATCH --account=desi source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main diff --git a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh index 4d147da9d..5d2e2bbc9 100755 --- 
a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh +++ b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh @@ -3,7 +3,7 @@ #SBATCH --qos=regular #SBATCH --nodes=1 #SBATCH --constraint=cpu -#SBATCH --array=0-24 +#SBATCH --array=0 #SBATCH --account=desi source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main diff --git a/scripts/mock_tools/mkCat_SecondGen_amtl.py b/scripts/mock_tools/mkCat_SecondGen_amtl.py index 63e22812f..57a5a5fa9 100644 --- a/scripts/mock_tools/mkCat_SecondGen_amtl.py +++ b/scripts/mock_tools/mkCat_SecondGen_amtl.py @@ -183,7 +183,8 @@ def test_dir(value): if args.tracer != 'dark' and args.tracer != 'bright': if args.tracer == 'BGS_BRIGHT': bit = targetmask.bgs_mask[args.tracer] - desitarg='BGS_TARGET' + desitarg='DESI_TARGET' + ##desitarg='BGS_TARGET' else: bit = targetmask.desi_mask[args.tracer] desitarg='DESI_TARGET' diff --git a/scripts/mock_tools/prepare_mocks_Y1_bright.py b/scripts/mock_tools/prepare_mocks_Y1_bright.py index e86ca1b4c..e386a1810 100644 --- a/scripts/mock_tools/prepare_mocks_Y1_bright.py +++ b/scripts/mock_tools/prepare_mocks_Y1_bright.py @@ -85,7 +85,7 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): if args.prog == 'bright': types = ['BGS'] priority = {'BGS': 2100} - mainp = main(tp = 'BGS_BRIGHT', specver = 'iron') + mainp = main(tp = 'BGS', specver = 'iron') desitar = {'BGS': 2**60} zs = {'BGS': 'z0.200'} numobs = {'BGS': 2} @@ -219,8 +219,8 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): idx_main = idx[(in_y1 == 1)] + print('SIZE FROM FILE ORIGINAL', len(data)) if type_ == 'LRG' or type_ == 'QSO': - print(len(data)) if args.downsampling == 'y': ran_tot = np.random.uniform(size = len(idx_main)) idx_main = idx_main[(ran_tot<=downsampling[type_])] @@ -246,6 +246,8 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): data = Table(data[idx_main]) + print('SIZE FROM FILE AFTER Y1 cut', len(data)) + data['DESI_TARGET'] = desitar[type_] data['PRIORITY_INIT'] = priority[type_] data['PRIORITY'] = priority[type_] @@ -254,26 +256,40 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): mask_bright = data["R_MAG_APP"]=args.rbandcut)&(data["R_MAG_APP"]<=20.175) dat_bright = data[mask_bright] - dat_faint = data[~mask_bright] - dat_faint = dat_faint[(dat_faint["R_MAG_APP"] <= 20.175)] + dat_faint = data[mask_faint] + + print('size of BRIGHT', len(dat_bright)) + print('size of FAINT', len(dat_faint)) + + dat_bright['BGS_TARGET'] = 2**1 + + dat_faint['BGS_TARGET'] = 2**0 + #dat_faint['PRIORITY_INIT'] = 2000 + #dat_faint['PRIORITY'] = 2000 + + + #dat_faint = dat_faint[(dat_faint["R_MAG_APP"] <= 20.175)] datat.append(dat_bright) PromoteFracBGSFaint=0.2 ran_hip = np.random.uniform(size = len(dat_faint)) - dat_faint['DESI_TARGET'][(ran_hip<=PromoteFracBGSFaint)] += 2**3 - dat_faint['PRIORITY_INIT'][(ran_hip<=PromoteFracBGSFaint)] = 2100 - dat_faint['PRIORITY'][(ran_hip<=PromoteFracBGSFaint)] = 2100 + dat_faint_f = dat_faint[(ran_hip>PromoteFracBGSFaint)] + dat_faint_hip = dat_faint[(ran_hip<=PromoteFracBGSFaint)] - datat.append(dat_faint) + dat_faint_hip['BGS_TARGET'] += 2**3 + + dat_faint_f['PRIORITY_INIT'] = 2000 + + dat_faint_f['PRIORITY'] = 2000 + + + datat.append(dat_faint_f) + datat.append(dat_faint_hip) elif type_ == 'ELG': @@ -357,10 +373,10 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): targets = vstack(datat) del datat - if args.mockver != 'ab_secondgen' or args.mockver != 'ab_secondgen_cosmosim': - print(len(targets),' in Y5 area') - selY1 = is_point_in_desi(tiletab,targets['RA'],targets['DEC']) - targets = targets[selY1] + ###if 
args.mockver != 'ab_secondgen' or args.mockver != 'ab_secondgen_cosmosim': + print(len(targets),' in Y5 area') + selY1 = is_point_in_desi(tiletab,targets['RA'],targets['DEC']) + targets = targets[selY1] print(len(targets),' in Y1 area') if args.apply_mask == 'y': @@ -378,9 +394,7 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): n=len(targets) targets.rename_column('Z_COSMO', 'TRUEZ') targets.rename_column('Z', 'RSDZ') - if args.prog == 'bright': - targets['BGS_TARGET'] = targets['DESI_TARGET'] #2 * np.ones(n, dtype='i8') - else: + if args.prog == 'dark': targets['BGS_TARGET'] = np.zeros(n, dtype='i8') targets['MWS_TARGET'] = np.zeros(n, dtype='i8') targets['SUBPRIORITY'] = np.random.uniform(0, 1, n) diff --git a/scripts/mock_tools/run1_AMTLmock_LSS.sh b/scripts/mock_tools/run1_AMTLmock_LSS.sh index b66908724..6e48d214e 100755 --- a/scripts/mock_tools/run1_AMTLmock_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_LSS.sh @@ -1,7 +1,8 @@ #!/bin/bash -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_ANY --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS +python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_BRIGHT --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 -mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/ -chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/*clustering* +mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl$1/mock$1/LSScats/ +chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl$1/mock$1/LSScats/*clustering* diff --git a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh index ca97e7c5d..fab62df9a 100755 --- a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh @@ -1,5 +1,5 @@ #!/bin/bash -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 --combd y +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer bright --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS --combd y #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/ #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/ From d8770edeb5fe97bdccdef0a0479f68188a2a3ea3 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Thu, 14 Dec 2023 02:10:57 -0800 Subject: [PATCH 039/297] scripts/mock_tools/run1_AMTLmock_LSS.sh --- scripts/mock_tools/run1_AMTLmock_LSS.sh | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/scripts/mock_tools/run1_AMTLmock_LSS.sh b/scripts/mock_tools/run1_AMTLmock_LSS.sh index 6e48d214e..dc673c6ed 100755 --- a/scripts/mock_tools/run1_AMTLmock_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_LSS.sh @@ -1,8 +1,9 @@ 
#!/bin/bash -#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_ANY --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_BRIGHT --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS -#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_ANY --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_BRIGHT --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir 
/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 -mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl$1/mock$1/LSScats/ -chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl$1/mock$1/LSScats/*clustering* +mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/ +chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/*clustering* From b6564d53ae41d46205463b1ac579e64188c8560a Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Sun, 17 Dec 2023 05:21:06 -0800 Subject: [PATCH 040/297] changes --- scripts/mock_tools/prepare_mocks_Y1_dark.py | 14 ++++++++------ scripts/mock_tools/run1_AMTLmock_combd_LSS.sh | 2 +- .../run_Y1SecondGen_initialledger_batch.sh | 14 ++++++++------ 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/scripts/mock_tools/prepare_mocks_Y1_dark.py b/scripts/mock_tools/prepare_mocks_Y1_dark.py index 9fc71fd38..94f30c138 100644 --- a/scripts/mock_tools/prepare_mocks_Y1_dark.py +++ b/scripts/mock_tools/prepare_mocks_Y1_dark.py @@ -86,18 +86,17 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): downsampling = {'ELG':0.7345658717688022, 'LRG':0.708798313382828, 'QSO':0.39728966594530174} percentage_elg_hip = 0.1 +Abacus_dir = 'AbacusSummit' if args.isProduction == 'y': args.base_output = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks' args.overwrite = False if args.new_version is not None: Abacus_dir = args.new_version - else: - 'AbacusSummit' else: if args.base_output == '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks' or args.base_output == '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/': - args.base_output = scratch - print('This is not production, run on user scratch', scratch) + args.base_output = os.environ[scratch] + print('This is not production, run on user scratch', os.environ[scratch]) else: print('Saving to path', args.base_output) @@ -234,6 +233,7 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): df_lop=data_lop.to_pandas() df_vlo=data_vlo.to_pandas() + num_HIP_LOP = int(len(df_lop) * percentage_elg_hip) df_HIP_LOP = df_lop.sample(n=num_HIP_LOP) remaining_LOP = df_lop.drop(df_HIP_LOP.index) @@ -249,6 +249,8 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): 
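+        # DESI_TARGET sums below follow desitarget's main-survey desi_mask bits
+        # (inferred): 2**1 = ELG, 2**5 = ELG_LOP, 2**6 = ELG_HIP, 2**7 = ELG_VLO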
remaining_LOP['PRIORITY_INIT'] = 3100 remaining_LOP['PRIORITY'] = 3100 remaining_LOP['DESI_TARGET'] = 2**5 + 2**1 + + remaining_VLO['PRIORITY_INIT'] = 3000 remaining_VLO['PRIORITY'] = 3000 remaining_VLO['DESI_TARGET'] = 2**7 + 2**1 @@ -259,7 +261,7 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): df_HIP_VLO['PRIORITY_INIT'] = 3200 df_HIP_VLO['PRIORITY'] = 3200 - df_HIP_VLO['DESI_TARGET'] = 2**6 + 2**1 + 2**5 + df_HIP_VLO['DESI_TARGET'] = 2**6 + 2**1 + 2**7 remaining_LOP['NUMOBS_MORE'] = numobs[type_] remaining_LOP['NUMOBS_INIT'] = numobs[type_] @@ -321,7 +323,7 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): targets['OBSCONDITIONS'] = obsconditions.mask(args.prog.upper()) #np.zeros(n, dtype='i8')+int(3) targets['SCND_TARGET'] = np.zeros(n, dtype='i8')+int(0) targets['ZWARN'] = np.zeros(n, dtype='i8')+int(0) - targets['TARGETID'] = np.arange(1,n+1) + targets['TARGETID'] = np.random.permutation(np.arange(1,n+1)) targets.write(out_file_name, overwrite = args.overwrite) diff --git a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh index fab62df9a..3ea487aa4 100755 --- a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh @@ -1,5 +1,5 @@ #!/bin/bash -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer bright --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS --combd y +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_test/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_test --combd y #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/ #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/ diff --git a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh index 023d86020..c1615b8fe 100755 --- a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh +++ b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh @@ -1,7 +1,9 @@ SeconGenVer=AbacusSummit_v3 #AbacusSummit -for j in {15..24} -do - echo $j - echo $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled - python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled DARK -done +#for j in {15..24} +#do +j=0 +echo $j +#echo $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py /pscratch/sd/a/acarnero/CutSky_v3_1/forFA$j.fits /pscratch/sd/a/acarnero/CutSky_v3_1/altmtl$j/initled DARK 
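+# run_mtl_ledger.py positional arguments, as invoked above:
+#   <forFA targets FITS> <output directory for the initial MTL ledgers> <program (DARK here)>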
+#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled DARK +#done From 5e9fd5383c2ea0084e98986522d2094520892fd6 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Mon, 18 Dec 2023 14:38:14 -0800 Subject: [PATCH 041/297] s --- scripts/mock_tools/prepare_script.sh | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/scripts/mock_tools/prepare_script.sh b/scripts/mock_tools/prepare_script.sh index 0ead070bf..d8a476c04 100755 --- a/scripts/mock_tools/prepare_script.sh +++ b/scripts/mock_tools/prepare_script.sh @@ -1,12 +1,12 @@ -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 0 --realmax 2 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 2 --realmax 4 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 4 --realmax 6 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 6 --realmax 8 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 8 --realmax 10 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 10 --realmax 12 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 12 --realmax 14 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 14 --realmax 16 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 16 --realmax 18 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 18 --realmax 20 --isProduction y --split_snapshot y 
--new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 20 --realmax 22 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 -srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 22 --realmax 25 --isProduction y --split_snapshot y --new_version AbacusSummit_v3 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 0 --realmax 2 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 2 --realmax 4 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 4 --realmax 6 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 6 --realmax 8 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 8 --realmax 10 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 10 --realmax 12 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 12 --realmax 14 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 14 --realmax 16 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 16 --realmax 18 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 18 --realmax 20 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python 
prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 20 --realmax 22 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 +srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 22 --realmax 25 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1 From 3a8ad433dc839e58bd28c2b8d657e2706384e54f Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Tue, 19 Dec 2023 07:58:22 -0800 Subject: [PATCH 042/297] che --- scripts/mock_tools/all_ab_amtl_tracer.sh | 2 +- scripts/mock_tools/getpota_Y1_script.sh | 50 +++++++++---------- scripts/mock_tools/run1_AMTLmock_LSS.sh | 12 +++-- scripts/mock_tools/run1_AMTLmock_combd_LSS.sh | 2 +- .../run_Y1SecondGen_initialledger_batch.sh | 12 ++--- scripts/mock_tools/script_lrgmask_Y1.sh | 50 +++++++++---------- 6 files changed, 65 insertions(+), 63 deletions(-) diff --git a/scripts/mock_tools/all_ab_amtl_tracer.sh b/scripts/mock_tools/all_ab_amtl_tracer.sh index d1fbac023..4a6f558b6 100755 --- a/scripts/mock_tools/all_ab_amtl_tracer.sh +++ b/scripts/mock_tools/all_ab_amtl_tracer.sh @@ -3,6 +3,6 @@ source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main PYTHONPATH=$PYTHONPATH:$HOME/LSS/py for ((i=$1;i<=$2;i++ )) do - srun -N 1 -C cpu -t 01:00:00 --qos interactive --account desi python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $i --survey Y1 --add_gtl y --specdata iron --tracer $3 --notqso $4 --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit + srun -N 1 -C cpu -t 01:00:00 --qos interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /pscratch/sd/a/acarnero/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $i --survey Y1 --add_gtl y --specdata iron --tracer $3 --notqso $4 --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /pscratch/sd/a/acarnero/SecondGenMocks/AbacusSummit done diff --git a/scripts/mock_tools/getpota_Y1_script.sh b/scripts/mock_tools/getpota_Y1_script.sh index 2bb9c60f1..e9cd542c5 100755 --- a/scripts/mock_tools/getpota_Y1_script.sh +++ b/scripts/mock_tools/getpota_Y1_script.sh @@ -1,25 +1,25 @@ -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 0 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 1 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 2 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 3 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --secgen_ver 
AbacusSummit_v3 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 0 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 1 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 2 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 3 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive 
--account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --secgen_ver AbacusSummit_v3_1 diff --git a/scripts/mock_tools/run1_AMTLmock_LSS.sh b/scripts/mock_tools/run1_AMTLmock_LSS.sh index dc673c6ed..34b0fd2ca 100755 --- a/scripts/mock_tools/run1_AMTLmock_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_LSS.sh @@ -1,9 +1,11 @@ #!/bin/bash -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /pscratch/sd/a/acarnero/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /pscratch/sd/a/acarnero/SecondGenMocks/AbacusSummit +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /pscratch/sd/a/acarnero/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /pscratch/sd/a/acarnero/SecondGenMocks/AbacusSummit +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /pscratch/sd/a/acarnero/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /pscratch/sd/a/acarnero/SecondGenMocks/AbacusSummit #python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_ANY --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS #python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output 
/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_BRIGHT --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 -mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/ -chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/*clustering* +#mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/ +#chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/*clustering* diff --git a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh index 3ea487aa4..f772e9746 100755 --- a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh @@ -1,5 +1,5 @@ #!/bin/bash -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_test/altmtl{MOCKNUM} --mockver 
ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_test --combd y +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_test/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_test --combd y --joindspec y #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/ #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/ diff --git a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh index c1615b8fe..c09874908 100755 --- a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh +++ b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh @@ -1,9 +1,9 @@ -SeconGenVer=AbacusSummit_v3 #AbacusSummit -#for j in {15..24} -#do -j=0 +SeconGenVer=AbacusSummit_v3_1 #AbacusSummit +for j in {0..24} +do +#j=0 echo $j #echo $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py /pscratch/sd/a/acarnero/CutSky_v3_1/forFA$j.fits /pscratch/sd/a/acarnero/CutSky_v3_1/altmtl$j/initled DARK +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled DARK #python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled DARK -#done +done diff --git a/scripts/mock_tools/script_lrgmask_Y1.sh b/scripts/mock_tools/script_lrgmask_Y1.sh index fc55a8bf0..92dca491f 100755 --- a/scripts/mock_tools/script_lrgmask_Y1.sh +++ b/scripts/mock_tools/script_lrgmask_Y1.sh @@ -1,25 +1,25 @@ -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 0 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 1 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 2 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 3 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 4 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 5 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 6 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 7 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 8 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 9 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 10 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 11 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 12 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 13 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 14 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 15 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 16 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 17 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 18 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 19 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 20 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 21 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 
22 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 23 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 24 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 0 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 1 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 2 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 3 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 4 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 5 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 6 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 7 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 8 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 9 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 10 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 11 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 12 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 13 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 14 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C 
cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 15 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 16 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 17 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 18 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 19 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 20 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 21 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 22 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 23 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 24 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 From d234ff5d10a69647335f70e3d4645a63cafc03f6 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Tue, 19 Dec 2023 13:54:43 -0800 Subject: [PATCH 043/297] ch --- scripts/mock_tools/getpota_Y1_script.sh | 12 ++++++------ .../run_Y1SecondGen_initialledger_batch.sh | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/scripts/mock_tools/getpota_Y1_script.sh b/scripts/mock_tools/getpota_Y1_script.sh index e9cd542c5..fbe84236b 100755 --- a/scripts/mock_tools/getpota_Y1_script.sh +++ b/scripts/mock_tools/getpota_Y1_script.sh @@ -1,9 +1,9 @@ -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 0 --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 1 --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 2 --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 3 --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --secgen_ver AbacusSummit_v3_1 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 0 --secgen_ver AbacusSummit_v3_1 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 1 --secgen_ver AbacusSummit_v3_1 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 2 --secgen_ver AbacusSummit_v3_1 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 3 --secgen_ver AbacusSummit_v3_1 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --secgen_ver AbacusSummit_v3_1 +#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --secgen_ver AbacusSummit_v3_1 srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --secgen_ver AbacusSummit_v3_1 srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --secgen_ver AbacusSummit_v3_1 srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --secgen_ver AbacusSummit_v3_1 diff --git a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh index c09874908..724511790 100755 --- a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh +++ b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh @@ -1,5 +1,5 @@ SeconGenVer=AbacusSummit_v3_1 #AbacusSummit -for j in {0..24} +for j in {4..24} do #j=0 echo $j From b88ed96f458ecc0a674ebf0854e45231a267199b Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Mon, 8 Jan 2024 03:37:07 -0800 Subject: [PATCH 044/297] A --- scripts/mock_tools/abamtl_combd_cat_sbatch.sh | 2 +- scripts/mock_tools/run1_AMTLmock_combd_LSS.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh index 5d2e2bbc9..46e17db41 100755 --- a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh +++ b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh @@ -3,7 +3,7 @@ #SBATCH --qos=regular #SBATCH --nodes=1 #SBATCH --constraint=cpu -#SBATCH --array=0 +#SBATCH --array=0-3,5-24 #SBATCH --account=desi source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main diff --git a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh index f772e9746..0d4554598 100755 --- a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh @@ -1,5 +1,5 @@ #!/bin/bash -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_test/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_test --combd y --joindspec y 
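For reference, the hand-unrolled srun lists in getpota_Y1_script.sh and script_lrgmask_Y1.sh, the for j in {4..24} loop, and the #SBATCH --array=0-3,5-24 header above all enumerate the same quantity: the mock realization index (narrowing the range or the array spec is how already-finished realizations get skipped). A minimal sketch of the two equivalent forms; the loop reuses only commands visible in these scripts, while the array variant assumes the job body reads SLURM_ARRAY_TASK_ID, which is not shown in this hunk:

    # Loop form, equivalent to the per-realization srun lists:
    for i in $(seq 6 24); do
        srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi \
            python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py \
            --realization $i --secgen_ver AbacusSummit_v3_1
    done

    # Array form (inside an sbatch script with #SBATCH --array=0-3,5-24,
    # assuming the job body keys off the task id):
    i=$SLURM_ARRAY_TASK_ID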
+python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1 --combd y --joindspec y #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/ #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/ From 1b0979850dd530003b594d423abe2056f9a18d37 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Thu, 18 Jan 2024 02:58:05 -0800 Subject: [PATCH 045/297] Adding new codes to mock processing --- bin/Y1ALTMTLRealizationsBRIGHT_mock.sh | 310 +++ bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh | 337 +++ bin/Y1ALTMTLRealizationsDARK_mock.sh | 319 +++ bin/Y1ALTMTLRealizationsDARK_mock_init.sh | 341 +++ bin/Y1Bitweights128RealizationsDARK_mock.sh | 341 +++ bin/dateLoopAltMTLBugFix_mock_batch.sh | 67 + bin/runAltMTLParallel.py | 8 +- bin/runAltMTLRealizations.py | 175 ++ py/LSS/SV3/altmtltools.py | 4 +- py/LSS/main/cattools.py | 36 +- py/LSS/main/mockaltmtltools.py | 2223 +++++++++++++++++ scripts/mock_tools/add_extra_realizations.py | 28 + scripts/mock_tools/add_extra_tilesTracker.py | 28 + scripts/mock_tools/mkCat_SecondGen_amtl.py | 44 +- scripts/mock_tools/prepare_mocks_Y1_bright.py | 14 +- scripts/mock_tools/prepare_script_bright.sh | 9 + 16 files changed, 4253 insertions(+), 31 deletions(-) create mode 100755 bin/Y1ALTMTLRealizationsBRIGHT_mock.sh create mode 100755 bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh create mode 100755 bin/Y1ALTMTLRealizationsDARK_mock.sh create mode 100755 bin/Y1ALTMTLRealizationsDARK_mock_init.sh create mode 100755 bin/Y1Bitweights128RealizationsDARK_mock.sh create mode 100755 bin/dateLoopAltMTLBugFix_mock_batch.sh create mode 100755 bin/runAltMTLRealizations.py create mode 100644 py/LSS/main/mockaltmtltools.py create mode 100644 scripts/mock_tools/add_extra_realizations.py create mode 100644 scripts/mock_tools/add_extra_tilesTracker.py create mode 100755 scripts/mock_tools/prepare_script_bright.sh diff --git a/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh b/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh new file mode 100755 index 000000000..159715dba --- /dev/null +++ b/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh @@ -0,0 +1,310 @@ +#!/bin/bash +start=`date +%s.%N` + +#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written +#simName=JL_DebugReprocReprod2 +simName="altmtl{mock_number}" +#Location where you have cloned the LSS Repo +path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin + +# Flags for debug/verbose mode/profiling code time usage. +# Uncomment second set of options to turn on the modes +#debug='' +#verbose='' +profile='' +debug='--debug' +verbose='--verbose' +#profile='--profile' + +if [ -z "$debug" ] +then + echo "\$debug is empty" +else + echo "\$debug is set" + pwd + InitWorkingDirectory=`pwd` + cd $path2LSS + cd .. + pwd + pip install --user . + cd $InitWorkingDirectory + pwd + echo "end of pip in script attempt" +fi + +#Uncomment second option if running on mocks +mock='--mock' + +#ALTMTLHOME is a home directory for all of your alternate MTLs. 
Default is your scratch directory +#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" +#However, you should specify your own directory to a. not overwrite the survey alt MTLs +# and b. keep your alt MTLs somewhere that you have control/access + +#Uncomment the following line to set your own/nonscratch directory +#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/ + +if [[ "${NERSC_HOST}" == "cori" ]]; then + CVal='haswell' + QVal='interactive' + ProcPerNode=32 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$CSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi +elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then + srunConfig='-C cpu -q regular' + CVal='cpu' + QVal='interactive' + ProcPerNode=128 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$PSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi + +else + echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" + exit 1234 +fi + + + + +#Options for InitializeAltMTLs + +#Random seed. Change to any integer you want (or leave the same) +#If seed is different between two otherwise identical runs, the initial MTLs will also be different +#seed is also saved in output directory +#seed=14126579 +seed=3593589 +#Number of realizations to generate. Ideally a multiple of 64 for bitweights +#However, you can choose smaller numbers for debugging +mockinit=0 +mockend=1 +let ndir=$mockend-$mockinit + + +#Uncomment second option if you want to clobber already existing files for Alt MTL generation +overwrite='' +#overwrite='--overwrite' + +#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") +#obscon='DARK' +obscon='BRIGHT' + +#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) +#survey='sv3' +survey='main' +# options are default None (empty strings). Uncommenting the second options will set them to the Y1 start and end dates. +startDate='' +#endDate='' +#startDate='2021-05-13T08:15:37+00:00' +endDate='2022-06-24T00:00:00+00:00' + +#For rundate formatting in simName, either manually modify the string below +#to be the desired date or comment that line out and uncomment the +#following line to autogenerate date strings. +#To NOT use any date string specification, use the third line, an empty string +#datestring='071322' +#datestring=`date +%y%m%d` +datestring='' + +#Can save time in MTL generation by first writing files to local tmp directory and then copying over later +#uncommenting the second option will directly write to your output directory +usetmp='' +#usetmp='--dontUseTemp' + +if [ -z $usetmp ] +then + outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` +else + outputMTLDirBaseBase=$ALTMTLHOME +fi +printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey +printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey + +#List of healpixels to create Alt MTLs for +#hpListFile="$path2LSS/MainSurveyHPList_mock.txt" +hpListFile="/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl0/initled/hpxlist_bright.txt" +#hpListFile="$path2LSS/DebugMainHPList.txt" +#hpListFile="$path2LSS/SV3HPList.txt" + +#These two options only are considered if the obscon is BRIGHT +#First option indicates whether to shuffle the top level priorities +#of BGS_FAINT/BGS_FAINT_HIP. 
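A note on the printf -v pattern above: it writes the formatted path into the named variable instead of stdout, and the trailing arguments ($datestring, $ndir, $survey) are consumed only if $simName carries printf conversion specifiers. With simName="altmtl{mock_number}" there are none, so those arguments are ignored, and the {mock_number} placeholder is presumably filled per realization by the Python driver added in this patch (bin/runAltMTLRealizations.py). A minimal sketch with a hypothetical specifier-bearing simName:

    simName='Univ%03d_%s'      # hypothetical name containing printf specifiers
    printf -v outputMTLDirBase "$ALTMTLHOME/$simName/" 7 dark
    echo "$outputMTLDirBase"   # -> $ALTMTLHOME/Univ007_dark/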
Uncomment second option to turn off shuffling of bright time priorities +#Second option indicates what fraction/percent +#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 + +#shuffleBrightPriorities='--shuffleBrightPriorities' +shuffleBrightPriorities='' + + +#shuffleELGPriorities='' +#shuffleELGPriorities='--shuffleELGPriorities' + +#PromoteFracBGSFaint=0.2 +PromoteFracBGSFaint=0.0 +#PromoteFracELG=0.1 +PromoteFracELG=0.0 + +# location of original MTLs to shuffle. +# Default directory is a read only mount of the CFS filesystem +# You can only access that directory from compute nodes. +# Do NOT use the commented out directory (the normal mount of CFS) +# unless the read only mount is broken +#exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#Options for DateLoopAltMTL and runAltMTLParallel +exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl0/initled + +#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). +#Default = Empty String/False. Uncomment second option if you want to restart from the first observations +#PLEASE DO NOT CHANGEME +echo "Fix QR resetting for new argparse usage" +qR='' +#qR='-qr' + +#Number of observation dates to loop through +#Defaults to 40 dates for SV3 +NObsDates=99999 + +# Whether to submit a new job with dateLoopAltMTL for each date +# or to submit a single job +# multiDate=0 +multiDate='--multiDate' +echo 'setting QVal here for debug. Fix later.' +#QVal='debug' +QVal='regular' +#QVal='interactive' +#Number of nodes to run on. This will launch up to 64*N jobs +#if that number of alternate universes have already been generated +#Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually +NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode )) +#echo $NNodes +#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs +#This should only be turned on for SV testing/debugging purposes +#This should not be required for main survey debugging. +getosubp='' +#getosubp='--getosubp' + +#shuffleSubpriorities(reproducing) must be left as empty strings to ensure +#subpriorities are shuffled. Debug mode for main survey +#will only require these flags to be set by uncommenting second options + +#dontShuffleSubpriorities='' +#reproducing='' +#dontShuffleSubpriorities='--dontShuffleSubpriorities' +#reproducing='--reproducing' +#Include secondary targets? +secondary='' +#secondary='--secondary' + + +#If running from mocks, must set target directory. +#Otherwise this is optional +#targfile='' #CHANGEME IF RUNNING ON MOCKS +#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory +targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/forFA{mock_number}.fits" +#targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' +#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' + + +#Default is use numobs from ledger.
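The NNodes arithmetic above is the usual integer ceiling-division idiom, ceil(ndir / ProcPerNode) without floating point, so the node request always covers the requested number of realizations. A worked sketch with the Perlmutter value of ProcPerNode:

    ProcPerNode=128
    ndir=25;  echo $(( (ndir + ProcPerNode - 1) / ProcPerNode ))   # 1 node
    ndir=129; echo $(( (ndir + ProcPerNode - 1) / ProcPerNode ))   # 2 nodes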
Uncomment second option to set numobs NOT from ledger +numobs_from_ledger='' +#numobs_from_ledger='--NumObsNotFromLedger' + +#Uncomment second line to force redo fiber assignment if it has already been done. +redoFA='' +#redoFA='--redoFA' + + +#Options for MakeBitweightsParallel +#True/False(1/0) as to whether to split bitweight calculation +#among nodes by MPI between realizations +#splitByReal=1 + +#Split the calculation of bitweights into splitByChunk +#chunks of healpixels. +#splitByChunk=1 + +#Set to true (1) if you want to clobber already existing bitweight files +overwrite2='' +#overwrite2='--overwrite' +#Actual running of scripts + +#Copy this script to output directory for reproducibility +thisFileName=$outputMTLFinalDestination/$0 + +echo $thisFileName + +#if [ -f "$thisFileName" ] +#then +# echo "File is found. Checking to see it is identical to the original." +# cmp $0 $thisFileName +# comp=$? +# if [[ $comp -eq 1 ]] +# then +# echo "Files are not identical." +# echo "If this is intended, please delete or edit the original copied script at $thisFileName" +# echo "If this is unintended, you can reuse the original copied script at that same location" +# echo "goodbye" +# exit 3141 +# elif [[ $comp -eq 0 ]] +# then +# echo "files are same, continuing" +# else +# echo "Something has gone very wrong. Exit code for cmp was $a" +# exit $a +# fi +#else +# echo "Copied script is not found. Copying now, making directories as needed." +# mkdir -p $outputMTLFinalDestination +# cp $SLURM_SUBMIT_DIR $0 $outputMTLFinalDestination/$0 +#fi + +if [ -d "$outputMTLFinalDestination" ] +then + echo "output final directory exists" + echo $outputMTLFinalDestination +else + echo "output final directory does not exist. Creating and copying script there" + mkdir -p $outputMTLFinalDestination + cp $0 $outputMTLFinalDestination +fi + +if [ -z $getosubp ] +then + touch $outputMTLFinalDestination/GetOSubpTrue +fi + +printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring + +runtimeInit=$( echo "$endInit - $start" | bc -l ) +argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate $reproducing --mockmin=$mockinit --mockmax=$mockend" +echo 'argstring for dateloop' +echo $argstring +nohup bash $path2LSS/dateLoopAltMTLBugFix_mock_batch.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL + +endDL=`date +%s.%N` + +if [ $?
-ne 0 ]; then + runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) + echo "runtime for Dateloop of $NObsDates days" + echo $runtimeDateLoop + exit 12345 +fi +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop +exit 54321 + + + +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) + +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop diff --git a/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh b/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh new file mode 100755 index 000000000..faffd575f --- /dev/null +++ b/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh @@ -0,0 +1,337 @@ +#!/bin/bash +start=`date +%s.%N` + +#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written +#simName=JL_DebugReprocReprod2 +simName="altmtl0" +#Location where you have cloned the LSS Repo +path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin + +# Flags for debug/verbose mode/profiling code time usage. +# Uncomment second set of options to turn on the modes +#debug='' +#verbose='' +profile='' +debug='--debug' +verbose='--verbose' +#profile='--profile' + +if [ -z "$debug" ] +then + echo "\$debug is empty" +else + echo "\$debug is set" + pwd + InitWorkingDirectory=`pwd` + cd $path2LSS + cd .. + pwd + pip install --user . + cd $InitWorkingDirectory + pwd + echo "end of pip in script attempt" +fi + +#Uncomment second option if running on mocks +mock='--mock' + +#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory +#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" +#However, you should specify your own directory to a. not overwrite the survey alt MTLs +# and b. keep your alt MTLs somewhere that you have control/access + +#Uncomment the following line to set your own/nonscratch directory +#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/ + +if [[ "${NERSC_HOST}" == "cori" ]]; then + CVal='haswell' + QVal='interactive' + ProcPerNode=32 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$CSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi +elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then + srunConfig='-C cpu -q regular' + CVal='cpu' + QVal='interactive' + ProcPerNode=128 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$PSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi + +else + echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" + exit 1234 +fi + + + + +#Options for InitializeAltMTLs + +#Random seed. Change to any integer you want (or leave the same) +#If seed is different between two otherwise identical runs, the initial MTLs will also be different +#seed is also saved in output directory +#seed=14126579 +seed=3593589 +#Number of realizations to generate. Ideally a multiple of 64 for bitweights +#However, you can choose smaller numbers for debugging +ndir=1 + +#Uncomment second option if you want to clobber already existing files for Alt MTL generation +overwrite='' +#overwrite='--overwrite' + +#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") +#obscon='DARK' +obscon='BRIGHT' + +#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) +#survey='sv3' +survey='main' +# options are default None (empty strings). 
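One caveat in the timing block of Y1ALTMTLRealizationsBRIGHT_mock.sh above: the test if [ $? -ne 0 ] runs after endDL=`date +%s.%N`, so $? reflects that date command substitution (effectively always 0) rather than the nohup'ed date loop. A minimal sketch of a status-preserving variant; this is a suggested pattern, not what the committed script does:

    nohup bash $path2LSS/dateLoopAltMTLBugFix_mock_batch.sh $NObsDates $NNodes \
        $path2LSS $CVal $QVal $qR $argstring >& $OFDL
    rc=$?                   # capture immediately, before date overwrites $?
    endDL=$(date +%s.%N)
    if [ $rc -ne 0 ]; then
        echo "date loop failed with status $rc"
        exit 12345
    fi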
Uncommenting the second options will set them to the Y1 start and end dates. +startDate='' +#endDate='' +#startDate='2021-05-13T08:15:37+00:00' +endDate='2022-06-24T00:00:00+00:00' + +#For rundate formatting in simName, either manually modify the string below +#to be the desired date or comment that line out and uncomment the +#following line to autogenerate date strings. +#To NOT use any date string specification, use the third line, an empty string +#datestring='071322' +#datestring=`date +%y%m%d` +datestring='' + +#Can save time in MTL generation by first writing files to local tmp directory and then copying over later +#uncommenting the second option will directly write to your output directory +usetmp='' +#usetmp='--dontUseTemp' + +if [ -z $usetmp ] +then + outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` +else + outputMTLDirBaseBase=$ALTMTLHOME +fi +printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey +printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey + +#List of healpixels to create Alt MTLs for +#hpListFile="$path2LSS/MainSurveyHPList_mock.txt" +hpListFile="/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl0/initled/hpxlist_bright.txt" +#hpListFile="$path2LSS/DebugMainHPList.txt" +#hpListFile="$path2LSS/SV3HPList.txt" + +#These two options only are considered if the obscon is BRIGHT +#First option indicates whether to shuffle the top level priorities +#of BGS_FAINT/BGS_FAINT_HIP. Uncomment second option to turn off shuffling of bright time priorities +#Second option indicates what fraction/percent +#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 + +#shuffleBrightPriorities='--shuffleBrightPriorities' +shuffleBrightPriorities='' + + +#shuffleELGPriorities='' +shuffleELGPriorities='--shuffleELGPriorities' + +#PromoteFracBGSFaint=0.2 +PromoteFracBGSFaint=0.0 +PromoteFracELG=0.1 +#PromoteFracELG=0.0 + +# location of original MTLs to shuffle. +# Default directory is a read only mount of the CFS filesystem +# You can only access that directory from compute nodes. +# Do NOT use the commented out directory (the normal mount of CFS) +# unless the read only mount is broken +#exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#Options for DateLoopAltMTL and runAltMTLParallel +exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl0/initled + +#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). +#Default = Empty String/False. Uncomment second option if you want to restart from the first observations +#PLEASE DO NOT CHANGEME +echo "Fix QR resetting for new argparse usage" +qR='' +#qR='-qr' + +#Number of observation dates to loop through +#Defaults to 40 dates for SV3 +NObsDates=99999 + +# Whether to submit a new job with dateLoopAltMTL for each date +# or to submit a single job +# multiDate=0 +multiDate='--multiDate' +echo 'setting QVal here for debug. Fix later.' +#QVal='debug' +#QVal='regular' +QVal='interactive' +#Number of nodes to run on.
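Throughout these scripts a boolean option is toggled by pointing a variable at either the empty string or the literal flag (debug='--debug' vs debug=''), then expanding it unquoted on the command line; word splitting drops the empty case entirely. A minimal sketch of the convention:

    verbose='--verbose'   # enabled
    debug=''              # disabled
    python bin/InitializeAltMTLsParallel.py $verbose $debug --ndir=1
    # expands to: python bin/InitializeAltMTLsParallel.py --verbose --ndir=1

This only stays safe while each flag is a single word with no spaces; quoting the variable ("$debug") would pass an empty argument instead of omitting it.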
This will launch up to 64*N jobs +#if that number of alternate universes have already been generated +#Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually +NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode )) +#echo $NNodes +#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs +#This should only be turned on for SV testing/debugging purposes +#This should not be required for main survey debugging. +getosubp='' +#getosubp='--getosubp' + +#shuffleSubpriorities(reproducing) must be left as empty strings to ensure +#subpriorities are shuffled. Debug mode for main survey +#will only require these flags to be set by uncommenting second options + +dontShuffleSubpriorities='' +reproducing='' +#dontShuffleSubpriorities='--dontShuffleSubpriorities' +#reproducing='--reproducing' +#Include secondary targets? +secondary='' +#secondary='--secondary' + + +#If running from mocks, must set target directory. +#Otherwise this is optional +#targfile='' #CHANGEME IF RUNNING ON MOCKS +#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory +targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/forFA0.fits" +#targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' +#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' + + +#Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger +numobs_from_ledger='' +#numobs_from_ledger='--NumObsNotFromLedger' + +#Uncomment second line to force redo fiber assignment if it has already been done. +redoFA='' +#redoFA='--redoFA' + + +#Options for MakeBitweightsParallel +#True/False(1/0) as to whether to split bitweight calculation +#among nodes by MPI between realizations +#splitByReal=1 + +#Split the calculation of bitweights into splitByChunk +#chunks of healpixels. +#splitByChunk=1 + +#Set to true (1) if you want to clobber already existing bitweight files +overwrite2='' +#overwrite2='--overwrite' +#Actual running of scripts + +#Copy this script to output directory for reproducibility +thisFileName=$outputMTLFinalDestination/$0 + +echo $thisFileName + +if [ -f "$thisFileName" ] +then + echo "File is found. Checking to see it is identical to the original." + cmp $0 $thisFileName + comp=$? + if [[ $comp -eq 1 ]] + then + echo "Files are not identical." + echo "If this is intended, please delete or edit the original copied script at $thisFileName" + echo "If this is unintended, you can reuse the original copied script at that same location" + echo "goodbye" + exit 3141 + elif [[ $comp -eq 0 ]] + then + echo "files are same, continuing" + else + echo "Something has gone very wrong. Exit code for cmp was $comp" + exit $comp + fi +else + echo "Copied script is not found. Copying now, making directories as needed." + mkdir -p $outputMTLFinalDestination + cp $SLURM_SUBMIT_DIR/$0 $outputMTLFinalDestination/$0 +fi + +if [ -d "$outputMTLFinalDestination" ] +then + echo "output final directory exists" + echo $outputMTLFinalDestination +else + echo "output final directory does not exist.
Creating and copying script there" + mkdir -p $outputMTLFinalDestination + cp $0 $outputMTLFinalDestination +fi + +if [ -z $getosubp ] +then + touch $outputMTLFinalDestination/GetOSubpTrue +fi + +printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $date + +echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM" +srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 02:15:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM +if [ $? -ne 0 ]; then + exit 1234 + endInit=`date +%s.%N` + runtimeInit=$( echo "$endInit - $start" | bc -l ) + echo "runtime for initialization" + echo $runtimeInit +fi + +endInit=`date +%s.%N` +runtimeInit=$( echo "$endInit - $start" | bc -l ) +echo "runtime for initialization" +echo $runtimeInit + +#printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring + +#runtimeInit=$( echo "$endInit - $start" | bc -l ) +#argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate $reproducing" +#echo 'argstring for dateloop' +#echo $argstring +#nohup bash $path2LSS/dateLoopAltMTLBugFix.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL + +#endDL=`date +%s.%N` + +#if [ $? 
-ne 0 ]; then +# runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +# echo "runtime for Dateloop of $NObsDates days" +# echo $runtimeDateLoop +# exit 12345 +#fi +#runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +#echo "runtime for Dateloop of $NObsDates days" +#echo $runtimeDateLoop +#exit 54321 + + + +#printf -v OFBW "%s/MakeBitweights%sOutput%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring +#srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py --survey=$survey --obscon=$obscon --ndir=$ndir --ProcPerNode=$ProcPerNode --HPListFile=$hpListFile --outdir=$outputMTLFinalDestination $overwrite2 $verbose $debug >& $OFBW + +#endBW=`date +%s.%N` + + + +runtimeInit=$( echo "$endInit - $start" | bc -l ) +#runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +#runtimeBitweights=$( echo "$endBW - $endDL" | bc -l ) + +echo "runtime for initialization" +echo $runtimeInit +#echo "runtime for Dateloop of $NObsDates days" +#echo $runtimeDateLoop +#echo "runtime for making bitweights" +#echo $runtimeBitweights diff --git a/bin/Y1ALTMTLRealizationsDARK_mock.sh b/bin/Y1ALTMTLRealizationsDARK_mock.sh new file mode 100755 index 000000000..622bb0f55 --- /dev/null +++ b/bin/Y1ALTMTLRealizationsDARK_mock.sh @@ -0,0 +1,319 @@ +#!/bin/bash +start=`date +%s.%N` + +##TEMPrealization=0 + +#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written +#simName=JL_DebugReprocReprod2 +simName="altmtl{mock_number}" +#Location where you have cloned the LSS Repo +path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin/ + +# Flags for debug/verbose mode/profiling code time usage. +# Uncomment second set of options to turn on the modes +#debug='' +#verbose='' +profile='' +debug='--debug' +verbose='--verbose' +#profile='--profile' + +#if [ -z "$debug" ] +#then +# echo "\$debug is empty" +#else +# echo "\$debug is set" +# pwd +# InitWorkingDirectory=`pwd` +# cd $path2LSS +# cd .. +# pwd +# pip install --user . +# cd $InitWorkingDirectory +# pwd +# echo "end of pip in script attempt" +#fi + +#Uncomment second option if running on mocks +#mock='' +mock='--mock' + +#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory +#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" +#However, you should specify your own directory to a. not overwrite the survey alt MTLs +# and b. keep your alt MTLs somewhere that you have control/access + +#Uncomment the following line to set your own/nonscratch directory +#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ +ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/ +#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/ + +if [[ "${NERSC_HOST}" == "cori" ]]; then + CVal='haswell' + QVal='interactive' + ProcPerNode=32 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$CSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi +elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then + srunConfig='-C cpu -q regular' + CVal='cpu' + QVal='interactive' + ProcPerNode=128 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$PSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi + +else + echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" + exit 1234 +fi + + + + +#Options for InitializeAltMTLs + +#Random seed. 
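The runtime bookkeeping in all of these scripts pairs sub-second timestamps from date +%s.%N with bc -l, since bash arithmetic is integer-only and cannot subtract fractional seconds. A self-contained sketch of the same pattern:

    start=$(date +%s.%N)
    sleep 2
    end=$(date +%s.%N)
    runtime=$( echo "$end - $start" | bc -l )   # floating-point seconds, e.g. 2.003
    echo "runtime: $runtime"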
Change to any integer you want (or leave the same) +#If seed is different between two otherwise identical runs, the initial MTLs will also be different +#seed is also saved in output directory +#seed=14126579 +seed=3593589 +#Number of realizations to generate. Ideally a multiple of 64 for bitweights +#However, you can choose smaller numbers for debugging +#Mock realization +mockinit=10 +mockend=11 +let ndir=$mockend-$mockinit + + +#Uncomment second option if you want to clobber already existing files for Alt MTL generation +overwrite='' +#overwrite='--overwrite' + +#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") +obscon='DARK' +#obscon='BRIGHT' + +#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) +#survey='sv3' +survey='main' +# options are default None (empty strings). Uncommenting the second options will set them to the Y1 start and end dates. +startDate='' +#endDate='' +#startDate='2021-05-13T08:15:37+00:00' +endDate='2022-06-24T00:00:00+00:00' + +#For rundate formatting in simName, either manually modify the string below +#to be the desired date or comment that line out and uncomment the +#following line to autogenerate date strings. +#To NOT use any date string specification, use the third line, an empty string +#datestring='071322' +#datestring=`date +%y%m%d` +datestring='' + +#Can save time in MTL generation by first writing files to local tmp directory and then copying over later +#uncommenting the second option will directly write to your output directory +usetmp='' +#usetmp='--dontUseTemp' + +if [ -z $usetmp ] +then + outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` +else + outputMTLDirBaseBase=$ALTMTLHOME +fi +printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey +printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey + +#List of healpixels to create Alt MTLs for +#hpListFile="$path2LSS/MainSurveyHPList_mock.txt" +##TEMPhpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$realization/initled/hpxlist_dark.txt" +#hpListFile="$path2LSS/MainSurveyHPList.txt" +#hpListFile="$path2LSS/DebugMainHPList.txt" +#hpListFile="$path2LSS/SV3HPList.txt" + +#These two options only are considered if the obscon is BRIGHT +#First option indicates whether to shuffle the top level priorities +#of BGS_FAINT/BGS_FAINT_HIP. Uncomment section option to turn off shuffling of bright time priorities +#Second option indicates what fraction/percent +#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 + +#shuffleBrightPriorities='--shuffleBrightPriorities' +shuffleBrightPriorities='' + + +shuffleELGPriorities='' +#shuffleELGPriorities='--shuffleELGPriorities' + +#PromoteFracBGSFaint=0.2 +PromoteFracBGSFaint=0.0 +#PromoteFracELG=0.1 +PromoteFracELG=0. + +# location of original MTLs to shuffle. +# Default directory is a read only mount of the CFS filesystem +# You can only access that directory from compute nodes. 
+# Do NOT use the commented out directory (the normal mount of CFS) +# unless the read only mount is broken +##TEMPexampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$realization/initled +#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#Options for DateLoopAltMTL and runAltMTLParallel + +#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). +#Default = Empty String/False. Uncomment second option if you want to restart from the first observations +#PLEASE DO NOT CHANGEME +echo "Fix QR resetting for new argparse usage" +qR='' +#qR='-qr' + +#Number of observation dates to loop through +#Defaults to 40 dates for SV3 +NObsDates=99999 + +# Whether to submit a new job with dateLoopAltMTL for each date +# or to submit a single job +# multiDate=0 +multiDate='--multiDate' +echo 'setting QVal here for debug. Fix later.' +#QVal='debug' +#QVal='regular' +QVal='interactive' +# + + + +#Number of nodes to run on. This will launch up to 64*N jobs +#if that number of alternate universes have already been generated +#Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually +NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode )) +#echo $NNodes +#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs +#This should only be turned on for SV testing/debugging purposes +#This should not be required for main survey debugging. +getosubp='' +#getosubp='--getosubp' + +#shuffleSubpriorities(reproducing) must be left as empty strings to ensure +#subpriorities are shuffled. debug mode for main survey +#will only require these flags to be set by uncommenting second options + +dontShuffleSubpriorities='' +reproducing='' +#dontShuffleSubpriorities='--dontShuffleSubpriorities' +#reproducing='--reproducing' +#Include secondary targets? +secondary='' +#secondary='--secondary' + + +#If running from mocks, must set target directory. +#Otherwise this is optional +#targfile='' #CHANGEME IF RUNNING ON MOCKS +#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory +targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA{mock_number}.fits" +##targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA{mock_number}.fits" +#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' + + +#Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger +numobs_from_ledger='' +#numobs_from_ledger='--NumObsNotFromLedger' + +#Uncomment second line to force redo fiber assignment if it has already been done. +redoFA='' +#redoFA='--redoFA' + + +#Options for MakeBitweightsParallel +#True/False(1/0) as to whether to split bitweight calculation +#among nodes by MPI between realizations +#splitByReal=1 + +#Split the calculation of bitweights into splitByChunk +#chunks of healpixels. 
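+# (Illustrative aside: each realization contributes one assigned/not-assigned
+# flag per target, and pack_bitweights stores those flags in 64-bit integers,
+# which is why ndir is ideally a multiple of 64 -- e.g. ndir=128 packs into
+# exactly two 64-bit words per target.)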
+#splitByChunk=1

+#Set to true (1) if you want to clobber already existing bitweight files
+overwrite2=''
+#overwrite2='--overwrite'
+#Actual running of scripts
+
+#Copy this script to output directory for reproducibility
+thisFileName=$outputMTLFinalDestination/$0
+
+echo $thisFileName
+
+#if [ -f "$thisFileName" ]
+#then
+#    echo "File is found. Checking to see it is identical to the original."
+#    cmp $0 $thisFileName
+#    comp=$?
+#    if [[ $comp -eq 1 ]]
+#    then
+#        echo "Files are not identical."
+#        echo "If this is intended, please delete or edit the original copied script at $thisFileName"
+#        echo "If this is unintended, you can reuse the original copied script at that same location"
+#        echo "goodbye"
+#        exit 3141
+#    elif [[ $comp -eq 0 ]]
+#    then
+#        echo "files are same, continuing"
+#    else
+#        echo "Something has gone very wrong. Exit code for cmp was $comp"
+#        exit $comp
+#    fi
+#else
+#    echo "Copied script is not found. Copying now, making directories as needed."
+#    mkdir -p $outputMTLFinalDestination
+#    cp $SLURM_SUBMIT_DIR $0 $outputMTLFinalDestination/$0
+#fi
+
+if [ -d "$outputMTLFinalDestination" ]
+then
+    echo "output final directory exists"
+    echo $outputMTLFinalDestination
+else
+    echo "output final directory does not exist. Creating and copying script there"
+    mkdir -p $outputMTLFinalDestination
+    cp $0 $outputMTLFinalDestination
+fi
+
+#NOTE: this touch runs when $getosubp is *empty*, i.e. the GetOSubpTrue marker
+#is created in the default (shuffled subpriority) mode, not when --getosubp is set.
+if [ -z $getosubp ]
+then
+    touch $outputMTLFinalDestination/GetOSubpTrue
+fi
+
+printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
+
+#runtimeInit is not computed in this script: the initialization srun is skipped
+#here, so endInit is never set.
+argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate $reproducing --mockmin=$mockinit --mockmax=$mockend"
+echo 'argstring for dateloop'
+echo $argstring
+nohup bash $path2LSS/dateLoopAltMTLBugFix_mock_batch.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL
+retcode=$?
+
+endDL=`date +%s.%N`
+
+if [ $retcode -ne 0 ]; then
+    runtimeDateLoop=$( echo "$endDL - $start" | bc -l )
+    echo "runtime for Dateloop of $NObsDates days"
+    echo $runtimeDateLoop
+    exit 12345
+fi
+runtimeDateLoop=$( echo "$endDL - $start" | bc -l )
+echo "runtime for Dateloop of $NObsDates days"
+echo $runtimeDateLoop
+exit 54321
diff --git a/bin/Y1ALTMTLRealizationsDARK_mock_init.sh b/bin/Y1ALTMTLRealizationsDARK_mock_init.sh
new file mode 100755
index 000000000..309ee9fa9
--- /dev/null
+++ b/bin/Y1ALTMTLRealizationsDARK_mock_init.sh
@@ -0,0 +1,341 @@
+#!/bin/bash
+start=`date +%s.%N`
+
+#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written
+#simName=JL_DebugReprocReprod2
+simName="altmtl10"
+#Location where you have cloned the LSS Repo
+path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin
+
+# Flags for debug/verbose mode/profiling code time usage.
+# Uncomment second set of options to turn on the modes
+#debug=''
+#verbose=''
+profile=''
+debug='--debug'
+verbose='--verbose'
+#profile='--profile'
+
+if [ -z "$debug" ]
+then
+    echo "\$debug is empty"
+else
+    echo "\$debug is set"
+    pwd
+    InitWorkingDirectory=`pwd`
+    cd $path2LSS
+    cd ..
+    pwd
+    pip install --user . 
+ cd $InitWorkingDirectory + pwd + echo "end of pip in script attempt" +fi + +#Uncomment second option if running on mocks +mock='--mock' + +#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory +#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" +#However, you should specify your own directory to a. not overwrite the survey alt MTLs +# and b. keep your alt MTLs somewhere that you have control/access + +#Uncomment the following line to set your own/nonscratch directory +#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ +#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/ +ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/ + +if [[ "${NERSC_HOST}" == "cori" ]]; then + CVal='haswell' + QVal='interactive' + ProcPerNode=32 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$CSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi +elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then + srunConfig='-C cpu -q regular' + CVal='cpu' + QVal='interactive' + ProcPerNode=128 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$PSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi + +else + echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" + exit 1234 +fi + + + + +#Options for InitializeAltMTLs + +#Random seed. Change to any integer you want (or leave the same) +#If seed is different between two otherwise identical runs, the initial MTLs will also be different +#seed is also saved in output directory +#seed=14126579 +seed=3593589 +#Number of realizations to generate. Ideally a multiple of 64 for bitweights +#However, you can choose smaller numbers for debugging +ndir=1 + +#Uncomment second option if you want to clobber already existing files for Alt MTL generation +overwrite='' +#overwrite='--overwrite' + +#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") +obscon='DARK' +#obscon='BRIGHT' + +#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) +#survey='sv3' +survey='main' +# options are default None (empty strings). Uncommenting the second options will set them to the Y1 start and end dates. +startDate='' +#endDate='' +#startDate='2021-05-13T08:15:37+00:00' +endDate='2022-06-24T00:00:00+00:00' + +#For rundate formatting in simName, either manually modify the string below +#to be the desired date or comment that line out and uncomment the +#following line to autogenerate date strings. 
+#To NOT use any date string specification, use the third line, an empty string +#datestring='071322' +#datestring=`date +%y%m%d` +datestring='' + +#Can save time in MTL generation by first writing files to local tmp directory and then copying over later +#uncommenting the second option will directly write to your output directory +usetmp='' +#usetmp='--dontUseTemp' + +if [ -z $usetmp ] +then + outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` +else + outputMTLDirBaseBase=$ALTMTLHOME +fi +printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey +printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey + +#List of healpixels to create Alt MTLs for +#hpListFile="$path2LSS/MainSurveyHPList_mock.txt" +#hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl4/initled/hpxlist_dark.txt" +hpListFile="/pscratch/sd/a/acarnero/test_main/altmtl10/initled/hpxlist_dark.txt" +#hpListFile="$path2LSS/DebugMainHPList.txt" +#hpListFile="$path2LSS/SV3HPList.txt" + +#These two options only are considered if the obscon is BRIGHT +#First option indicates whether to shuffle the top level priorities +#of BGS_FAINT/BGS_FAINT_HIP. Uncomment section option to turn off shuffling of bright time priorities +#Second option indicates what fraction/percent +#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 + +#shuffleBrightPriorities='--shuffleBrightPriorities' +shuffleBrightPriorities='' + + +shuffleELGPriorities='' +#shuffleELGPriorities='--shuffleELGPriorities' + +#PromoteFracBGSFaint=0.2 +PromoteFracBGSFaint=0.0 +#PromoteFracELG=0.1 +PromoteFracELG=0.0 + +# location of original MTLs to shuffle. +# Default directory is a read only mount of the CFS filesystem +# You can only access that directory from compute nodes. +# Do NOT use the commented out directory (the normal mount of CFS) +# unless the read only mount is broken +#exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#Options for DateLoopAltMTL and runAltMTLParallel +#exampleLedgerBase=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl4/initled/ +exampleLedgerBase=/pscratch/sd/a/acarnero/test_main/altmtl10/initled/ + +#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). +#Default = Empty String/False. Uncomment second option if you want to restart from the first observations +#PLEASE DO NOT CHANGEME +echo "Fix QR resetting for new argparse usage" +qR='' +#qR='-qr' + +#Number of observation dates to loop through +#Defaults to 40 dates for SV3 +NObsDates=99999 + +# Whether to submit a new job with dateLoopAltMTL for each date +# or to submit a single job +# multiDate=0 +multiDate='--multiDate' +echo 'setting QVal here for debug. Fix later.' +#QVal='debug' +QVal='regular' +#QVal='interactive' +#Number of nodes to run on. This will launch up to 64*N jobs +#if that number of alternate universes have already been generated +#Calculated automatically from number of sims requested and number of processes per node. 
Be careful if setting manually +NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode )) +#echo $NNodes +#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs +#This should only be turned on for SV testing/debugging purposes +#This should not be required for main survey debugging. +getosubp='' +#getosubp='--getosubp' + +#shuffleSubpriorities(reproducing) must be left as empty strings to ensure +#subpriorities are shuffled. debug mode for main survey +#will only require these flags to be set by uncommenting second options + +#dontShuffleSubpriorities='' +#reproducing='' +dontShuffleSubpriorities='--dontShuffleSubpriorities' +reproducing='--reproducing' +#Include secondary targets? +secondary='' +#secondary='--secondary' + + +#If running from mocks, must set target directory. +#Otherwise this is optional +#targfile='' #CHANGEME IF RUNNING ON MOCKS +#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory +targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA10.fits" +## THIS IS THE GOODtargfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA4.fits" +#targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' +#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' + + +#Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger +numobs_from_ledger='' +#numobs_from_ledger='--NumObsNotFromLedger' + +#Uncomment second line to force redo fiber assignment if it has already been done. +redoFA='' +#redoFA='--redoFA' + + +#Options for MakeBitweightsParallel +#True/False(1/0) as to whether to split bitweight calculation +#among nodes by MPI between realizations +#splitByReal=1 + +#Split the calculation of bitweights into splitByChunk +#chunks of healpixels. +#splitByChunk=1 + +#Set to true (1) if you want to clobber already existing bitweight files +overwrite2='' +#overwrite2='--overwrite' +#Actual running of scripts + +#Copy this script to output directory for reproducbility +thisFileName=$outputMTLFinalDestination/$0 + +echo $thisFileName + +if [ -f "$thisFileName" ] +then + echo "File is found. Checking to see it is identical to the original." + cmp $0 $thisFileName + comp=$? + if [[ $comp -eq 1 ]] + then + echo "Files are not identical." + echo "If this is intended, please delete or edit the original copied script at $thisFileName" + echo "If this is unintended, you can reuse the original copied script at that same location" + echo "goodbye" + exit 3141 + elif [[ $comp -eq 0 ]] + then + echo "files are same, continuing" + else + echo "Something has gone very wrong. Exit code for cmp was $a" + exit $a + fi +else + echo "Copied script is not found. Copying now, making directories as needed." + mkdir -p $outputMTLFinalDestination + cp $SLURM_SUBMIT_DIR $0 $outputMTLFinalDestination/$0 +fi + +if [ -d "$outputMTLFinalDestination" ] +then + echo "output final directory exists" + echo $outputMTLFinalDestination +else + echo "output final directory does not exist. 
Creating and copying script there"
+    mkdir -p $outputMTLFinalDestination
+    cp $0 $outputMTLFinalDestination
+fi
+
+#NOTE: this touch runs when $getosubp is *empty*, i.e. the GetOSubpTrue marker
+#is created in the default (shuffled subpriority) mode, not when --getosubp is set.
+if [ -z $getosubp ]
+then
+    touch $outputMTLFinalDestination/GetOSubpTrue
+fi
+
+printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $date
+
+echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM"
+srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 02:15:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM
+if [ $? -ne 0 ]; then
+    endInit=`date +%s.%N`
+    runtimeInit=$( echo "$endInit - $start" | bc -l )
+    echo "runtime for initialization"
+    echo $runtimeInit
+    exit 1234
+fi
+
+endInit=`date +%s.%N`
+runtimeInit=$( echo "$endInit - $start" | bc -l )
+echo "runtime for initialization"
+echo $runtimeInit
+
+#printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
+
+#runtimeInit=$( echo "$endInit - $start" | bc -l )
+#argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate $reproducing"
+#echo 'argstring for dateloop'
+#echo $argstring
+#nohup bash $path2LSS/dateLoopAltMTLBugFix.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL

+#endDL=`date +%s.%N`
+
+#if [ $? 
-ne 0 ]; then +# runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +# echo "runtime for Dateloop of $NObsDates days" +# echo $runtimeDateLoop +# exit 12345 +#fi +#runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +#echo "runtime for Dateloop of $NObsDates days" +#echo $runtimeDateLoop +#exit 54321 + + + +#printf -v OFBW "%s/MakeBitweights%sOutput%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring +#srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py --survey=$survey --obscon=$obscon --ndir=$ndir --ProcPerNode=$ProcPerNode --HPListFile=$hpListFile --outdir=$outputMTLFinalDestination $overwrite2 $verbose $debug >& $OFBW + +#endBW=`date +%s.%N` + + + +runtimeInit=$( echo "$endInit - $start" | bc -l ) +#runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +#runtimeBitweights=$( echo "$endBW - $endDL" | bc -l ) + +echo "runtime for initialization" +echo $runtimeInit +#echo "runtime for Dateloop of $NObsDates days" +#echo $runtimeDateLoop +#echo "runtime for making bitweights" +#echo $runtimeBitweights diff --git a/bin/Y1Bitweights128RealizationsDARK_mock.sh b/bin/Y1Bitweights128RealizationsDARK_mock.sh new file mode 100755 index 000000000..5b0068350 --- /dev/null +++ b/bin/Y1Bitweights128RealizationsDARK_mock.sh @@ -0,0 +1,341 @@ +#!/bin/bash +start=`date +%s.%N` + +#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written +#simName=JL_DebugReprocReprod2 +simName=altmtl0_R256 +#Location where you have cloned the LSS Repo +path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin/ + +# Flags for debug/verbose mode/profiling code time usage. +# Uncomment second set of options to turn on the modes +#debug='' +#verbose='' +profile='' +debug='--debug' +verbose='--verbose' +#profile='--profile' + +if [ -z "$debug" ] +then + echo "\$debug is empty" +else + echo "\$debug is set" + pwd + InitWorkingDirectory=`pwd` + cd $path2LSS + cd .. + pwd + pip install --user . + cd $InitWorkingDirectory + pwd + echo "end of pip in script attempt" +fi + +#Uncomment second option if running on mocks +#mock='' +mock='--mock' + +#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory +#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" +#However, you should specify your own directory to a. not overwrite the survey alt MTLs +# and b. keep your alt MTLs somewhere that you have control/access + +#Uncomment the following line to set your own/nonscratch directory +#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/ + +if [[ "${NERSC_HOST}" == "cori" ]]; then + CVal='haswell' + QVal='interactive' + ProcPerNode=32 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$CSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi +elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then + srunConfig='-C cpu -q regular' + CVal='cpu' + QVal='interactive' + ProcPerNode=128 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$PSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi + +else + echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" + exit 1234 +fi + + + + +#Options for InitializeAltMTLs + +#Random seed. 
Change to any integer you want (or leave the same) +#If seed is different between two otherwise identical runs, the initial MTLs will also be different +#seed is also saved in output directory +#seed=14126579 +seed=3593589 +#Number of realizations to generate. Ideally a multiple of 64 for bitweights +#However, you can choose smaller numbers for debugging +ndir=256 + +#Uncomment second option if you want to clobber already existing files for Alt MTL generation +overwrite='' +#overwrite='--overwrite' + +#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") +obscon='DARK' +#obscon='BRIGHT' + +#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) +#survey='sv3' +survey='main' +# options are default None (empty strings). Uncommenting the second options will set them to the Y1 start and end dates. +startDate='' +#endDate='' +#startDate='2021-05-13T08:15:37+00:00' +endDate='2022-06-24T00:00:00+00:00' + +#For rundate formatting in simName, either manually modify the string below +#to be the desired date or comment that line out and uncomment the +#following line to autogenerate date strings. +#To NOT use any date string specification, use the third line, an empty string +#datestring='071322' +#datestring=`date +%y%m%d` +datestring='' + +#Can save time in MTL generation by first writing files to local tmp directory and then copying over later +#uncommenting the second option will directly write to your output directory +usetmp='' +#usetmp='--dontUseTemp' + +if [ -z $usetmp ] +then + outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` +else + outputMTLDirBaseBase=$ALTMTLHOME +fi +printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey +printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey + +#List of healpixels to create Alt MTLs for +#hpListFile="$path2LSS/MainSurveyHPList_mock.txt" +#hpListFile="$path2LSS/MainSurveyHPList.txt" +#hpListFile="$path2LSS/DebugMainHPList.txt" +#hpListFile="$path2LSS/SV3HPList.txt" +hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl0/initled/hpxlist_dark.txt" + +#These two options only are considered if the obscon is BRIGHT +#First option indicates whether to shuffle the top level priorities +#of BGS_FAINT/BGS_FAINT_HIP. Uncomment section option to turn off shuffling of bright time priorities +#Second option indicates what fraction/percent +#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 + +#shuffleBrightPriorities='--shuffleBrightPriorities' +shuffleBrightPriorities='' + + +shuffleELGPriorities='' +#shuffleELGPriorities='--shuffleELGPriorities' + +#PromoteFracBGSFaint=0.2 +PromoteFracBGSFaint=0.0 +#PromoteFracELG=0.1 +PromoteFracELG=0.0 + +# location of original MTLs to shuffle. +# Default directory is a read only mount of the CFS filesystem +# You can only access that directory from compute nodes. 
+# Do NOT use the commented out directory (the normal mount of CFS) +# unless the read only mount is broken +exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl0/initled/ +#exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#Options for DateLoopAltMTL and runAltMTLParallel + +#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). +#Default = Empty String/False. Uncomment second option if you want to restart from the first observations +#PLEASE DO NOT CHANGEME +echo "Fix QR resetting for new argparse usage" +qR='' +#qR='-qr' + +#Number of observation dates to loop through +#Defaults to 40 dates for SV3 +NObsDates=99999 + +# Whether to submit a new job with dateLoopAltMTL for each date +# or to submit a single job +# multiDate=0 +multiDate='--multiDate' +echo 'setting QVal here for debug. Fix later.' +#QVal='debug' +QVal='regular' +#QVal='interactive' +#Number of nodes to run on. This will launch up to 64*N jobs +#if that number of alternate universes have already been generated +#Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually +NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode )) +#echo $NNodes +#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs +#This should only be turned on for SV testing/debugging purposes +#This should not be required for main survey debugging. +getosubp='' +#getosubp='--getosubp' + +#shuffleSubpriorities(reproducing) must be left as empty strings to ensure +#subpriorities are shuffled. debug mode for main survey +#will only require these flags to be set by uncommenting second options + +dontShuffleSubpriorities='' +reproducing='' +#dontShuffleSubpriorities='--dontShuffleSubpriorities' +#reproducing='--reproducing' +#Include secondary targets? +secondary='' +#secondary='--secondary' + + +#If running from mocks, must set target directory. +#Otherwise this is optional +#targfile='' #CHANGEME IF RUNNING ON MOCKS +#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory +#targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' +#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' +targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA0.fits" + +#Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger +numobs_from_ledger='' +#numobs_from_ledger='--NumObsNotFromLedger' + +#Uncomment second line to force redo fiber assignment if it has already been done. +redoFA='' +#redoFA='--redoFA' + + +#Options for MakeBitweightsParallel +#True/False(1/0) as to whether to split bitweight calculation +#among nodes by MPI between realizations +#splitByReal=1 + +#Split the calculation of bitweights into splitByChunk +#chunks of healpixels. 
+#splitByChunk=1 + +#Set to true (1) if you want to clobber already existing bitweight files +overwrite2='' +#overwrite2='--overwrite' +#Actual running of scripts + +#Copy this script to output directory for reproducbility +thisFileName=$outputMTLFinalDestination/$0 + +echo $thisFileName + +if [ -f "$thisFileName" ] +then + echo "File is found. Checking to see it is identical to the original." + cmp $0 $thisFileName + comp=$? + if [[ $comp -eq 1 ]] + then + echo "Files are not identical." + echo "If this is intended, please delete or edit the original copied script at $thisFileName" + echo "If this is unintended, you can reuse the original copied script at that same location" + echo "goodbye" + exit 3141 + elif [[ $comp -eq 0 ]] + then + echo "files are same, continuing" + else + echo "Something has gone very wrong. Exit code for cmp was $a" + exit $a + fi +else + echo "Copied script is not found. Copying now, making directories as needed." + mkdir -p $outputMTLFinalDestination + cp $SLURM_SUBMIT_DIR $0 $outputMTLFinalDestination/$0 +fi + +if [ -d "$outputMTLFinalDestination" ] +then + echo "output final directory exists" + echo $outputMTLFinalDestination +else + echo "output final directory does not exist. Creating and copying script there" + mkdir -p $outputMTLFinalDestination + cp $0 $outputMTLFinalDestination +fi + +if [ -z $getosubp ] +then + touch $outputMTLFinalDestination/GetOSubpTrue +fi + +printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $date + +echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM" +#srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive --dependency=afterany:20412532 $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM + +#cp -r $outputMTLFinalDestination/ "$ALTMTLHOME/BACKUPInitial_$simName/" +#exit 1234 +#if [ $? 
-ne 0 ]; then +# exit 1234 +# endInit=`date +%s.%N` +# runtimeInit=$( echo "$endInit - $start" | bc -l ) +# echo "runtime for initialization" +# echo $runtimeInit +#fi + +#endInit=`date +%s.%N` +#runtimeInit=$( echo "$endInit - $start" | bc -l ) +#echo "runtime for initialization" +#echo $runtimeInit + +printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring + +runtimeInit=$( echo "$endInit - $start" | bc -l ) +argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate $reproducing" +echo 'argstring for dateloop' +echo $argstring +nohup bash $path2LSS/dateLoopAltMTLBugFix.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL + +endDL=`date +%s.%N` + +if [ $? -ne 0 ]; then + runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) + echo "runtime for Dateloop of $NObsDates days" + echo $runtimeDateLoop + exit 12345 +fi +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop +exit 54321 + + + +printf -v OFBW "%s/MakeBitweights%sOutput%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring +srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py --survey=$survey --obscon=$obscon --ndir=$ndir --ProcPerNode=$ProcPerNode --HPListFile=$hpListFile --outdir=$outputMTLFinalDestination $overwrite2 $verbose $debug >& $OFBW + +endBW=`date +%s.%N` + + + +runtimeInit=$( echo "$endInit - $start" | bc -l ) +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +runtimeBitweights=$( echo "$endBW - $endDL" | bc -l ) + +echo "runtime for initialization" +echo $runtimeInit +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop +echo "runtime for making bitweights" +echo $runtimeBitweights diff --git a/bin/dateLoopAltMTLBugFix_mock_batch.sh b/bin/dateLoopAltMTLBugFix_mock_batch.sh new file mode 100755 index 000000000..05b7b6dc9 --- /dev/null +++ b/bin/dateLoopAltMTLBugFix_mock_batch.sh @@ -0,0 +1,67 @@ +#!/bin/bash + + +### THIS IS WHAT COMES FROM dataLoopAltMTLBugFix_mock ------------------------------- +#argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate $reproducing --mockmin=$mockinit --mockmax=$mockend --initpath=$initpath" +#echo 'argstring for dateloop' +#echo $argstring +#nohup bash $path2LSS/dateLoopAltMTLBugFix_mock.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL +#-------------------- + +echo "All Arguments" +echo $@ + +NObsDates=$1 + +NNodes=$2 + +path2LSS=$3 + +CVal=$4 + +QVal=$5 + +argstring=${@:6} + +echo 'argstring' +echo "$argstring" + + +#for i in $(seq 0 1 $NObsDates) +#do +# echo " NextDate" +# echo "" +# echo "" +# echo "" +# echo $i +# echo "" +# echo "" +# echo "" +if [ $QVal = 'interactive' ]; +then + + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 03:00:00 $path2LSS/runAltMTLRealizations.py $argstring + #srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 03:00:00 --dependency=afterany:17881308 $path2LSS/runAltMTLParallel.py $argstring +fi +if [ $QVal = 'regular' ]; +then + echo "srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:19598458 $path2LSS/runAltMTLRealizations.py $argstring" + srun --nodes=$NNodes -C $CVal --qos=$QVal 
-A desi -t 12:00:00 --dependency=afterany:20272831 $path2LSS/runAltMTLRealizations.py $argstring + #srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:17881308 $path2LSS/runAltMTLParallel.py $argstring +fi + +if [ $QVal = 'debug' ]; +then + + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 00:15:00 $path2LSS/runAltMTLRealizations.py $argstring + #srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 00:15:00 --dependency=afterany:17881308 $path2LSS/runAltMTLParallel.py $argstring +fi +#retcode=$? +#qR=0 #DO NOT CHANGE. This prevents further restarts after the first if qR is set to 1 at top. +#if [ $retcode -ne 0 ]; then +# echo 'something went wrong' +# echo $retcode +# exit 1234 +#fi + +#done diff --git a/bin/runAltMTLParallel.py b/bin/runAltMTLParallel.py index 5af9e20fc..f630d4bbd 100755 --- a/bin/runAltMTLParallel.py +++ b/bin/runAltMTLParallel.py @@ -4,10 +4,11 @@ from multiprocessing import Pool from LSS.SV3 import altmtltools as amt +from LSS.main import mockaltmtltools as mockamt from astropy.table import Table, vstack, join #import altmtltools as amt from desiutil.log import get_logger -import dill +#import dill from sys import argv import os import numpy as np @@ -117,7 +118,10 @@ def procFunc(nproc): print(targets['DEC'][0:5]) else: targets = None - retval = amt.loop_alt_ledger(args.obscon, survey = args.survey, mtldir = args.mtldir, zcatdir = args.zcatdir, altmtlbasedir = args.altMTLBaseDir, ndirs = ndirs, numobs_from_ledger = args.numobs_from_ledger,secondary = args.secondary, getosubp = args.getosubp, quickRestart = args.quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = args.redoFA, mock = args.mock, targets = targets, debug = args.debug, verbose = args.verbose, reproducing = args.reproducing) + if args.mock: + retval = mockamt.loop_alt_ledger(args.obscon, survey = args.survey, mtldir = args.mtldir, zcatdir = args.zcatdir, altmtlbasedir = args.altMTLBaseDir, ndirs = ndirs, numobs_from_ledger = args.numobs_from_ledger,secondary = args.secondary, getosubp = args.getosubp, quickRestart = args.quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = args.redoFA, mock = args.mock, targets = targets, debug = args.debug, verbose = args.verbose, reproducing = args.reproducing) + else: + retval = amt.loop_alt_ledger(args.obscon, survey = args.survey, mtldir = args.mtldir, zcatdir = args.zcatdir, altmtlbasedir = args.altMTLBaseDir, ndirs = ndirs, numobs_from_ledger = args.numobs_from_ledger,secondary = args.secondary, getosubp = args.getosubp, quickRestart = args.quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = args.redoFA, mock = args.mock, targets = targets, debug = args.debug, verbose = args.verbose, reproducing = args.reproducing) if args.verbose: log.debug('finished with one iteration of procFunc') if type(retval) == int: diff --git a/bin/runAltMTLRealizations.py b/bin/runAltMTLRealizations.py new file mode 100755 index 000000000..fe03c4340 --- /dev/null +++ b/bin/runAltMTLRealizations.py @@ -0,0 +1,175 @@ +#!/global/common/software/desi/perlmutter/desiconda/20230111-2.1.0/conda/bin/python -u +from desiutil.iers import freeze_iers +freeze_iers() + +from multiprocessing import Pool +from LSS.main import mockaltmtltools as amt +from astropy.table import Table, vstack, join +#import altmtltools as amt +from desiutil.log import get_logger +from sys import argv +import os +import numpy as np +import multiprocessing as mp +import logging +import 
atexit +import glob +import cProfile, pstats, io +from pstats import SortKey +import argparse + +#Base directory for the alternate MTLs created in the InitializeAltMTLs script + + +parser = argparse.ArgumentParser( + prog = 'RunAltMTLParallel', + description = 'Progresses alternate MTLs through the MTL update loop in parallel. More documentation available on the DESI wiki. ') +parser.add_argument('-a', '--altMTLBaseDir', dest='altMTLBaseDir', required=True, type=str, help = 'the path to the location where alt MTLs are stored, up to, but not including survey and obscon information.') + +parser.add_argument('-obscon', '--obscon', dest='obscon', default='DARK', help = 'observation conditions, either BRIGHT or DARK.', required = False, type = str) +parser.add_argument('-mockmin', '--mockmin', dest='mockmin', default=0, help = 'Minimum mock number', required = False, type = int) +parser.add_argument('-mockmax', '--mockmax', dest='mockmax', default=6, help = 'Maximum mock number', required = False, type = int) +parser.add_argument('-s', '--survey', dest='survey', default='sv3', help = 'DESI survey to create Alt MTLs for. Either sv3 or main.', required = False, type = str) +parser.add_argument('-sec', '--secondary', dest = 'secondary', default=False, action='store_true', help = 'set flag to incorporate secondary targets.') +parser.add_argument('-mock', '--mock', dest = 'mock', default=True, action='store_true', help = 'set flag if running pipeline on mocks.') +parser.add_argument('-tf', '--targfile', dest='targfile', required=False, default = None, type=str, help = 'Location for target file for mocks or data. Only required if mocks are being processed.') +parser.add_argument('-v', '--verbose', dest = 'verbose', default=False, action='store_true', help = 'set flag to enter verbose mode') +parser.add_argument('-qr', '--quickRestart', dest = 'quickRestart', default=False, action='store_true', help = 'set flag to remove any AMTL updates that have already been performed. Useful for rapidfire debugging of steps in this part of the pipeline.') +parser.add_argument('-rep', '--reproducing', action='store_true', dest='reproducing', default=False, help = 'WARNING: THIS FLAG SHOULD ONLY BE USED FOR DEBUGGING. Pass this flag to confirm to the alt mtl code that you are trying to reproduce real MTLs. This option should (must?) be used in conjunction with --shuffleSubpriorities.', required = False) +parser.add_argument('-prof', '--profile', dest = 'profile', default=False, action='store_true', help = 'set flag to profile code time usage. This flag may not profile all components of any particular stage of the AMTL pipeline. ') +parser.add_argument('-d', '--debug', dest = 'debug', default=False, action='store_true', help = 'set flag to enter debug mode.') +parser.add_argument('-nfl', '--NumObsNotFromLedger', dest = 'numobs_from_ledger', default=True, action='store_false', help = 'If True (flag is NOT set) then inherit the number of observations so far from the ledger rather than expecting it to have a reasonable value in the zcat.') + +parser.add_argument('-redoFA', '--redoFA', dest = 'redoFA', default=False, action='store_true', help = 'pass this flag to regenerate already existing fiber assignment files.') + +parser.add_argument('-getosubp', '--getosubp', action='store_true', dest='getosubp', default=False, help = 'WARNING: THIS FLAG SHOULD ONLY BE USED FOR DEBUGGING AND NEVER FOR MOCKS. 
Pass this flag to grab subpriorities directly from the real survey MTLs for fiberassignment.', required = False)
+parser.add_argument('-md', '--multiDate', action='store_true', dest='multiDate', default=False, help = 'Currently this flag is being debugged. In the future, it will switch between interactive submission of each date as a separate job (True) and of all nights to be looped through until a single job`s time runs out. ', required = False)
+parser.add_argument('-ppn', '--ProcPerNode', dest='ProcPerNode', default=None, help = 'Number of processes to spawn per requested node. If not specified, determined automatically from NERSC_HOST.', required = False, type = int)
+parser.add_argument('-rmbd', '--realMTLBaseDir', dest='mtldir', default='/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/', help = 'Location of the real (or mock) MTLs that serve as the basis for the alternate MTLs. Defaults to location of data MTLs. Do NOT include survey or obscon information here. ', required = False, type = str)
+parser.add_argument('-zcd', '--zCatDir', dest='zcatdir', default='/global/cfs/cdirs/desi/spectro/redux/daily/', help = 'Location of the real redshift catalogs for use in alt MTL loop. Defaults to location of survey zcatalogs.', required = False, type = str)
+
+print(argv)
+
+args = parser.parse_args()
+
+if args.profile:
+    pr = cProfile.Profile()
+    pr.enable()
+
+
+
+log = get_logger()
+
+if args.mock:
+    assert(not (args.targfile is None))
+    print('args.getosubp')
+    print(args.getosubp)
+    assert(not (args.getosubp))
+
+# Leave confirmation file in output directory if using original subpriorities
+if args.getosubp:
+    from pathlib import Path
+    Path(args.altMTLBaseDir + '/GETOSUBPTRUE').touch()
+
+#Get information about environment for multiprocessing
+NodeID = int(os.getenv('SLURM_NODEID'))
+SlurmNProcs = int(os.getenv('SLURM_NPROCS'))
+try:
+    NNodes = int(os.getenv('SLURM_JOB_NUM_NODES'))
+except:
+    log.warning('no SLURM_JOB_NUM_NODES env set. You may not be on a compute node.')
+    NNodes = 1
+
+if args.ProcPerNode is None:
+    if 'cori' in os.getenv('NERSC_HOST').lower():
+        args.ProcPerNode = 32
+    elif 'perlmutter' in os.getenv('NERSC_HOST').lower():
+        args.ProcPerNode = 128
+    else:
+        raise ValueError('Code is only supported on NERSC Cori and NERSC perlmutter.')
+
+NProc = int(NNodes*args.ProcPerNode)
+log.info('NProc = {0:d}'.format(NProc))
+log.info('NNodes = {0:d}'.format(NNodes))
+
+
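+# Illustration with hypothetical numbers: one Perlmutter node at
+# ProcPerNode=128 gives NProc=128 worker slots; procFunc below is called once
+# per mock index in [mockmin, mockmax), so --mockmin=10 --mockmax=11 processes
+# realization 10 only, reading its targets from targfile.format(mock_number=10).
+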
+# These should be constant/default.
+# If this changes, add these to argparse
+
+ndirs = None
+multiproc = True
+singleDate = not(args.multiDate)
+
+
+def procFunc(nproc):
+    if args.verbose:
+        log.debug('calling procFunc')
+    if not(args.targfile is None):
+        targets = Table.read(args.targfile.format(mock_number=nproc))
+        print('targets.dtype')
+        print(targets.dtype)
+        print('targets[0:5]')
+        print(targets[0:5])
+        print('targets TARGETID,RA,DEC')
+        print(targets['TARGETID'][0:5])
+        print(targets['RA'][0:5])
+        print(targets['DEC'][0:5])
+    else:
+        targets = None
+    retval = amt.loop_alt_ledger(args.obscon, survey = args.survey, mtldir = args.mtldir, zcatdir = args.zcatdir, altmtlbasedir = args.altMTLBaseDir.format(mock_number=nproc), ndirs = ndirs, numobs_from_ledger = args.numobs_from_ledger,secondary = args.secondary, getosubp = args.getosubp, quickRestart = args.quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = args.redoFA, mock = args.mock, targets = targets, debug = args.debug, verbose = args.verbose, reproducing = args.reproducing)
+    if args.verbose:
+        log.debug('finished with one iteration of procFunc')
+    if type(retval) == int:
+        if args.verbose:
+            log.debug('retval')
+            log.debug(retval)
+        if retval == 151:
+            log.info('No more data. Ending script.')
+            return 151
+        return retval
+    elif args.verbose:
+        print('retval')
+        print(retval)
+
+    return 42
+
+inds = []
+#start = int(NodeID*NProc/SlurmNProcs)
+#end = int((NodeID + 1)*NProc/SlurmNProcs)
+log.info('NodeID = {0:d}'.format(NodeID))
+log.info('StartProc = {0:d}'.format(args.mockmin))
+log.info('EndProc = {0:d}'.format(args.mockmax))
+
+
+for i in range(args.mockmin, args.mockmax):
+    log.info('Process i = {0}'.format(i))
+    files = glob.glob(args.altMTLBaseDir.format(mock_number=i))
+    #files = glob.glob(args.altMTLBaseDir + "Univ{0:03d}/*".format(i))
+    if len(files):
+        pass
+    else:
+        log.info('no files in dir number {0}, not processing that directory.'.format(i))
+        continue
+    inds.append(i)
+
+assert(len(inds))
+p = Pool(NProc)
+atexit.register(p.close)
+result = p.map(procFunc,inds)
+
+
+if args.profile:
+    pr.disable()
+    s = io.StringIO()
+    sortby = SortKey.CUMULATIVE
+    ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
+    ps.print_stats()
+    for i in range(1000):
+        outFN = args.altMTLBaseDir + '/runAltMTLParallel_{0:d}.prof'.format(int(i))
+        if os.path.isfile(outFN):
+            continue
+        else:
+            # write to the first free slot, then stop
+            ps.dump_stats(outFN)
+            break
+    print(s.getvalue())
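+
+# A minimal standalone sketch of the slot rotation above (illustrative only;
+# assumes profiles sit directly under the resolved base directory):
+#
+#     def next_free_profile_path(basedir, maxslots=1000):
+#         for i in range(maxslots):
+#             candidate = os.path.join(basedir, 'runAltMTLParallel_{0:d}.prof'.format(i))
+#             if not os.path.isfile(candidate):
+#                 return candidate
+#         raise RuntimeError('no free profile slot under ' + basedir)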
diff --git a/py/LSS/SV3/altmtltools.py b/py/LSS/SV3/altmtltools.py
index 3b38e7b4e..a0d35ef47 100644
--- a/py/LSS/SV3/altmtltools.py
+++ b/py/LSS/SV3/altmtltools.py
@@ -1248,7 +1248,7 @@ def update_alt_ledger(altmtldir,althpdirname, altmtltilefn, actions, survey = '
             raise ValueError('If processing mocks, you MUST specify a target file')
         log.info('update loc a')
         update_ledger(althpdirname, altZCat, obscon=obscon.upper(),
-                      numobs_from_ledger=numobs_from_ledger, targets = targets)
+                      numobs_from_ledger=numobs_from_ledger)#, targets = targets)
         didUpdateHappen = True
     elif targets is None:
         log.info('update loc b')
@@ -2199,4 +2199,4 @@ def write_amtl_tile_tracker(dirname, tiles, obscon = 'dark', survey = 'main'):
 #    #if np.sum(todaysTiles['ALTARCHIVEDATE'] == None) == 0:
     TileTracker.write(TileTrackerFN, format = 'ascii.ecsv', overwrite = True)
-    return 'done'
\ No newline at end of file
+    return 'done'
diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py
index e44467da6..f2fbc7e8f 100644
--- a/py/LSS/main/cattools.py
+++ b/py/LSS/main/cattools.py
@@ -3380,7 +3380,7 @@ def add_zfail_weight2full(indir,tp='',tsnrcut=80,readpars=False,hpmapcut='_HPmap



-def mkclusdat(fl,weighttileloc=True,zmask=False,tp='',dchi2=9,tsnrcut=80,rcut=None,ntilecut=0,ccut=None,ebits=None,zmin=0,zmax=6,write_cat='y',splitNS='n',return_cat='n',compmd='ran',kemd='',wsyscol=None,use_map_veto='',subfrac=1,zsplit=None):
+def mkclusdat(fl,weighttileloc=True,zmask=False,tp='',dchi2=9,tsnrcut=80,rcut=None,ntilecut=0,ccut=None,ebits=None,zmin=0,zmax=6,write_cat='y',splitNS='n',return_cat='n',compmd='ran',kemd='',wsyscol=None,use_map_veto='',subfrac=1,zsplit=None, ismock=False):
     import LSS.common_tools as common
     from LSS import ssr_tools
     '''
@@ -3611,23 +3611,27 @@ def mkclusdat(fl,weighttileloc=True,zmask=False,tp='',dchi2=9,tsnrcut=80,rcut=No
         #ff['flux_r_dered'] = ff['FLUX_R']/ff['MW_TRANSMISSION_R']
         #kl.append('flux_r_dered')
         #print(kl)
-        fcols = ['G','R','Z','W1','W2']
-        ff = common.add_dered_flux(ff,fcols)
-        for col in fcols:
-            kl.append('flux_'+col.lower()+'_dered')
-        print(kl)
-        if kemd == 'phot':
-            restcols = ['REST_GMR_0P1','REST_GMR_0P0','ABSMAG_RP0','ABSMAG_RP1']
-            for col in restcols:
-                kl.append(col)
+        if not ismock:
+            fcols = ['G','R','Z','W1','W2']
+            ff = common.add_dered_flux(ff,fcols)
+            for col in fcols:
+                kl.append('flux_'+col.lower()+'_dered')
+            print(kl)
+            if kemd == 'phot':
+                restcols = ['REST_GMR_0P1','REST_GMR_0P0','ABSMAG_RP0','ABSMAG_RP1']
+                for col in restcols:
+                    kl.append(col)
         if ccut == '-21.5':
-            from LSS.tabulated_cosmo import TabulatedDESI
-            cosmo = TabulatedDESI()
-            dis_dc = cosmo.comoving_radial_distance
-            dm = 5.*np.log10(dis_dc(ff['Z'])*(1.+ff['Z'])) + 25.
-            r_dered = 22.5 - 2.5*np.log10(ff['flux_r_dered'])
-            abr = r_dered -dm
+            if ismock:
+                abr = ff['R_MAG_ABS']
+            else:
+                from LSS.tabulated_cosmo import TabulatedDESI
+                cosmo = TabulatedDESI()
+                dis_dc = cosmo.comoving_radial_distance
+                dm = 5.*np.log10(dis_dc(ff['Z'])*(1.+ff['Z'])) + 25. 
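+                # Illustrative check with hypothetical numbers: z = 0.2 at a
+                # comoving distance of 800 Mpc/h gives
+                # dm = 5*log10(800*1.2) + 25 ~ 39.9, and a dereddened r flux of
+                # 10 nanomaggies gives r_dered = 22.5 - 2.5*log10(10) = 20.0,
+                # so abr ~ -19.9, which would fail a ccut of '-21.5' below.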
+ r_dered = 22.5 - 2.5*np.log10(ff['flux_r_dered']) + abr = r_dered -dm sel = abr < float(ccut) print('comparison before/after abs mag cut') print(len(ff),len(ff[sel])) diff --git a/py/LSS/main/mockaltmtltools.py b/py/LSS/main/mockaltmtltools.py new file mode 100644 index 000000000..97db7dfab --- /dev/null +++ b/py/LSS/main/mockaltmtltools.py @@ -0,0 +1,2223 @@ +from desiutil.iers import freeze_iers +freeze_iers() + +import collections.abc +from time import time +import astropy +import astropy.io +import astropy.io.fits as pf +from astropy.table import Table,join + +import memory_profiler +from memory_profiler import profile + + +##TEMP +MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py' +MODULE_NAME = 'desitarget' +import importlib +import sys +spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) +module = importlib.util.module_from_spec(spec) +sys.modules[spec.name] = module +spec.loader.exec_module(module) +## + +import desitarget +#from desitarget import io, mtl +from desitarget.cuts import random_fraction_of_trues +from desitarget.mtl import get_mtl_dir, get_mtl_tile_file_name,get_mtl_ledger_format +from desitarget.mtl import get_zcat_dir, get_ztile_file_name, tiles_to_be_processed +from desitarget.mtl import make_zcat,survey_data_model,update_ledger, get_utc_date + +from desitarget.targets import initial_priority_numobs, decode_targetid +from desitarget.targetmask import obsconditions, obsmask +from desitarget.targetmask import desi_mask, bgs_mask, mws_mask, zwarn_mask + +from desiutil.log import get_logger + +import fitsio + +import healpy as hp + + +from LSS.bitweights import pack_bitweights +from LSS.SV3.fatools import get_fba_fromnewmtl +import LSS.SV3.fatools as fatools + +import matplotlib.pyplot as plt + +import numpy as np +from numpy import random as rand +import numpy.lib.recfunctions as rfn + +import os +import pickle +import subprocess +import sys +from time import sleep + +import cProfile, pstats +import io as ProfileIO +from pstats import SortKey + +import glob + + +pr = cProfile.Profile() + +log = get_logger() + +os.environ['DESIMODEL'] = '/global/common/software/desi/cori/desiconda/current/code/desimodel/master' + +mtlformatdict = {"PARALLAX": '%16.8f', 'PMRA': '%16.8f', 'PMDEC': '%16.8f'} + + +zcatdatamodel = np.array([], dtype=[ + ('RA', '>f8'), ('DEC', '>f8'), ('TARGETID', '>i8'), + ('NUMOBS', '>i4'), ('Z', '>f8'), ('ZWARN', '>i8'), ('ZTILEID', '>i4') + ]) + +#mtltilefiledm = np.array([], dtype=[ +# ('TILEID', '>i4'), ('TIMESTAMP', 'U25'), +# ('VERSION', 'U14'), ('PROGRAM', 'U6'), ('ZDATE', 'U8') +# ]) + +mtltilefiledm = np.array([], dtype = [ + ('TILEID', '>i4'), ('TIMESTAMP', 'i8'), ('ARCHIVEDATE', '>i8')]) + +def datesInMonthForYear(yyyy): + # if divisible by 4 + if (yyyy % 4) == 0: + # if not divisible by 100, leap year + if not ((yyyy % 100) == 0): + monthLengths = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + # if divisible by 100 and 400, leap year + elif ((yyyy % 400) == 0): + monthLengths = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + # if divisble by 100 and not 400, no leap year + else: + monthLengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + + else: + monthLengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + return monthLengths + +def nextDate(date): + # JL takes NITE in YYYYMMDD form and increments to the next date + yyyy, mm, dd = int(str(date)[0:4]), int(str(date)[4:6]), int(str(date)[6:]) + log.info('date = {0}'.format(date)) + monthLengths = 
datesInMonthForYear(yyyy) + log.info('monthLengths array is {0}'.format(monthLengths)) + log.info('yyyy, mm, dd = {0}, {1}, {2}'.format(yyyy, mm, dd)) + if dd == monthLengths[mm - 1]: + if mm == 12: + mm = '01' + yyyy = str(yyyy+1) + else: + mm = str(mm+1).zfill(2) + + dd = '01' + else: + dd = str(dd + 1).zfill(2) + log.info('yyyy, mm, dd = {0}, {1}, {2}'.format(yyyy, mm, dd)) + return ''.join([str(yyyy), str(mm).zfill(2), str(dd).zfill(2)]) + +def evaluateMask(bits, mask, evalMultipleBits = False): + if evalMultipleBits: + return (bits & mask) == mask + return (bits & mask) > 0 + + + +def flipBit(cat, bit2Flip, cond = None, fieldName = 'DESI_TARGET', mode = 'on'): + #only works on single bits + assert( np.abs( np.log2(bit2Flip) - int(np.log2(bit2Flip)) ) < 0.001 ) + + if cond is None: + if mode.lower() == 'on': + cond = np.invert(evaluateMask(cat[fieldName], bit2Flip)) + elif mode.lower() == 'off': + cond = evaluateMask(cat[fieldName], bit2Flip) + #elif mode.lower() == 'both': + # cond = np.ones(cat.shape[0], dtype = bool) + else: + #raise ValueError('`mode` must be `on` `off` or `both`') + raise ValueError('`mode` must be `on` or `off`') + + assert( len(cond) == len(cat)) + + if np.sum(cond) == 0: + log.warning('This call to flipBit does not flip any bits.') + return cat + + + if mode == 'on': + cat[fieldName][cond] = cat[fieldName][cond] | bit2Flip + elif mode == 'off': + cond = cond & ((cat[fieldName] & bit2Flip) == bit2Flip) + cat[fieldName][cond] = cat[fieldName][cond] ^ bit2Flip + #elif mode == 'both': + # cat[fieldName][cond] = cat[fieldName][cond] ^ bit2Flip + else: + #raise ValueError('`mode` must be `on` `off` or `both`') + raise ValueError('`mode` must be `on` or `off`') + + return cat + +def processTileFile(infile, outfile, startDate, endDate): + #ztilefile, outputMTLDir + ztilefn, startDate, endDate + if (startDate is None) and (endDate is None): + #os.symlink(infile, outfile) + from shutil import copyfile + copyfile(infile, outfile) + return 0 + + + if (startDate is None) or (startDate == ''): + startDate = 0 + else: + startDate = int(startDate.split('T')[0].replace('-', '')) + if (endDate is None) or (endDate == ''): + endDate = 9999999999 + else: + endDate = int(endDate.split('T')[0].replace('-', '')) + + origtf = Table.read(infile) + + origtf = origtf[origtf['LASTNIGHT'].astype(int) >= startDate ] + origtf = origtf[origtf['LASTNIGHT'].astype(int) <= endDate ] + + + origtf.write(outfile, overwrite = True, format = 'ascii.ecsv') + return 0 +def uniqueTimestampFATimePairs(tileList, withFlag = False): + output = [] + for t in tileList: + if withFlag: + datepair = (t['ORIGMTLTIMESTAMP'], t['FAMTLTIME'], t['REPROCFLAG']) + else: + datepair = (t['ORIGMTLTIMESTAMP'], t['FAMTLTIME']) + + if datepair in output: + continue + else: + output.append(datepair) + + return output +def uniqueArchiveDateZDatePairs(tileList, withFlag = False): + output = [] + for t in tileList: + if withFlag: + datepair = (t['ZDATE'], t['ARCHIVEDATE'], t['REPROCFLAG']) + else: + datepair = (t['ZDATE'], t['ARCHIVEDATE']) + + if datepair in output: + continue + else: + output.append(datepair) + + return output + +def findTwin(altFiber, origFiberList, survey = 'sv3', obscon = 'dark'): + log.critical('this function isn\'t ready yet. 
Goodbye') + raise NotImplementedError('Fiber Twin method not implemented yet.') + if survey == 'sv3': + if obscon == 'dark': + altTargBits = altFiber['SV3_DESI_TARGET'] + altTargBitsSec = altFiber['SV3_BGS_TARGET'] + altTargBitsMWS = altFiber['SV3_MWS_TARGET'] + + origTargBitList = origFiberList['SV3_DESI_TARGET'] + origTargBitListSec = origFiberList['SV3_BGS_TARGET'] + origTargBitListMWS = origFiberList['SV3_MWS_TARGET'] + + elif obscon == 'bright': + altTargBits = altFiber['SV3_BGS_TARGET'] + altTargBitsSec = altFiber['SV3_DESI_TARGET'] + altTargBitsMWS = altFiber['SV3_MWS_TARGET'] + + origTargBitList = origFiberList['SV3_BGS_TARGET'] + origTargBitListSec = origFiberList['SV3_DESI_TARGET'] + origTargBitListMWS = origFiberList['SV3_MWS_TARGET'] + else: + raise ValueError('Invalid value for \'obscon\': {0}'.format(obscon)) + elif survey == 'main': + if obscon == 'dark': + altTargBits = altFiber['DESI_TARGET'] + altTargBitsSec = altFiber['BGS_TARGET'] + altTargBitsMWS = altFiber['MWS_TARGET'] + + origTargBitList = origFiberList['DESI_TARGET'] + origTargBitListSec = origFiberList['BGS_TARGET'] + origTargBitListMWS = origFiberList['MWS_TARGET'] + + elif obscon == 'bright': + altTargBits = altFiber['BGS_TARGET'] + altTargBitsSec = altFiber['DESI_TARGET'] + altTargBitsMWS = altFiber['MWS_TARGET'] + origTargBitList = origFiberList['BGS_TARGET'] + origTargBitListSec = origFiberList['DESI_TARGET'] + origTargBitListMWS = origFiberList['MWS_TARGET'] + + else: + raise ValueError('Invalid value for \'obscon\': {0}'.format(obscon)) + else: + raise ValueError('Invalid value for \'survey\': {0}'.format(survey)) + + altFS = altFiber['FIBERSTATUS'] + origFS = origFiberList['FIBERSTATUS'] + + + ''' + BGSBits = initialentries['SV3_BGS_TARGET'] + BGSFaintHIP = ((BGSBits & 8) == 8) + BGSFaintAll = ((BGSBits & 1) == 1) | BGSFaintHIP + + #Set all BGS_FAINT_HIP to BGS_FAINT + + initialentries['SV3_BGS_TARGET'][BGSFaintHIP] = (BGSBits[BGSFaintHIP] & ~8) + initialentries['PRIORITY'][BGSFaintHIP] = 102000*np.ones(np.sum(BGSFaintHIP)) + + NewBGSBits = initialentries['SV3_BGS_TARGET'] + NewBGSFaintHIP = ((BGSBits & 8) == 8) + NewBGSFaintAll = ((BGSBits & 1) == 1) | NewBGSFaintHIP + NewBGSPriors = initialentries['PRIORITY'] + #Select 20% of BGS_FAINT to promote using function from + BGSFaintNewHIP = random_fraction_of_trues(PromoteFracBGSFaint, BGSFaintAll) + #Promote them + + initialentries['SV3_BGS_TARGET'][BGSFaintNewHIP] = (BGSBits[BGSFaintNewHIP] | 8) + initialentries['PRIORITY'][BGSFaintNewHIP] = 102100*np.ones(np.sum(BGSFaintNewHIP)).astype(int) + ''' + + +def createFAmap(FAReal, FAAlt, TargAlt = None, changeFiberOpt = None, debug = False, + verbose = False, mock = False, mockTrueZKey = None): + # Options for 'changeFiberOpt': + # None: do nothing different to version 1 + # AllTwins: Find a twin fiber with a target of the + # same type and similar Fiber assignment for all + # unsimilar target types + # SomeTwins: Find a twin as above but only for + # assignments where the original fiber was unassigned + + TIDReal = FAReal['TARGETID'] + TIDAlt = FAAlt['TARGETID'] + FibReal = FAReal['FIBER'] + FibAlt = FAAlt['FIBER'] + + if not (changeFiberOpt is None): + raise NotImplementedError('changeFiberOpt is not implemented yet.') + assert(not(TargAlt is None)) + jTargs = join(FAAlt, TargAlt, keys = "TARGETID") + + Real2Alt = {} + Alt2Real = {} + if debug: + inc1 = 0 + inc2 = 0 + negMisMatch = [] + for tr, fr in zip(TIDReal, FibReal): + taMatch = TIDAlt[FibAlt == fr] + assert(len(taMatch) == 1) + if debug: + try: + 
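+                # Debug bookkeeping: tr is the TARGETID the real survey assigned to
+                # fiber fr; taMatch[0] is the TARGETID the alternate fiberassignment
+                # placed on that same fiber. inc1 tallies fibers where they differ.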
assert(tr == taMatch[0]) + except: + inc1+=1 + Real2Alt[tr] = taMatch[0] + + for ta, fa in zip(TIDAlt, FibAlt): + trMatch = TIDReal[FibReal == fa] + try: + assert(len(trMatch) == 1) + except: + if ta < 0: + negMisMatch.append(ta) + continue + else: + log.info(ta) + + assert(0) + if debug or verbose: + try: + assert(ta == trMatch[0]) + except: + inc2+=1 + if (changeFiberOpt is None) or (changeFiberOpt == 'SomeTwins') or (ta == trMatch[0]): + Alt2Real[ta] = trMatch[0] + elif changeFiberOpt == 'AllTwins': + #if jTargs['SV3_'] + assert(0) + pass + + + if debug or verbose: + log.info('no matches for negative tas {0}'.format(negMisMatch)) + log.info(inc1) + log.info(inc2) + return Alt2Real, Real2Alt + + + +def makeAlternateZCat(zcat, real2AltMap, alt2RealMap, debug = False, verbose = False): + from collections import Counter + zcatids = zcat['TARGETID'] + altZCat = Table(zcat) + if debug: + failures = 0 + negativeIDs = 0 + for n, i in zip(zcatids, range(len(zcatids))): + cond = (n == zcatids) + if debug and (n < 0): + negativeIDs +=1 + altid = real2AltMap[n] + + altZCat['TARGETID'][i] = altid + if debug: + log.info('negIDs') + log.info(negativeIDs) + log.info('failures') + log.info(failures) + log.info('testctr') + d = Counter(altZCat['TARGETID']) + res = [ k for k, v in d.items() if v > 1] + if debug: + log.info('res') + log.info(res) + if len(res): + log.info('how many pre dup cuts') + log.info(zcatids.shape) + cond2 = np.ones(zcatids.shape, dtype=bool) + for i in res: + log.info('test') + log.info(np.sum(zcatids == i)) + cond2 = cond2 & (altcatids != i) + log.info("how many post dup cuts") + log.info(np.sum(cond2)) + else: + log.info("supposedly, no duplicates") + return altZCat + +def checkMTLChanged(MTLFile1, MTLFile2): + MTL1 = desitarget.io.read_mtl_ledger(MTLFile1, unique = True) + MTL2 = desitarget.io.read_mtl_ledger(MTLFile2, unique = True) + NDiff = 0 + NDiff2 = 0 + NDiff3 = 0 + for tar1 in MTL1: + tar2 = MTL2[MTL2['TARGETID'] == tar1['TARGETID']] + + if tar1['NUMOBS'] != tar2['NUMOBS']: + NDiff +=1 + + if tar1['TIMESTAMP'] != tar2['TIMESTAMP']: + NDiff2 +=1 + + if tar1['SUBPRIORITY'] != tar2['SUBPRIORITY']: + NDiff3 +=1 + + print('Number targets with different NUMOBS') + print(NDiff) + print('Number targets with different TIMESTAMP') + print(NDiff2) + print('Number targets with different SUBPRIORITY') + print(NDiff3) + +def makeTileTrackerFN(dirName, survey, obscon): + return dirName + '/{0}survey-{1}obscon-TileTracker.ecsv'.format(survey, obscon.upper()) +def makeTileTracker(altmtldir, survey = 'main', obscon = 'DARK', startDate = None, + endDate = None, overwrite = True): + """Create action file which orders all actions to do with AMTL in order + in which real survey did them. + + Parameters + ---------- + altmtldir : :class:`str` + Path to the directory for a single realization of alternate MTL + ledgers. e.g. /pscratch/u/user/simName/Univ000/ + obscon : :class:`str`, optional, defaults to "dark" + A string matching ONE obscondition in the desitarget bitmask yaml + file (i.e. in `desitarget.targetmask.obsconditions`), e.g. "DARK" + Governs how priorities are set when merging targets. + survey : :class:`str`, optional, defaults to "main" + Used to look up the correct ledger, in combination with `obscon`. + Options are ``'main'`` and ``'svX``' (where X is 1, 2, 3 etc.) + for the main survey and different iterations of SV, respectively. 
+ + + Returns + ------- + + [Nothing] + + Notes + ----- + - Writes a tiletracker file to {altmtldir}/{survey.lower()}survey-{obscon.upper()}obscon-TileTracker.ecsv + """ + + TileTrackerFN = makeTileTrackerFN(altmtldir, survey, obscon) + + if (survey.lower() == 'main') or (survey.lower() == 'y1'): + + surveyForTSS = 'main' + if survey.lower() == 'y1': + TileFN = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/tiles-{0}.fits'.format(obscon.upper()) + else: + TileFN = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-main.ecsv' + elif survey.lower() == 'sv3': + surveyForTSS = 'sv3' + TileFN = '/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/tiles-{0}.fits'.format(obscon.upper()) + else: + raise ValueError('only valid values for `survey` are `main` and `sv3.` {0} was provided'.format(survey)) + + FABaseDir = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + + Tiles = Table.read(TileFN) + + TSSFN = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-specstatus.ecsv' + + TSS = Table.read(TSSFN) + + MTLDTFN = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/mtl-done-tiles.ecsv' + + MTLDT = Table.read(MTLDTFN) + + #tiles-specstatus file filtered to only matching obscon and surveySURVEY FAPRGRM + TSS_Sel = TSS[(TSS['SURVEY'] == surveyForTSS) & (TSS['FAPRGRM'] == obscon.lower())] + + TilesSel = np.unique(TSS_Sel['TILEID']) + + TileIDs = [] + TypeOfActions = [] + TimesOfActions = [] + doneFlag = [] + archiveDates = [] + + for tileid in TilesSel: + print('tileid = {0}'.format(tileid)) + + ts = str(tileid).zfill(6) + + thisTileMTLDT = MTLDT[MTLDT['TILEID'] == tileid] + + if len(thisTileMTLDT) > 1: + thisTileMTLDT.sort('TIMESTAMP') + elif len(thisTileMTLDT) == 0: + continue + else: + log.info(len(thisTileMTLDT)) + log.info(thisTileMTLDT['ARCHIVEDATE']) + log.info(thisTileMTLDT['ARCHIVEDATE'][0]) + log.info(type(thisTileMTLDT['ARCHIVEDATE'][0])) + if thisTileMTLDT['ARCHIVEDATE'][0] > int(endDate): + continue + reprocFlag = False + thisFAFN = FABaseDir + f'/{ts[0:3]}/fiberassign-{ts}.fits' + + thisfhtOrig = fitsio.read_header(thisFAFN) + thisfadate = thisfhtOrig['MTLTIME'] + thisfadate = desitarget.mtl.add_to_iso_date(thisfadate, 1) + thisfanite = int(''.join(thisfadate.split('T')[0].split('-'))) + if thisfanite > endDate: + continue + + TileIDs.append(tileid) + TypeOfActions.append('fa') + TimesOfActions.append(thisfadate) + archiveDates.append(thisfanite) + if thisfanite < startDate: + doneFlag.append(True) + else: + doneFlag.append(False) + + for update in thisTileMTLDT: + + + thisupdateTimestamp = update['TIMESTAMP'] + thisupdateNite = int(''.join(thisupdateTimestamp.split('T')[0].split('-'))) + if (thisupdateNite > endDate): + continue + + TileIDs.append(tileid) + if reprocFlag: + TypeOfActions.append('reproc') + else: + TypeOfActions.append('update') + TimesOfActions.append(thisupdateTimestamp) + if (thisupdateNite < startDate): + doneFlag.append(True) + else: + doneFlag.append(False) + archiveDates.append(update['ARCHIVEDATE']) + reprocFlag = True + ActionList = [TileIDs, TypeOfActions, TimesOfActions, doneFlag, archiveDates] + t = Table(ActionList, + names=('TILEID', 'ACTIONTYPE', 'ACTIONTIME', 'DONEFLAG', 'ARCHIVEDATE'), + meta={'Name': 'AltMTLTileTracker', 'StartDate': startDate, 'EndDate': endDate, 'amtldir':altmtldir}) + t.sort(['ACTIONTIME', 'ACTIONTYPE', 'TILEID']) + + t.write(TileTrackerFN, format='ascii.ecsv', overwrite = overwrite) + + + + +def trimToMTL(notMTL, MTL, debug = False, verbose = False): + # JL trims a target file, which possesses all of the 
information in an MTL, down + # JL to the columns allowed in the MTL data model. + allNames = notMTL.dtype.names + MTLNames = MTL.dtype.names + for n in allNames: + if n in MTLNames: + if debug: + print('allowed') + print(n) + continue + else: + if debug: + print('killed') + print(n) + notMTL = rfn.drop_fields(notMTL, n) + return notMTL + + +#@profile +def initializeAlternateMTLs(initMTL, outputMTL, nAlt = 2, genSubset = None, seed = 314159, + obscon = 'DARK', survey = 'sv3', saveBackup = False, overwrite = False, startDate = None, endDate = None, + ztilefile = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-specstatus.ecsv', + hpnum = None, shuffleBrightPriorities = False, PromoteFracBGSFaint = 0.2, shuffleELGPriorities = False, + PromoteFracELG = 0.1, shuffleSubpriorities = True, reproducing = False, usetmp = False, + finalDir = None, profile = False, debug = False, verbose = False): + if profile: + pr.enable() + if verbose or debug: + log.info('starting initializeAltMTLs') + + if (shuffleSubpriorities ^ reproducing): + pass + else: + log.critical('If you are not shuffling subpriorities, you MUST be in debug/reproduction mode.') + raise ValueError('If you are not shuffling subpriorities, you MUST be in debug/reproduction mode.') + + if ('trunk' in outputMTL.lower()) or ('ops' in outputMTL.lower()): + raise ValueError("In order to prevent accidental overwriting of the real MTLs, please remove \'ops\' and \'trunk\' from your MTL output directory") + + if (not usetmp) or (usetmp and (outputMTL.startswith('/dev/shm/') or not(outputMTL.startswith('/tmp/')))): + pass + else: + log.critical('You are trying to write to local tmp directories but \ + your write directory is not in local tmp (/dev/shm/ or /tmp/).') + log.critical('directory name: {0}'.format(outputMTL)) + raise ValueError('usetmp set to True but output directory not in tmp. Output directory is {0}'.format(outputMTL)) + + + if debug: + log.info('initMTL') + log.info(initMTL) + ztilefn = ztilefile.split('/')[-1] + fn = initMTL.split('/')[-1] + log.info('reading initial MTL(s)') + allentries = Table.read(initMTL) + + meta = allentries.meta + if verbose or debug: + log.info('MTL metadata') + log.info(meta) + log.info('initial MTL') + log.info(initMTL) + log.info('output MTL') + log.info(outputMTL) + + if not ('Univ' in outputMTL): + log.warning('Code currently relies on using Univ as realization delimiter. \ + Code may function improperly.') + altmtldir = os.path.dirname(outputMTL).split('Univ')[0] + origmtldir = os.path.dirname(initMTL).split(survey)[0] + #zcatdir = os.path.dirname(ztilefile) + + if (startDate is None) or (startDate == ''): + + firstTS = allentries[0]["TIMESTAMP"] + initialentries = allentries[allentries["TIMESTAMP"] == firstTS] + subpriorsInit = initialentries["SUBPRIORITY"] + startDateShort = 19990101 + else: + log.debug('startdate') + log.debug(startDate) + initialentries = allentries[allentries["TIMESTAMP"] <= startDate] + subpriorsInit = initialentries["SUBPRIORITY"] + + origmtltilefn = os.path.join(origmtldir, get_mtl_tile_file_name(secondary=False)) + altmtltilefn = os.path.join(altmtldir, get_mtl_tile_file_name(secondary=False)) + startDateShort = int(startDate.split('T')[0].replace('-', '')) + if ('T' in endDate) & ('-' in endDate): + endDateShort = int(endDate.split('T')[0].replace('-', '')) + else: + endDateShort = int(endDate) + + if verbose or debug: + log.info('generate subset? 
{0}'.format(genSubset)) + if not genSubset is None: + if type(genSubset) == int: + if debug: + log.info('genSubset Int') + iterloop = [genSubset] + elif (type(genSubset) == list) or (type(genSubset) == np.ndarray): + if debug: + log.info('genSubset Arraylike') + iterloop = genSubset + else: + if debug: + log.info('genSubset None') + iterloop = range(nAlt) + if verbose or debug: + log.info('starting iterloop') + for n in iterloop: + if verbose or debug: + log.info('Realization {0:d}'.format(n)) + outputMTLDir = outputMTL.format(n) + if verbose or debug: + log.info('outputMTLDir') + log.info(outputMTLDir) + outfile = outputMTLDir +'/' + str(survey).lower() + '/' + str(obscon).lower() + '/' + str(fn) + if verbose or debug: + log.info('outfile') + log.info(outfile) + if os.path.exists(outfile): + if overwrite: + if verbose or debug: + log.info('overwrite') + os.remove(outfile) + else: + if verbose or debug: + log.info('continuing') + continue + if type(hpnum) == str: + try: + hpnum = int(hpnum) + except: + log.info('hpnum is string but not integer. Value is {0}'.format(hpnum)) + raise ValueError('hpnum is string but not integer. Value is {0}'.format(hpnum)) + rand.seed(seed + hpnum + n) + elif isinstance(hpnum, int) or isinstance(hpnum, np.int64): + rand.seed(seed + hpnum + n) + elif isinstance(hpnum, float) or isinstance(hpnum, np.float64): + assert(np.abs(hpnum - int(hpnum)) < 0.01) + rand.seed(seed + int(hpnum) + n) + else: + log.info('hpnum = {0}'.format(hpnum)) + log.info('type(hpnum) = {0}'.format(type(hpnum))) + assert(0) + rand.seed(seed + n) + if verbose or debug: + log.info('pre creating output dir') + if not os.path.exists(outputMTLDir): + os.makedirs(outputMTLDir) + if not os.path.exists(finalDir.format(n)): + os.makedirs(finalDir.format(n)) + if not os.path.isfile(finalDir.format(n) + '/' + ztilefn): + processTileFile(ztilefile, outputMTLDir + ztilefn, startDate, endDate) + #os.symlink(ztilefile, outputMTLDir + ztilefn) + thisTileTrackerFN = makeTileTrackerFN(finalDir.format(n), survey, obscon) + log.info('path to tiletracker = {0}'.format(thisTileTrackerFN)) + if not os.path.isfile(thisTileTrackerFN): + makeTileTracker(finalDir.format(n), survey = survey, obscon = obscon,overwrite = False, + startDate = startDateShort, endDate = endDateShort) + #makeTileTracker(outputMTLDir, survey = survey, obscon = obscon,overwrite = False, + #startDate = startDateShort, endDate = endDateShort) + subpriors = initialentries['SUBPRIORITY'] + + if (not reproducing) and shuffleSubpriorities: + newSubpriors = rand.uniform(size = len(subpriors)) + else: + newSubpriors = np.copy(subpriors) + try: + + assert((np.std(subpriorsInit - newSubpriors) > 0.001) | (len(subpriors) < 2) | ((not shuffleSubpriorities) and reproducing) ) + except: + log.warning('first shuffle failed') + log.warning('size of initial subprior array') + log.warning(len(subpriorsInit)) + + newSubpriors = rand.uniform(size = len(subpriors)) + assert((np.std(subpriorsInit - newSubpriors) > 0.001) | (len(subpriors) < 2)) + + initialentries['SUBPRIORITY'] = newSubpriors + + + # add main priority values + + + if (obscon.lower() == 'bright') and (shuffleBrightPriorities): + if (survey.lower() == 'sv3'): + BGSHIPBit = 2**3 + BGSBit = 2**0 + BGSPriorityInit = 102000 + BGSHIPPriority = 102100 + + BGSBits = initialentries['SV3_BGS_TARGET'] + elif (survey.lower() == 'main'): + BGSHIPBit = 2**3 + BGSBit = 2**0 + BGSPriorityInit = 2000 + BGSHIPPriority = 2100 + BGSBits = initialentries['BGS_TARGET'] + else: + raise ValueError('Survey.lower should 
be `sv3` or `main` but is instead {0:s}'.format(survey.lower())) + BGSFaintHIP = ((BGSBits & BGSHIPBit) == BGSHIPBit) + BGSFaintAll = ((BGSBits & BGSBit) == BGSBit) | BGSFaintHIP + + #Set all BGS_FAINT_HIP to BGS_FAINT + + initialentries['SV3_BGS_TARGET'][BGSFaintHIP] = (BGSBits[BGSFaintHIP] & ~BGSHIPBit) + initialentries['PRIORITY'][BGSFaintHIP] = BGSPriorityInit*np.ones(np.sum(BGSFaintHIP)) + initialentries['TARGET_STATE'][BGSFaintHIP] = np.broadcast_to(np.array(['BGS_FAINT|UNOBS']), BGSFaintHIP.shape) + + #Select 20% of BGS_FAINT to promote using function from desitarget + BGSFaintNewHIP = random_fraction_of_trues(PromoteFracBGSFaint, BGSFaintAll) + #Promote them + + initialentries['SV3_BGS_TARGET'][BGSFaintNewHIP] = (BGSBits[BGSFaintNewHIP] | BGSHIPBit) + initialentries['TARGET_STATE'][BGSFaintNewHIP] = np.broadcast_to(np.array(['BGS_FAINT_HIP|UNOBS']), BGSFaintNewHIP.shape) + initialentries['PRIORITY'][BGSFaintNewHIP] = BGSHIPPriority*np.ones(np.sum(BGSFaintNewHIP)).astype(int) + initialentries['PRIORITY_INIT'][BGSFaintNewHIP] = BGSHIPPriority*np.ones(np.sum(BGSFaintNewHIP)).astype(int) + + elif (survey.lower() == 'main') and (obscon.lower() == 'dark') and (shuffleELGPriorities): + + #desi_mask + + #evaluateMask(bit, mask, evalMultipleBits = False): + #flipBit(cat, bit2Flip, cond = None, fieldName = 'DESI_TARGET', mode = 'on'): + + ELGBits = initialentries['DESI_TARGET'] + + #Set up condition arrays to select each type of target class + LRGs = evaluateMask(ELGBits, desi_mask['LRG']) + ELGs = evaluateMask(ELGBits, desi_mask['ELG']) + QSOs = evaluateMask(ELGBits, desi_mask['QSO']) + ELGHIPs = evaluateMask(ELGBits, desi_mask['ELG_HIP']) + ELGLOPs = evaluateMask(ELGBits, desi_mask['ELG_LOP']) + ELGVLOs = evaluateMask(ELGBits, desi_mask['ELG_VLO']) + log.info('ELGs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGs))) + log.info('LRGs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(LRGs))) + log.info('QSOs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(QSOs))) + log.info('ELGHIPs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPs))) + log.info('ELGLOPs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGLOPs))) + log.info('ELGVLOs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGVLOs))) + + + log.info('ELGHIPs&ELGLOPs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPs&ELGLOPs))) + log.info('ELGHIPs&ELGVLOs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPs&ELGVLOs))) + log.info('ELGHIPs&LRGs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPs&LRGs))) + log.info('ELGHIPs&QSOs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPs&QSOs))) + log.info('ELGLOPs&LRGs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGLOPs&LRGs))) + log.info('ELGLOPs&QSOs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGLOPs&QSOs))) + log.info('ELGVLOs&LRGs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGVLOs&LRGs))) + log.info('ELGVLOs&QSOs:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGVLOs&QSOs))) + + + #turn off the ELG_HIP bit + initialentries = flipBit(initialentries, desi_mask['ELG_HIP'], cond = ELGHIPs, mode = 'off', fieldName = 'DESI_TARGET') + + #reset object priority, priority_init, and numobs_init based on new target bits. + outpriority, outnumobs = initial_priority_numobs(initialentries, obscon = 'DARK') + initialentries['PRIORITY'][ELGHIPs] = outpriority[ELGHIPs] + initialentries['PRIORITY_INIT'][ELGHIPs] = outpriority[ELGHIPs] + initialentries['NUMOBS_INIT'][ELGHIPs] = outnumobs[ELGHIPs] + + + #JL - reset TARGET_STATES based on new target bits. This step isn't necessary for AMTL function but makes debugging using target states vastly easier. 
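+        # Illustration (comments only, not executed) of the bit arithmetic used
+        # above and below, assuming desitarget's desi_mask bit values:
+        #   bits = desi_mask['ELG'] | desi_mask['ELG_HIP']
+        #   evaluateMask(bits, desi_mask['ELG_HIP'])   # -> True (bit is set)
+        #   bits ^ desi_mask['ELG_HIP']                # flipBit mode='off': XOR clears it
+        # The TARGET_STATE strings below are then rebuilt to match the demoted bits:
+        # rows overlapping LRG or QSO are excluded from the ELG relabeling, and
+        # ELG_HIP rows that are also LRG are relabeled to 'LRG|UNOBS'.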
+ initialentries['TARGET_STATE'][ELGHIPs & ELGVLOs & np.invert(LRGs) & np.invert(QSOs)] = np.broadcast_to(np.array(['ELG_VLO|UNOBS']), np.sum(ELGHIPs & ELGVLOs & np.invert(LRGs) & np.invert(QSOs) ) ) + + initialentries['TARGET_STATE'][ELGHIPs & ELGLOPs & np.invert(LRGs) & np.invert(QSOs)] = np.broadcast_to(np.array(['ELG_LOP|UNOBS']), np.sum(ELGHIPs & ELGLOPs & np.invert(LRGs) & np.invert(QSOs) ) ) + + initialentries['TARGET_STATE'][ELGHIPs & LRGs] = np.broadcast_to(np.array(['LRG|UNOBS']), np.sum(ELGHIPs & LRGs) ) + + + #For Debug. New Target bit flags after demoting all ELG_HIPs + ELGBitsMid = initialentries['DESI_TARGET'] + LRGsMid = evaluateMask(ELGBitsMid, desi_mask['LRG']) + ELGsMid = evaluateMask(ELGBitsMid, desi_mask['ELG']) + QSOsMid = evaluateMask(ELGBitsMid, desi_mask['QSO']) + ELGHIPsMid = evaluateMask(ELGBitsMid, desi_mask['ELG_HIP']) + ELGLOPsMid = evaluateMask(ELGBitsMid, desi_mask['ELG_LOP']) + ELGVLOsMid = evaluateMask(ELGBitsMid, desi_mask['ELG_VLO']) + log.info('ELGsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGsMid))) + log.info('LRGsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(LRGsMid))) + log.info('QSOsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(QSOsMid))) + log.info('ELGHIPsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPsMid))) + log.info('ELGLOPsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGLOPsMid))) + log.info('ELGVLOsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGVLOsMid))) + + + log.info('ELGHIPsMid&ELGLOPsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPsMid&ELGLOPsMid))) + log.info('ELGHIPsMid&ELGVLOsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPsMid&ELGVLOsMid))) + log.info('ELGHIPsMid&LRGsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPsMid&LRGsMid))) + log.info('ELGHIPsMid&QSOsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPsMid&QSOsMid))) + log.info('ELGLOPsMid&LRGsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGLOPsMid&LRGsMid))) + log.info('ELGLOPsMid&QSOsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGLOPsMid&QSOsMid))) + log.info('ELGVLOsMid&LRGsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGVLOsMid&LRGsMid))) + log.info('ELGVLOsMid&QSOsMid:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGVLOsMid&QSOsMid))) + + + + #Determine which 10% of ELGLOP and ELGVLO will be promoted to ELGHIP. These are done separately. + + #ELGNewHIP = random_fraction_of_trues(PromoteFracELG, ELGLOPs) + + #ELGNewHIP = ELGNewHIP | random_fraction_of_trues(PromoteFracELG, ELGVLOs) + + chosenLOP = rand.random(len(ELGLOPs)) < 0.1 + ELGNewHIP_FromLOP = ELGLOPs & chosenLOP + + chosenVLO = rand.random(len(ELGVLOs)) < 0.1 + ELGNewHIP_FromVLO = ELGVLOs & chosenVLO + + ELGNewHIP = ELGNewHIP_FromLOP | ELGNewHIP_FromVLO + + log.info('ELGNewHIP_FromVLO:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGNewHIP_FromVLO))) + log.info('ELGNewHIP_FromLOP:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGNewHIP_FromLOP))) + log.info('ELGNewHIP:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGNewHIP))) + + #promote the just-determined 10% of ELG_LOP/ELG_VLO + initialentries = flipBit(initialentries, desi_mask['ELG_HIP'], cond = ELGNewHIP, mode = 'on', fieldName = 'DESI_TARGET') + + + #For Debug. 
New Target bit flags after promoting 10% of ELGs to HIP + ELGBitsFinal = initialentries['DESI_TARGET'] + LRGsFinal = evaluateMask(ELGBitsFinal, desi_mask['LRG']) + ELGsFinal = evaluateMask(ELGBitsFinal, desi_mask['ELG']) + QSOsFinal = evaluateMask(ELGBitsFinal, desi_mask['QSO']) + ELGHIPsFinal = evaluateMask(ELGBitsFinal, desi_mask['ELG_HIP']) + ELGLOPsFinal = evaluateMask(ELGBitsFinal, desi_mask['ELG_LOP']) + ELGVLOsFinal = evaluateMask(ELGBitsFinal, desi_mask['ELG_VLO']) + log.info('ELGsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGsFinal))) + log.info('LRGsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(LRGsFinal))) + log.info('QSOsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(QSOsFinal))) + log.info('ELGHIPsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPsFinal))) + log.info('ELGLOPsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGLOPsFinal))) + log.info('ELGVLOsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGVLOsFinal))) + + + log.info('ELGHIPsFinal&ELGLOPsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPsFinal&ELGLOPsFinal))) + log.info('ELGHIPsFinal&ELGVLOsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPsFinal&ELGVLOsFinal))) + log.info('ELGHIPsFinal&LRGsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPsFinal&LRGsFinal))) + log.info('ELGHIPsFinal&QSOsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGHIPsFinal&QSOsFinal))) + log.info('ELGLOPsFinal&LRGsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGLOPsFinal&LRGsFinal))) + log.info('ELGLOPsFinal&QSOsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGLOPsFinal&QSOsFinal))) + log.info('ELGVLOsFinal&LRGsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGVLOsFinal&LRGsFinal))) + log.info('ELGVLOsFinal&QSOsFinal:HPNUM:{0}:Sum:{1}'.format(hpnum, np.sum(ELGVLOsFinal&QSOsFinal))) + + + + #reset object priority, priority_init, and numobs_init based on new target bits. + outpriority, outnumobs = initial_priority_numobs(initialentries, obscon = 'DARK') + initialentries['PRIORITY'][ELGNewHIP] = outpriority[ELGNewHIP] + initialentries['PRIORITY_INIT'][ELGNewHIP] = outpriority[ELGNewHIP] + initialentries['NUMOBS_INIT'][ELGNewHIP] = outnumobs[ELGNewHIP] + + #JL - reset TARGET_STATES based on new target bits. This step isn't necessary for AMTL function but makes debugging using target states vastly easier. 
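+        # Note on the promotion above: the selection draws one uniform deviate per
+        # row and keeps those below a hardcoded 0.1, i.e. roughly 10% of ELG_LOP and
+        # 10% of ELG_VLO targets are promoted independently in each realization; the
+        # PromoteFracELG argument is not consulted in this branch. A minimal sketch
+        # of the equivalent call using desitarget's helper would be
+        #   ELGNewHIP = random_fraction_of_trues(PromoteFracELG, ELGLOPs) \
+        #               | random_fraction_of_trues(PromoteFracELG, ELGVLOs)
+        # which mirrors the commented-out lines earlier in this block.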
+ initialentries['TARGET_STATE'][ELGNewHIP & np.invert(QSOs)] = np.broadcast_to(np.array(['ELG_HIP|UNOBS']), np.sum(ELGNewHIP & np.invert(QSOs) ) ) + + retval = desitarget.io.write_mtl(outputMTLDir, initialentries, survey=survey, obscon=obscon, extra=meta, nsidefile=meta['FILENSID'], hpxlist = [meta['FILEHPX']]) + if debug or verbose: + log.info('(nowrite = False) ntargs, fn = {0}'.format(retval)) + log.info('wrote MTLs to {0}'.format(outputMTLDir)) + if saveBackup and (not usetmp): + if not os.path.exists(str(outputMTLDir) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/'): + os.makedirs(str(outputMTLDir) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/') + + + if not os.path.exists(str(outputMTLDir) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/' + str(fn)): + from shutil import copyfile + copyfile(str(outputMTLDir) +'/' + str(survey).lower() + '/' + str(obscon).lower() + '/' + str(fn), str(outputMTLDir) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/' + str(fn)) + if usetmp: + from shutil import copyfile + + if not os.path.exists(str(finalDir.format(n)) +'/' + str(survey).lower() + '/' +str(obscon).lower() ): + os.makedirs(str(finalDir.format(n)) +'/' + str(survey).lower() + '/' +str(obscon).lower() ) + if saveBackup and (not os.path.exists(str(finalDir.format(n)) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/')): + os.makedirs(str(finalDir.format(n)) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/') + if debug: + log.info('tempdir contents before copying') + log.info(glob.glob(outputMTLDir + '/*' )) + log.info(glob.glob(outputMTLDir + '/main/dark/*' )) + copyfile(str(outputMTLDir) +'/' + str(survey).lower() + '/' + str(obscon).lower() + '/' + str(fn), str(finalDir.format(n)) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/' + str(fn)) + if debug: + log.info('tempdir contents after copying') + log.info(glob.glob(outputMTLDir + '/*' )) + log.info(glob.glob(outputMTLDir + '/main/dark/*' )) + + if saveBackup and not os.path.exists(str(outputMTLDir) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/' + str(fn)): + #JL Potentially move the saveBackup copying to an afterburner + #JL to speed up afterburner process. Copy all at once + copyfile(str(outputMTLDir) +'/' + str(survey).lower() + '/' + str(obscon).lower() + '/' + str(fn), str(finalDir.format(n)) +'/' + str(survey).lower() + '/' +str(obscon).lower() + '/orig/' + str(fn)) + + os.remove(str(outputMTLDir) +'/' + str(survey).lower() + '/' + str(obscon).lower() + '/' + str(fn)) + if debug: + log.info('tempdir contents after removing') + log.info(glob.glob(outputMTLDir + '/*' )) + if usetmp: + + if verbose or debug: + log.info('cleaning up tmpdir') + log.info(glob.glob(outputMTLDir + '*' )) + f2c = glob.glob(outputMTLDir + '*' ) + if verbose or debug: + log.info('finaldir') + log.info(finalDir.format(n)) + for tempfn in f2c: + if '.' 
in str(os.path.split(tempfn)[1]): + if verbose or debug: + log.info('copying tempfn: {0}'.format(tempfn)) + copyfile(tempfn , str(finalDir.format(n)) +'/' + os.path.basename(tempfn) ) + + if verbose or debug: + log.info('tempdir contents after copying') + log.info(glob.glob(outputMTLDir + '*' )) + + if profile: + pr.disable() + s = ProfileIO.StringIO() + sortby = SortKey.CUMULATIVE + ps = pstats.Stats(pr, stream=s).sort_stats(sortby) + ps.print_stats() + if usetmp: + + ps.dump_stats(str(finalDir.format(n)) +'/' + str(survey).lower() + '/' + str(obscon).lower() + '/' + str(fn) + '.prof') + else: + ps.dump_stats(str(outputMTLDir) +'/' + str(survey).lower() + '/' + str(obscon).lower() + '/' + str(fn) + '.prof') + print(s.getvalue()) + + + +def quickRestartFxn(ndirs = 1, altmtlbasedir = None, survey = 'sv3', obscon = 'dark', multiproc =False, nproc = None, verbose = False, debug = False): + if verbose or debug: + log.info('quick restart running') + from shutil import copyfile, move + from glob import glob as ls + if multiproc: + iterloop = range(nproc, nproc+1) + else: + iterloop = range(ndirs) + for nRestart in iterloop: + if verbose or debug: + log.info(nRestart) + altmtldirRestart = altmtlbasedir + '/Univ{0:03d}/'.format(nRestart) + if os.path.exists(altmtldirRestart + 'mtl-done-tiles.ecsv'): + move(altmtldirRestart + 'mtl-done-tiles.ecsv',altmtldirRestart + 'mtl-done-tiles.ecsv.old') + restartMTLs = ls(altmtldirRestart +'/' + survey + '/' + obscon + '/' + '/orig/*') + for fn in restartMTLs: + copyfile(fn, altmtldirRestart +'/' + survey + '/' + obscon + '/' + fn.split('/')[-1]) + +def do_fiberassignment(altmtldir, FATiles, survey = 'sv3', obscon = 'dark', + verbose = False, debug = False, getosubp = False, redoFA = False, mock = False, reproducing = False): + #FATiles = tiles_to_be_processed_alt(altmtldir, obscon = obscon, survey = survey, today = today, mode = 'fa') + if len(FATiles): + try: + log.info('FATiles[0] = {0}'.format(FATiles[0])) + if isinstance(FATiles[0], (collections.abc.Sequence, np.ndarray)): + pass + else: + FATiles = [FATiles] + except: + log.info('cannot access element 0 of FATiles') + log.info('FATiles = {0}'.format(FATiles)) + + + OrigFAs = [] + AltFAs = [] + AltFAs2 = [] + TSs = [] + fadates = [] + + + #if len(FATiles): + # log.info('len FATiles = {0}'.format(len(FATiles))) + # pass + #else: + # return OrigFAs, AltFAs, AltFAs2, TSs, fadates, FATiles + for t in FATiles: + log.info('t = {0}'.format(t)) + #JL This loop takes each of the original fiberassignments for each of the tiles on $date + #JL and opens them to obtain information for the alternative fiber assignments. + #JL Then it runs the alternative fiber assignments, stores the results in an array (AltFAs) + #JL while also storing the original fiber assignment files in a different array (OrigFA) + + ts = str(t['TILEID']).zfill(6) + #JL Full path to the original fiber assignment from the real survey + FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz' + fhtOrig = fitsio.read_header(FAOrigName) + fadate = fhtOrig['RUNDATE'] + # e.g. DESIROOT/target/catalogs/dr9/1.0.0/targets/main/resolve/dark + targver = fhtOrig['TARG'].split('/targets')[0].split('/')[-1] + assert(not ('/' in targver)) + log.info('fadate = {0}'.format(fadate)) + #JL stripping out the time of fiber assignment to leave only the date + #JL THIS SHOULD ONLY BE USED IN DIRECTORY NAMES. 
THE ACTUAL RUNDATE VALUE SHOULD INCLUDE A TIME + fadate = ''.join(fadate.split('T')[0].split('-')) + log.info('fadate stripped = {0}'.format(fadate)) + fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/' + + log.info('fbadirbase = {0}'.format(fbadirbase)) + log.info('ts = {0}'.format(ts)) + ##log.info('t[reprocflag] (should be false if here)= {0}'.format(t['REPROCFLAG'])) + ##assert(not bool(t['REPROCFLAG'])) + #if str(ts) == str(3414).zfill(6): + # raise ValueError('Not only do I create the backup here but I also need to fix the reproc flag') + + if getosubp: + #JL When we are trying to reproduce a prior survey and/or debug, create a separate + #JL directory in fbadirbase + /orig/ to store the reproduced FA files. + FAAltName = fbadirbase + '/orig/fba-' + ts+ '.fits' + #FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + fbadir = fbadirbase + '/orig/' + else: + + #JL For normal "alternate" operations, store the fiber assignments + #JL in the fbadirbase directory. + + FAAltName = fbadirbase + '/fba-' + ts+ '.fits' + #FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + fbadir = fbadirbase + if verbose or debug: + log.info('FAOrigName = {0}'.format(FAOrigName)) + + log.info('FAAltName = {0}'.format(FAAltName)) + + #JL Sometimes fiberassign leaves around temp files if a run is aborted. + #JL This command removes those temp files to prevent endless crashes. + if os.path.exists(FAAltName + '.tmp'): + os.remove(FAAltName + '.tmp') + #JL If the alternate fiberassignment was already performed, don't repeat it + #JL Unless the 'redoFA' flag is set to true + if verbose or debug: + log.info('redoFA = {0}'.format(redoFA)) + log.info('FAAltName = {0}'.format(FAAltName)) + + if redoFA or (not os.path.exists(FAAltName)): + if verbose and os.path.exists(FAAltName): + log.info('repeating fiberassignment') + elif verbose: + log.info('fiberassignment not found, running fiberassignment') + if verbose: + log.info(ts) + log.info(altmtldir + survey.lower()) + log.info(fbadir) + log.info(getosubp) + log.info(redoFA) + if getosubp and verbose: + log.info('checking contents of fiberassign directory before calling get_fba_from_newmtl') + log.info(glob.glob(fbadir + '/*' )) + #get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock, targver = targver)#, targets = targets) + get_fba_fromnewmtl(ts,mtldir=altmtldir + survey.lower() + '/',outdir=fbadirbase, getosubp = getosubp, overwriteFA = redoFA, verbose = verbose, mock = mock, targver = targver, reproducing = reproducing)#, targets = targets) + command_run = (['bash', fbadir + 'fa-' + ts + '.sh']) + if verbose: + log.info('fa command_run') + log.info(command_run) + result = subprocess.run(command_run, capture_output = True) + else: + log.info('not repeating fiberassignment') + log.info('adding fiberassignments to arrays') + OrigFAs.append(pf.open(FAOrigName)[1].data) + AltFAs.append(pf.open(FAAltName)[1].data) + AltFAs2.append(pf.open(FAAltName)[2].data) + TSs.append(ts) + fadates.append(fadate) + + return OrigFAs, AltFAs, AltFAs2, TSs, fadates, FATiles + +def make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, survey = 'sv3', obscon = 'dark', changeFiberOpt = None, verbose = False, debug = False, getosubp = False, redoFA = False): + A2RMap = {} + R2AMap = {} + if verbose: + log.info('beginning loop through FA files') + for ofa, afa, afa2, ts, fadate, t in zip(OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles): + log.info('ts 
= {0}'.format(ts)) + if changeFiberOpt is None: + A2RMap, R2AMap = createFAmap(ofa, afa, changeFiberOpt = changeFiberOpt) + else: + raise NotImplementedError('changeFiberOpt has not yet been implemented') + + #FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz' + + A2RMap, R2AMap = createFAmap(ofa, afa, TargAlt = afa2, changeFiberOpt = changeFiberOpt) + + fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/' + if getosubp: + FAAltName = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/orig/fba-' + ts+ '.fits' + FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + fbadir = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/orig/' + else: + + FAAltName = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/fba-' + ts+ '.fits' + FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + fbadir = fbadirbase + + + if debug: + log.info('ts = {0}'.format(ts)) + log.info('FAMapName = {0}'.format(FAMapName)) + + + if redoFA or (not (os.path.isfile(FAMapName))): + if verbose: + log.info('dumping out fiber map to pickle file') + with open(FAMapName, 'wb') as handle: + pickle.dump((A2RMap, R2AMap), handle, protocol=pickle.HIGHEST_PROTOCOL) + #thisUTCDate = get_utc_date(survey=survey) + if verbose: + log.info('---') + log.info('unique keys in R2AMap = {0:d}'.format(np.unique(R2AMap.keys()).shape[0])) + log.info('---') + + log.info('---') + log.info('unique keys in A2RMap = {0:d}'.format(np.unique(A2RMap.keys()).shape[0])) + log.info('---') + #retval = write_amtl_tile_tracker(altmtldir, [t], obscon = obscon, survey = survey, mode = 'fa') + retval = write_amtl_tile_tracker(altmtldir, [t], obscon = obscon, survey = survey) + log.info('write_amtl_tile_tracker retval = {0}'.format(retval)) + + return A2RMap, R2AMap +def update_alt_ledger(altmtldir,althpdirname, altmtltilefn, actions, survey = 'sv3', obscon = 'dark', today = None, + getosubp = False, zcatdir = None, mock = False, numobs_from_ledger = True, targets = None, verbose = False, debug = False): + if verbose or debug: + log.info('today = {0}'.format(today)) + log.info('obscon = {0}'.format(obscon)) + log.info('survey = {0}'.format(survey)) + #UpdateTiles = tiles_to_be_processed_alt(altmtldir, obscon = obscon, survey = survey, today = today, mode = 'update') + #log.info('updatetiles = {0}'.format(UpdateTiles)) + # ADM grab the zcat directory (in case we're relying on $ZCAT_DIR). + zcatdir = get_zcat_dir(zcatdir) + # ADM And contruct the associated ZTILE filename. 
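+    # Overview of the update flow below (illustrative comments only): each 'update'
+    # action reloads the per-tile fiber maps that make_fibermaps pickled, e.g.
+    #   with open(FAMapName, 'rb') as fl:
+    #       A2RMap, R2AMap = pickle.load(fl)
+    # and passes them to makeAlternateZCat, so the real survey's redshifts are
+    # reassigned to whichever TARGETIDs the alternate fiberassignment put on the
+    # same fibers, before update_ledger is called on the alternate ledgers.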
+ ztilefn = os.path.join(zcatdir, get_ztile_file_name()) + #if len(UpdateTiles): + # pass + #else: + # return althpdirname, altmtltilefn, ztilefn, None + #isinstance(FATiles[0], (collections.abc.Sequence, np.ndarray)) + if not isinstance(actions['TILEID'], (collections.abc.Sequence, np.ndarray)): + actions = [actions] + log.info('actions = {0}'.format(actions)) + for t in actions: + log.info('t = {0}'.format(t)) + if t['ACTIONTYPE'].lower() == 'reproc': + raise ValueError('Reprocessing should be handled elsewhere.') + #raise ValueError('Make sure backup is made and reprocessing logic is correct before beginning reprocessing.') + ts = str(t['TILEID']).zfill(6) + + FAOrigName = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz' + fhtOrig = fitsio.read_header(FAOrigName) + fadate = fhtOrig['RUNDATE'] + fadate = ''.join(fadate.split('T')[0].split('-')) + fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + fadate + '/' + log.info('t = {0}'.format(t)) + log.info('fbadirbase = {0}'.format(fbadirbase)) + log.info('ts = {0}'.format(ts)) + + if getosubp: + FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + else: + FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + + log.info('FAMapName = {0}'.format(FAMapName)) + with open(FAMapName,'rb') as fl: + (A2RMap, R2AMap) = pickle.load(fl,fix_imports = True) + + # ADM create the catalog of updated redshifts. + log.info('making zcats') + log.info('zcatdir = {0}'.format(zcatdir)) + log.info('t = {0}'.format(t)) + zcat = make_zcat(zcatdir, [t], obscon, survey) + + altZCat = makeAlternateZCat(zcat, R2AMap, A2RMap, debug = debug, verbose = verbose) + # ADM insist that for an MTL loop with real observations, the zcat + # ADM must conform to the data model. In particular, it must include + # ADM ZTILEID, and other columns addes for the Main Survey. These + # ADM columns may not be needed for non-ledger simulations. + # ADM Note that the data model differs with survey type. + zcatdm = survey_data_model(zcatdatamodel, survey=survey) + if zcat.dtype.descr != zcatdm.dtype.descr: + msg = "zcat data model must be {} not {}!".format( + zcatdm.dtype.descr, zcat.dtype.descr) + log.critical(msg) + raise ValueError(msg) + # ADM useful to know how many targets were updated. + _, _, _, _, sky, _ = decode_targetid(zcat["TARGETID"]) + ntargs, nsky = np.sum(sky == 0), np.sum(sky) + msg = "Update state for {} targets".format(ntargs) + msg += " (the zcats also contain {} skies with +ve TARGETIDs)".format(nsky) + log.info(msg) + didUpdateHappen = False + # ADM update the appropriate ledger. 
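+        # All three branches below call desitarget.mtl.update_ledger on the
+        # alternate healpix ledgers with the remapped zcat. The mock and default
+        # branches issue the same call, e.g.
+        #   update_ledger(althpdirname, altZCat, obscon=obscon.upper(),
+        #                 numobs_from_ledger=numobs_from_ledger)
+        # while the final branch additionally forwards targets=targets;
+        # didUpdateHappen guards that exactly one branch ran.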
+ if mock: + + if targets is None: + raise ValueError('If processing mocks, you MUST specify a target file') + log.info('update loc a') + print('althpdirname') + print(althpdirname) + print('---------------------') + print('altZCat') + print(altZCat) + print('*************************') + + update_ledger(althpdirname, altZCat, obscon=obscon.upper(), + numobs_from_ledger=numobs_from_ledger)#, targets = targets) + print('AURE') + didUpdateHappen = True + elif targets is None: + log.info('update loc b') + update_ledger(althpdirname, altZCat, obscon=obscon.upper(), + numobs_from_ledger=numobs_from_ledger) + didUpdateHappen = True + else: + log.info('update loc c') + update_ledger(althpdirname, altZCat, obscon=obscon.upper(), + numobs_from_ledger=numobs_from_ledger, targets = targets) + didUpdateHappen = True + assert(didUpdateHappen) + if verbose or debug: + log.info('if main, should sleep 1 second') + #thisUTCDate = get_utc_date(survey=survey) + if survey == "main": + sleep(1) + if verbose or debug: + log.info('has slept one second') + #t["ALTARCHIVEDATE"] = thisUTCDate + if verbose or debug: + log.info('now writing to amtl_tile_tracker') + #io.write_mtl_tile_file(altmtltilefn,dateTiles) + #write_amtl_tile_tracker(altmtldir, dateTiles, thisUTCDate, obscon = obscon, survey = survey) + log.info('changes are being registered') + log.info('altmtldir = {0}'.format(altmtldir)) + log.info('t = {0}'.format(t)) + #log.info('thisUTCDate = {0}'.format(thisUTCDate)) + log.info('today = {0}'.format(today)) + #retval = write_amtl_tile_tracker(altmtldir, [t], obscon = obscon, survey = survey, mode = 'update') + retval = write_amtl_tile_tracker(altmtldir, [t], obscon = obscon, survey = survey) + log.info('write_amtl_tile_tracker retval = {0}'.format(retval)) + if verbose or debug: + log.info('has written to amtl_tile_tracker') + + return althpdirname, altmtltilefn, ztilefn, actions +#@profile +def loop_alt_ledger(obscon, survey='sv3', zcatdir=None, mtldir=None, + altmtlbasedir=None, ndirs = 3, numobs_from_ledger=True, + secondary=False, singletile = None, singleDate = None, debugOrig = False, + getosubp = False, quickRestart = False, redoFA = False, + multiproc = False, nproc = None, testDoubleDate = False, + changeFiberOpt = None, targets = None, mock = False, + debug = False, verbose = False, reproducing = False): + """Execute full MTL loop, including reading files, updating ledgers. + + Parameters + ---------- + obscon : :class:`str` + A string matching ONE obscondition in the desitarget bitmask yaml + file (i.e. in `desitarget.targetmask.obsconditions`), e.g. "DARK" + Governs how priorities are set when merging targets. + survey : :class:`str`, optional, defaults to "main" + Used to look up the correct ledger, in combination with `obscon`. + Options are ``'main'`` and ``'svX``' (where X is 1, 2, 3 etc.) + for the main survey and different iterations of SV, respectively. + zcatdir : :class:`str`, optional, defaults to ``None`` + Full path to the "daily" directory that hosts redshift catalogs. + If this is ``None``, look up the redshift catalog directory from + the $ZCAT_DIR environment variable. + mtldir : :class:`str`, optional, defaults to ``None`` + Full path to the directory that hosts the MTL ledgers and the MTL + tile file. If ``None``, then look up the MTL directory from the + $MTL_DIR environment variable. 
altmtlbasedir : :class:`str`, optional, defaults to ``None``
+        Formattable path to a directory that hosts alternate MTL ledgers.
+        If ``None``, then look up the MTL directory from the
+        $ALT_MTL_DIR environment variable. This will fail since that variable
+        is not currently set in the desi code setup.
+    ndirs : :class:`int`, optional, defaults to ``3``
+        Number of alternate MTLs to process within altmtlbasedir.
+    numobs_from_ledger : :class:`bool`, optional, defaults to ``True``
+        If ``True`` then inherit the number of observations so far from
+        the ledger rather than expecting it to have a reasonable value
+        in the `zcat`.
+    secondary : :class:`bool`, optional, defaults to ``False``
+        If ``True`` then process secondary targets instead of primaries
+        for passed `survey` and `obscon`.
+    quickRestart : :class:`bool`, optional, defaults to ``False``
+        If ``True`` then copy the original alternate MTLs back from
+        altmtlbasedir/Univ*/survey/obscon/orig and set aside the existing
+        mtl-done-tiles file before processing.
+    redoFA : :class:`bool`, optional, defaults to ``False``
+        If ``True`` then automatically redo fiberassignment regardless of
+        existence of fiberassign file in alternate fiberassign directory.
+    multiproc : :class:`bool`, optional, defaults to ``False``
+        If ``True`` then run a single MTL update in a directory specified by
+        nproc.
+    nproc : :class:`int`, optional, defaults to None
+        If multiproc is ``True`` this must be specified. Integer determines
+        directory of alternate MTLs to update.
+
+    Returns
+    -------
+    :class:`str`
+        The directory containing the ledger that was updated.
+    :class:`str`
+        The name of the MTL tile file that was updated.
+    :class:`str`
+        The name of the ZTILE file that was used to link TILEIDs to
+        observing conditions and to determine if tiles were "done".
+    :class:`~numpy.array`
+        Information for the tiles that were processed.
+
+    Notes
+    -----
+    - Assumes all of the relevant ledgers have already been made by,
+      e.g., :func:`~LSS.SV3.altmtltools.initializeAlternateMTLs()`.
+    """
+
+
+    if mock:
+        if targets is None:
+            raise ValueError('If processing mocks, you MUST specify a target file')
+    if debug:
+        log.info('getosubp value: {0}'.format(getosubp))
+    if ('trunk' in altmtlbasedir.lower()) or ('ops' in altmtlbasedir.lower()):
+        raise ValueError("In order to prevent accidental overwriting of the real MTLs, please remove \'ops\' and \'trunk\' from your MTL output directory")
+    assert((singleDate is None) or (type(singleDate) == bool))
+    if multiproc:
+        import multiprocessing as mp
+        import logging
+
+        logger = mp.log_to_stderr(logging.DEBUG)
+
+    ### JL - Start of directory/loop variable construction ###
+
+
+    # ADM first grab all of the relevant files.
+    # ADM grab the MTL directory (in case we're relying on $MTL_DIR).
+    ##mtldir = get_mtl_dir(mtldir)
+    # ADM construct the full path to the mtl tile file.
+    ##mtltilefn = os.path.join(mtldir, get_mtl_tile_file_name(secondary=secondary))
+    # ADM construct the relevant sub-directory for this survey and
+    # ADM set of observing conditions.
+    form = get_mtl_ledger_format()
+    resolve = True
+    msg = "running on {} ledger with obscon={} and survey={}"
+    if secondary:
+        log.info(msg.format("SECONDARY", obscon, survey))
+        resolve = None
+    else:
+        log.info(msg.format("PRIMARY", obscon, survey))
+
+
+
+    if altmtlbasedir is None:
+        log.critical('This will automatically find the alt mtl dir in the future but fails now. 
Bye.') + assert(0) + if debugOrig: + iterloop = range(1) + elif multiproc: + iterloop = range(nproc, nproc+1) + else: + iterloop = range(ndirs) + ### JL - End of directory/loop variable construction ### + + + + if quickRestart: + raise NotImplementedError('There is no way the quick restart will work properly post refactor.') + quickRestartFxn(ndirs = ndirs, altmtlbasedir = altmtlbasedir, survey = survey, obscon = obscon, multiproc = multiproc, nproc = nproc) + + ### JL - this loop is through all realizations serially or (usually) one realization parallelized + for n in iterloop: + if debugOrig: + altmtldir = altmtlbasedir + else: + altmtldir = os.path.join(altmtlbasedir.format(mock_number=n), 'Univ000/') + #altmtldir = altmtlbasedir + '/Univ{0:03d}/'.format(n) + altmtltilefn = os.path.join(altmtldir, get_mtl_tile_file_name(secondary=secondary)) + + althpdirname = desitarget.io.find_target_files(altmtldir, flavor="mtl", resolve=resolve, + survey=survey, obscon=obscon, ender=form) + + altMTLTileTrackerFN = makeTileTrackerFN(altmtldir, survey = survey, obscon = obscon) + altMTLTileTracker = Table.read(altMTLTileTrackerFN) + #today = altMTLTileTracker.meta['Today'] + #endDate = altMTLTileTracker.meta['EndDate'] + + actionList = altMTLTileTracker[np.invert(altMTLTileTracker['DONEFLAG'])] + + actionList.sort(['ACTIONTIME']) + #if not (singletile is None): + # tiles = tiles[tiles['TILEID'] == singletile] + + #if testDoubleDate: + # raise NotImplementedError('this block needs to be moved for new organization of tiletracker.') + # log.info('Testing Rosette with Doubled Date only') + # cond1 = ((tiles['TILEID'] >= 298) & (tiles['TILEID'] <= 324)) + # cond2 = ((tiles['TILEID'] >= 475) & (tiles['TILEID'] <= 477)) + # log.info(tiles[tiles['TILEID' ] == 314]) + # log.info(tiles[tiles['TILEID' ] == 315]) + # tiles = tiles[cond1 | cond2 ] + + + #for ots,famtlt,reprocFlag in datepairs: + #while int(today) <= int(endDate): + for action in actionList: + + if action['ACTIONTYPE'] == 'fa': + + OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles = do_fiberassignment(altmtldir, [action], survey = survey, obscon = obscon ,verbose = verbose, debug = debug, getosubp = getosubp, redoFA = redoFA, mock = mock, reproducing = reproducing) + assert(len(OrigFAs)) + A2RMap, R2AMap = make_fibermaps(altmtldir, OrigFAs, AltFAs, AltFAs2, TSs, fadates, tiles, changeFiberOpt = changeFiberOpt, verbose = verbose, debug = debug, survey = survey , obscon = obscon, getosubp = getosubp, redoFA = redoFA ) + elif action['ACTIONTYPE'] == 'update': + althpdirname, altmtltilefn, ztilefn, tiles = update_alt_ledger(altmtldir,althpdirname, altmtltilefn, action, survey = survey, obscon = obscon ,getosubp = getosubp, zcatdir = zcatdir, mock = mock, numobs_from_ledger = numobs_from_ledger, targets = targets, verbose = verbose, debug = debug) + elif action['ACTIONTYPE'] == 'reproc': + #returns timedict + + #raise NotImplementedError('make backup here before reprocessing. 
Then resume Debugging.') + retval = reprocess_alt_ledger(altmtldir, action, obscon=obscon, survey = survey) + if debug or verbose: + log.info(f'retval = {retval}') + + else: + raise ValueError('actiontype must be `fa`, `update`, or `reproc`.') + #retval = write_amtl_tile_tracker(altmtldir, None, None, today, obscon = obscon, survey = survey, mode = 'endofday') + #log.info('write_amtl_tile_tracker retval = {0}'.format(retval)) + + #today = nextDate(today) + #log.info('----------') + #log.info('----------') + #log.info('----------') + #log.info('moving to next day: {0}'.format(today)) + #log.info('----------') + #log.info('----------') + #log.info('----------') + + + return althpdirname, altmtltilefn, altMTLTileTrackerFN, actionList + +def plotMTLProb(mtlBaseDir, ndirs = 10, hplist = None, obscon = 'dark', survey = 'sv3', outFileName = None, outFileType = '.png', jupyter = False, debug = False, verbose = False): + """Plots probability that targets were observed among {ndirs} alternate realizations + of SV3. Uses default matplotlib colorbar to plot between 1-{ndirs} observations. + + Parameters + ---------- + mtlBaseDir : :class:`str` + The home directory of your alternate MTLs. Should not contain obscon + or survey. String should be formattable (i.e. '/path/to/dirs/Univ{0:03d}') + ndirs : :class:`int` + The number of alternate realizations to plot. + survey : :class:`str`, optional, defaults to "sv3" + Used to look up the correct ledger, in combination with `obscon`. + Options are ``'main'`` and ``'svX``' (where X is 1, 2, 3 etc.) + for the main survey and different iterations of SV, respectively. + obscon : :class:`str`, optional, defaults to "dark" + Used to look up the correct ledger, in combination with `survey`. + Options are ``'dark'`` and ``'bright``' + hplist : :class:`arraylike`, optional, defaults to None + List of healpixels to plot. If None, defaults to plotting all available + healpixels + outFileName : :class:`str`, optional, defaults to None + If desired, save file to this location. This will + usually be desired, but was made optional for use in + ipython notebooks. + outFileType : :class:`str`, optional, defaults to '.png' + If desired, save file with name "outFileName" with + type/suffix outFileType. This will usually be desired, + but was made optional for use in ipython notebooks. 
+ + + + Returns + ------- + Nothing + + """ + ObsFlagList = np.array([]) + for i in range(ndirs): + mtldir = mtlBaseDir.format(i) + '/' + survey + '/' + obscon + MTL = np.sort(desitarget.io.read_mtl_in_hp(mtldir, 32, hplist, unique=True, isodate=None, returnfn=False, initial=False, leq=False), order = 'TARGETID') + try: + ObsFlagList = np.column_stack((ObsFlagList,MTL['NUMOBS'] > 0.5)) + except: + log.info('This message should appear once, only for the first realization.') + ObsFlagList = MTL['NUMOBS'] > 0.5 + if verbose or debug: + log.info(ObsFlagList.shape) + ObsArr = np.sum(ObsFlagList, axis = 1) + + + #MTLList[i] = rfn.append_fields(MTLList[i], 'OBSFLAG', MTLList[i]['NUMOBS'] > 0, dtypes=np.dtype(bool)) + + hist, bins = np.histogram(ObsArr, bins = np.arange(ndirs)+ 0.1) + + plt.figure() + plt.plot(bins[1:]- 0.01, hist) + plt.xlabel('Number of Realizations in which a target was observed') + plt.ylabel('Number of targets') + #plt.yscale('log') + if len(hplist )> 100: + plt.ylim(0, 8000) + elif (len(hplist) > 4) & (obscon == 'dark'): + plt.ylim(0, 1500) + elif (len(hplist) > 4) & (obscon == 'bright'): + plt.ylim(0, 500) + if not (outFileName is None): + plt.savefig(outFileName + '_vsNtarget' + outFileType) + if not jupyter: + plt.close() + plt.figure() + plt.scatter(MTL['RA'][ObsArr > 0], MTL['DEC'][ObsArr > 0], c = ObsArr[ObsArr > 0], s = 0.1) + plt.xlabel('RA') + plt.ylabel('DEC') + cbar = plt.colorbar() + cbar.set_label('Number of Realizations in which target was observed') + if not (outFileName is None): + plt.savefig(outFileName + '_vsRADEC' + outFileType ) + if not jupyter: + plt.close() + +#@profile +def makeBitweights(mtlBaseDir, ndirs = 64, hplist = None, obscon = 'dark', survey = 'sv3', debug = False, obsprob = False, splitByReal = False, verbose = False): + """Takes a set of {ndirs} realizations of DESI/SV3 and converts their MTLs into bitweights + and an optional PROBOBS, the probability that the target was observed over the realizations + + Parameters + ---------- + mtlBaseDir : :class:`str` + The home directory of your alternate MTLs. Should not contain obscon + or survey. String should be formattable (i.e. '/path/to/dirs/Univ{0:03d}') + ndirs : :class:`int` + The number of alternate realizations to process. + survey : :class:`str`, optional, defaults to "sv3" + Used to look up the correct ledger, in combination with `obscon`. + Options are ``'main'`` and ``'svX``' (where X is 1, 2, 3 etc.) + for the main survey and different iterations of SV, respectively. + obscon : :class:`str`, optional, defaults to "dark" + Used to look up the correct ledger, in combination with `survey`. + Options are ``'dark'`` and ``'bright``' + hplist : :class:`arraylike`, optional, defaults to None + List of healpixels to plot. If None, defaults to plotting all available + healpixels + debug : :class:`bool`, optional, defaults to False + If True, prints extra information showing input observation information + and output bitweight information for the first few targets as well as + the first few targets that were observed in at least one realization + obsprob: class:`bool`, optional, defaults to False + If True, returns TARGETID, BITWEIGHT, and OBSPROB. Else returns TARGETID + and BITWEIGHT only + splitByReal: class:`bool`, optional, defaults to False + If True, run for only a single realization but for all healpixels in hplist. 
+
+    Returns
+    -------
+    :class:`~numpy.array`
+        Array of Target IDs
+    :class:`~numpy.array`
+        Array of bitweights for those target ids
+    :class:`~numpy.array`, returned only if obsprob is True
+        Array of probabilities a target gets observed over {ndirs} realizations
+
+    """
+
+    TIDs = None
+    if splitByReal:
+
+        from mpi4py import MPI
+        if debug or verbose:
+            log.info('mtlbasedir')
+            log.info(mtlBaseDir)
+            log.info(mtlBaseDir.format(0))
+        ntar = desitarget.io.read_mtl_in_hp(mtlBaseDir.format(0) + '/' + survey + '/' + obscon, 32, hplist, unique=True, isodate=None, returnfn=False, initial=False, leq=False).shape[0]
+
+        comm = MPI.COMM_WORLD
+        mpi_procs = comm.size
+        mpi_rank = comm.rank
+        if debug or verbose:
+            log.info('running on {0:d} cores'.format(mpi_procs))
+        n_realization = ndirs
+        realizations = np.arange(ndirs, dtype=np.int32)
+        my_realizations = np.array_split(realizations, mpi_procs)[mpi_rank]
+        # NB: comm.Gather below assumes equal-sized chunks on every rank,
+        # i.e. ndirs should be divisible by the number of MPI ranks.
+        MyObsFlagList = np.empty((my_realizations.shape[0], ntar), dtype = bool)
+        for i, r in enumerate(my_realizations):
+            # use the global realization index r, not the rank-local loop index i
+            mtldir = mtlBaseDir.format(r) + '/' + survey + '/' + obscon
+            MTL = np.sort(desitarget.io.read_mtl_in_hp(mtldir, 32, hplist, unique=True, isodate=None, returnfn=False, initial=False, leq=False), order = 'TARGETID')
+            if TIDs is None:
+                TIDs = MTL['TARGETID']
+            else:
+                assert(np.array_equal(TIDs, MTL['TARGETID']))
+
+            MyObsFlagList[i][:] = MTL['NUMOBS'] > 0.5
+
+        ObsFlagList = None
+        bitweights = None
+        obsprobs = None
+        if mpi_rank == 0:
+            ObsFlagList = np.empty((ndirs, ntar), dtype = bool)
+        comm.Gather(MyObsFlagList, ObsFlagList, root=0)
+        if mpi_rank == 0:
+            if debug or verbose:
+                print(ObsFlagList.shape)
+            ObsArr = np.sum(ObsFlagList, axis = 0)
+            obsprobs = ObsArr/ndirs
+            if debug or verbose:
+                print(np.min(ObsArr))
+                print(np.max(ObsArr))
+                print("ObsFlagList shape here: {0}".format(ObsFlagList.shape))
+            bitweights = pack_bitweights(ObsFlagList.T)
+            if debug or verbose:
+                print('bitweights shape here: {0}'.format(bitweights.shape))
+                print('TIDs shape here: {0}'.format(TIDs.shape))
+        assert(not (TIDs is None))
+        if obsprob:
+            return TIDs, bitweights, obsprobs
+        else:
+            return TIDs, bitweights
+
+    else:
+        # placeholder of the wrong shape; the first column_stack below fails
+        # and the except clause seeds ObsFlagList with the first realization
+        ObsFlagList = np.empty(ndirs)
+        for i in range(ndirs):
+            mtldir = mtlBaseDir.format(i) + '/' + survey + '/' + obscon
+            MTL = np.sort(desitarget.io.read_mtl_in_hp(mtldir, 32, hplist, unique=True, isodate=None, returnfn=False, initial=False, leq=False), order = 'TARGETID')
+            if TIDs is None:
+                TIDs = MTL['TARGETID']
+            else:
+                assert(np.array_equal(TIDs, MTL['TARGETID']))
+            try:
+                ObsFlagList = np.column_stack((ObsFlagList,MTL['NUMOBS'] > 0.5))
+            except:
+                log.info('hplist[0] = {0:d}'.format(hplist[0]))
+                log.info('This message should only appear once, for the first realization.')
+                ObsFlagList = MTL['NUMOBS'] > 0.5
+            if debug or verbose:
+                log.info(ObsFlagList.shape)
+        ObsArr = np.sum(ObsFlagList, axis = 1)
+        if debug or verbose:
+            log.info(np.min(ObsArr))
+            log.info(np.max(ObsArr))
+        bitweights = pack_bitweights(ObsFlagList)
+
+        assert(not (TIDs is None))
+        if obsprob:
+            obsprobs = ObsArr/ndirs
+            return TIDs, bitweights, obsprobs
+        else:
+            return TIDs, bitweights
+
+
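
makeBitweights ultimately defers to pack_bitweights to compress the per-realization boolean flags. A minimal reference implementation of that packing convention, assuming one signed 64-bit integer per block of 64 realizations with realization k stored in bit k, is sketched below for orientation; it is not the packer used above.

    import numpy as np

    def pack_bitweights_sketch(flags):
        # flags: (ntarget, nreal) booleans -> (ntarget, ceil(nreal/64)) int64
        ntarget, nreal = flags.shape
        nint = (nreal + 63) // 64
        packed = np.zeros((ntarget, nint), dtype=np.int64)
        for k in range(nreal):
            # set bit (k mod 64) of integer (k // 64) wherever the target was observed
            packed[:, k // 64] |= flags[:, k].astype(np.int64) << np.int64(k % 64)
        return packed

    flags = np.array([[True, False, True], [False, False, True]])
    print(pack_bitweights_sketch(flags))  # [[5], [4]]
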
+def writeBitweights(mtlBaseDir, ndirs = None, hplist = None, debug = False, outdir = None, obscon = "dark", survey = 'sv3', overwrite = False, allFiles = False, splitByReal = False, splitNChunks = None, verbose = False):
+    """Takes a set of {ndirs} realizations of DESI/SV3, converts their MTLs into bitweights
+    and an optional PROB_OBS, the probability that the target was observed over the
+    realizations, and then writes them to one or more files.
+
+    Parameters
+    ----------
+    mtlBaseDir : :class:`str`
+        The home directory of your alternate MTLs. Should not contain obscon
+        or survey. String should be formattable (i.e. '/path/to/dirs/Univ{0:03d}')
+    ndirs : :class:`int`
+        The number of alternate realizations to process.
+    survey : :class:`str`, optional, defaults to "sv3"
+        Used to look up the correct ledger, in combination with `obscon`.
+        Options are ``'main'`` and ``'svX'`` (where X is 1, 2, 3 etc.)
+        for the main survey and different iterations of SV, respectively.
+    obscon : :class:`str`, optional, defaults to "dark"
+        Used to look up the correct ledger, in combination with `survey`.
+        Options are ``'dark'`` and ``'bright'``.
+    hplist : :class:`arraylike`, optional, defaults to None
+        List of healpixels to process. If None, defaults to processing all
+        available healpixels.
+    debug : :class:`bool`, optional, defaults to False
+        If True, prints extra information showing input observation information
+        and output bitweight information for the first few targets as well as
+        the first few targets that were observed in at least one realization.
+    outdir : :class:`str`, optional, defaults to None
+        The base directory in which to create the BitweightFiles output directory.
+        If None, defaults to one level above mtlBaseDir.
+    overwrite : :class:`bool`, optional, defaults to False
+        If True, will clobber already existing bitweight files.
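+    allFiles : :class:`bool`, optional, defaults to False
+        If True, do not generate a bitweight file for each healpixel, but generate
+        one "allTiles" file for the combination of healpixels.
+        TODO: fix this option to autograb all tiles.
+    splitByReal : :class:`bool`, optional, defaults to False
+        If True, run for only a single realization but for all healpixels in
+        hplist (MPI-parallel over realizations).
+    splitNChunks : :class:`int`, optional, defaults to None
+        If not None, split hplist into this many chunks, process them
+        sequentially, and concatenate the results before writing.
+    verbose : :class:`bool`, optional, defaults to False
+        If True, print extra progress/log information.
+
+    Returns
+    -------
+    None
+        The TARGETID, BITWEIGHTS, and PROB_OBS arrays are written to a fits
+        file rather than returned.
+
+    """

The body below assembles the output path from survey, obscon, and the healpixel list; schematically (directory and pixel values here are hypothetical):

    survey, obscon, hplist = 'sv3', 'dark', [4, 5, 6]
    hpstring = 'hp-' + ''.join(str(hp) for hp in hplist)
    fn = ('/my/outdir' + '/BitweightFiles/' + survey + '/' + obscon +
          '/{0}bw-{1}-'.format(survey.lower(), obscon.lower()) + hpstring + '.fits')
    print(fn)  # /my/outdir/BitweightFiles/sv3/dark/sv3bw-dark-hp-456.fits
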
+    if outdir is None:
+        log.info('No outdir provided')
+        # take everything up to the last path component of mtlBaseDir
+        outdir = '/'.join(mtlBaseDir.split('/')[:-1])
+        log.info('autogenerated outdir')
+        log.info(outdir)
+    if splitByReal:
+        from mpi4py import MPI
+        comm = MPI.COMM_WORLD
+        mpi_procs = comm.size
+        mpi_rank = comm.rank
+        if mpi_rank == 0:
+            if not os.path.exists(outdir + '/BitweightFiles/' + survey + '/' + obscon):
+                os.makedirs(outdir + '/BitweightFiles/' + survey + '/' + obscon)
+    else:
+        # define a rank in serial mode so the rank-0 checks below still work
+        mpi_rank = 0
+        if not os.path.exists(outdir + '/BitweightFiles/' + survey + '/' + obscon):
+            os.makedirs(outdir + '/BitweightFiles/' + survey + '/' + obscon)
+    if isinstance(hplist, int):
+        hplist = [hplist]
+    if allFiles:
+        hpstring = 'AllTiles'
+    else:
+        hpstring = 'hp-'
+        for hp in hplist:
+            hpstring += str(hp)
+    fn = outdir + '/BitweightFiles/' + survey + '/' + obscon + '/{0}bw-{1}-'.format(survey.lower(), obscon.lower()) + hpstring + '.fits'
+    if (not overwrite) and os.path.exists(fn):
+        log.info('bitweight file already exists and overwrite is False; not regenerating')
+        log.info('fn = {0}'.format(fn))
+        return None
+
+    if not (splitNChunks is None):
+        if debug or verbose:
+            log.info('makeBitweights1')
+            log.info("splitting into {0} chunks".format(splitNChunks))
+        splits = np.array_split(hplist, int(splitNChunks))
+
+        for i, split in enumerate(splits):
+            if debug or verbose:
+                log.info('split {0}'.format(i))
+                log.info(split)
+            if i == 0:
+                TIDs, bitweights, obsprobs = makeBitweights(mtlBaseDir, ndirs = ndirs, hplist = split, debug = False, obsprob = True, obscon = obscon, survey = survey, splitByReal = splitByReal)
+            else:
+                TIDsTemp, bitweightsTemp, obsprobsTemp = makeBitweights(mtlBaseDir, ndirs = ndirs, hplist = split, debug = False, obsprob = True, obscon = obscon, survey = survey, splitByReal = splitByReal)
+
+                # only rank 0 holds real arrays when splitByReal is True; the
+                # serial default of mpi_rank = 0 makes this run in serial mode too
+                if mpi_rank == 0:
+                    if debug or verbose:
+                        log.info('----')
+                        log.info('mpi_rank: {0}'.format(mpi_rank))
+                        log.info("TIDs shape: {0}".format(TIDs.shape))
+                        log.info("bitweights shape: {0}".format(bitweights.shape))
+                        log.info("obsprobs shape: {0}".format(obsprobs.shape))
+                        log.info('----')
+                        log.info('mpi_rank: {0}'.format(mpi_rank))
+                        log.info("TIDsTemp shape: {0}".format(TIDsTemp.shape))
+                        log.info("bitweightsTemp shape: {0}".format(bitweightsTemp.shape))
+                        log.info("obsprobsTemp shape: {0}".format(obsprobsTemp.shape))
+                    TIDs = np.hstack((TIDs, TIDsTemp))
+                    bitweights = np.vstack((bitweights, bitweightsTemp))
+                    obsprobs = np.hstack((obsprobs, obsprobsTemp))
+    else:
+        if debug or verbose:
+            log.info('makeBitweights2')
+        TIDs, bitweights, obsprobs = makeBitweights(mtlBaseDir, ndirs = ndirs, hplist = hplist, debug = False, obsprob = True, obscon = obscon, survey = survey, splitByReal = splitByReal)
+    if splitByReal:
+        if debug or verbose:
+            log.info('----')
+            log.info('mpi_rank: {0}'.format(mpi_rank))
+        if mpi_rank == 0:
+            if debug or verbose:
+                log.info("TIDs shape: {0}".format(TIDs.shape))
+                log.info("bitweights shape: {0}".format(bitweights.shape))
+                log.info("obsprobs shape: {0}".format(obsprobs.shape))
+            data = Table({'TARGETID': TIDs, 'BITWEIGHTS': bitweights, 'PROB_OBS': obsprobs},
+                         names=['TARGETID', 'BITWEIGHTS', 'PROB_OBS'])
+
+            data.write(fn, overwrite = overwrite)
+    else:
+        if debug or verbose:
+            log.info("TIDs shape: {0}".format(TIDs.shape))
+            log.info("bitweights shape: {0}".format(bitweights.shape))
+            log.info("obsprobs shape: {0}".format(obsprobs.shape))
+        data = Table({'TARGETID': TIDs, 'BITWEIGHTS': bitweights, 'PROB_OBS': obsprobs},
+                     names=['TARGETID', 'BITWEIGHTS', 'PROB_OBS'])
+
+        data.write(fn, overwrite = overwrite)
+
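
reprocess_alt_ledger, defined next, consumes the per-tile fiberassign map pickles written earlier in the alternate-MTL loop. Assuming each pickle holds the (alternate-to-real, real-to-alternate) TARGETID map pair, loading one looks schematically like this (the paths are hypothetical):

    import pickle

    fbadirbase = '/path/to/altmtl/fa/MAIN/20210514/'  # hypothetical fa directory
    ts = str(1234).zfill(6)                           # zero-padded TILEID, as below
    with open(fbadirbase + '/famap-' + ts + '.pickle', 'rb') as fl:
        A2RMap, R2AMap = pickle.load(fl, fix_imports=True)
    # R2AMap and A2RMap are then fed to makeAlternateZCat to translate the real
    # survey's redshift catalog onto this realization's targets.
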
shape: {0}".format(TIDs.shape)) + log.info("bitweights shape: {0}".format(bitweights.shape)) + log.info("obsprobs shape: {0}".format(obsprobs.shape)) + data = Table({'TARGETID': TIDs, 'BITWEIGHTS': bitweights, 'PROB_OBS': obsprobs}, + names=['TARGETID', 'BITWEIGHTS', 'PROB_OBS']) + + data.write(fn, overwrite = overwrite) + else: + if debug or verbose: + log.info("TIDs shape: {0}".format(TIDs.shape)) + log.info("bitweights shape: {0}".format(bitweights.shape)) + log.info("obsprobs shape: {0}".format(obsprobs.shape)) + data = Table({'TARGETID': TIDs, 'BITWEIGHTS': bitweights, 'PROB_OBS': obsprobs}, + names=['TARGETID', 'BITWEIGHTS', 'PROB_OBS']) + + data.write(fn, overwrite = overwrite) + +def reprocess_alt_ledger(altmtldir, action, obscon="dark", survey = 'main', zcatdir = None): + """ + Reprocess HEALPixel-split ledgers for targets with new redshifts. + + Parameters + ---------- + hpdirname : :class:`str` + Full path to a directory containing an MTL ledger that has been + partitioned by HEALPixel (i.e. as made by `make_ledger`). + zcat : :class:`~astropy.table.Table`, optional + Redshift catalog table with columns ``TARGETID``, ``NUMOBS``, + ``Z``, ``ZWARN``, ``ZTILEID``, and ``msaddcols`` at the top of + the code for the Main Survey. + obscon : :class:`str`, optional, defaults to "DARK" + A string matching ONE obscondition in the desitarget bitmask yaml + file (i.e. in `desitarget.targetmask.obsconditions`), e.g. "DARK" + Governs how priorities are set using "obsconditions". Basically a + check on whether the files in `hpdirname` are as expected. + + Returns + ------- + :class:`dict` + A dictionary where the keys are the integer TILEIDs and the values + are the TIMESTAMP at which that tile was reprocessed. + + """ + tileid = action['TILEID'] + ts = str(tileid).zfill(6) + FABaseDir = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + FAFN = FABaseDir + f'/{ts[0:3]}/fiberassign-{ts}.fits' + + fhtOrig = fitsio.read_header(FAFN) + fadate = fhtOrig['RUNDATE'] + fanite = int(''.join(fadate.split('T')[0].split('-'))) + + hpdirname = altmtldir + f'/{survey.lower()}/{obscon.lower()}/' + + fbadirbase = altmtldir + '/fa/' + survey.upper() + '/' + str(fanite) + '/' + + #if getosubp: + # FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + #else: + FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + with open(FAMapName,'rb') as fl: + (A2RMap, R2AMap) = pickle.load(fl,fix_imports = True) + + #zcat = make_zcat(zcatdir, dateTiles, obscon, survey) + zcatdir = get_zcat_dir(zcatdir) + zcat = make_zcat(zcatdir, [action], obscon, survey, allow_overlaps = True) + log.info('ts = {0}'.format(ts)) + altZCat = makeAlternateZCat(zcat, R2AMap, A2RMap) + + + + #if getosubp: + # FAMapName = fbadirbase + '/orig/famap-' + ts + '.pickle' + #else: + # FAMapName = fbadirbase + '/famap-' + ts + '.pickle' + #with open(FAMapName,'rb') as fl: + # (A2RMapTemp, R2AMapTemp) = pickle.load(fl,fix_imports = True) + t0 = time() + log.info("Reprocessing based on altZCat with {} entries...t={:.1f}s" + .format(len(altZCat), time()-t0)) + + # ADM the output dictionary. + timedict = {} + + # ADM bits that correspond to a "bad" observation in the zwarn_mask. + Mxbad = "BAD_SPECQA|BAD_PETALQA|NODATA" + + # ADM find the general format for the ledger files in `hpdirname`. + # ADM also returning the obsconditions. + fileform, oc = desitarget.io.find_mtl_file_format_from_header(hpdirname, returnoc=True) + # ADM also find the format for any associated override ledgers. 
+ overrideff = desitarget.io.find_mtl_file_format_from_header(hpdirname, + forceoverride=True) + + # ADM check the obscondition is as expected. + if obscon != oc: + msg = "File is type {} but requested behavior is {}".format(oc, obscon) + log.critical(msg) + raise RuntimeError(msg) + + # ADM check the altZCat has unique TARGETID/TILEID combinations. + tiletarg = [str(tt["ZTILEID"]) + "-" + str(tt["TARGETID"]) for tt in zcat] + if len(set(tiletarg)) != len(tiletarg): + msg = "Passed altZCat does NOT have unique TARGETID/TILEID combinations!!!" + log.critical(msg) + raise RuntimeError(msg) + + # ADM record the set of tiles that are being reprocessed. + reproctiles = set(altZCat["ZTILEID"]) + + # ADM read ALL targets from the relevant ledgers. + log.info("Reading (all instances of) targets for {} tiles...t={:.1f}s" + .format(len(reproctiles), time()-t0)) + nside = desitarget.mtl._get_mtl_nside() + theta, phi = np.radians(90-altZCat["DEC"]), np.radians(altZCat["RA"]) + pixnum = hp.ang2pix(nside, theta, phi, nest=True) + pixnum = list(set(pixnum)) + targets = desitarget.io.read_mtl_in_hp(hpdirname, nside, pixnum, unique=False) + + # ADM remove OVERRIDE entries, which should never need reprocessing. + targets, _ = desitarget.mtl.remove_overrides(targets) + + # ADM sort by TIMESTAMP to ensure tiles are listed chronologically. + targets = targets[np.argsort(targets["TIMESTAMP"])] + + # ADM for speed, we only need to work with targets with a altZCat entry. + ntargs = len(targets) + nuniq = len(set(targets["TARGETID"])) + log.info("Read {} targets with {} unique TARGETIDs...t={:.1f}s" + .format(ntargs, nuniq, time()-t0)) + log.info("Limiting targets to {} (unique) TARGETIDs in the altZCat...t={:.1f}s" + .format(len(set(altZCat["TARGETID"])), time()-t0)) + s = set(altZCat["TARGETID"]) + ii = np.array([tid in s for tid in targets["TARGETID"]]) + targets = targets[ii] + nuniq = len(set(targets["TARGETID"])) + log.info("Retained {}/{} targets with {} unique TARGETIDs...t={:.1f}s" + .format(len(targets), ntargs, nuniq, time()-t0)) + + # ADM split off the updated target states from the unobserved states. + _, ii = np.unique(targets["TARGETID"], return_index=True) + unobs = targets[sorted(ii)] + # ADM this should remove both original UNOBS states and any resets + # ADM to UNOBS due to reprocessing data that turned out to be bad. + targets = targets[targets["ZTILEID"] != -1] + # ADM every target should have been unobserved at some point. + if len(set(targets["TARGETID"]) - set(unobs["TARGETID"])) != 0: + msg = "Some targets don't have a corresponding UNOBS state!!!" + log.critical(msg) + raise RuntimeError(msg) + # ADM each target should have only one UNOBS state. + if len(set(unobs["TARGETID"])) != len(unobs["TARGETID"]): + msg = "Passed ledgers have multiple UNOBS states!!!" + log.critical(msg) + raise RuntimeError(msg) + + log.info("{} ({}) targets are in the unobserved (observed) state...t={:.1f}s" + .format(len(unobs), len(targets), time()-t0)) + + # ADM store first-time-through tile order to reproduce processing. + # ADM ONLY WORKS because we sorted by TIMESTAMP, above! + _, ii = np.unique(targets["ZTILEID"], return_index=True) + # ADM remember to sort ii so that the first tiles appear first. + orderedtiles = targets["ZTILEID"][sorted(ii)] + + # ADM assemble a altZCat for all previous and reprocessed observations. 
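
A few lines below, the assembled altZCat is reduced to the final observation per TILEID-TARGETID pair by exploiting the fact that np.unique returns first occurrences: flipping the array first turns "first" into "last". A toy illustration of the same idiom:

    import numpy as np

    hashes = np.array(['10-1', '11-2', '10-1', '12-3'])  # TILEID-TARGETID; last '10-1' wins
    flipped = np.flip(hashes)
    _, ii = np.unique(flipped, return_index=True)
    final = np.flip(flipped[sorted(ii)])
    print(final)  # ['11-2' '10-1' '12-3'] -- one entry per pair, final occurrence kept
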
+ altZCatfromtargs = np.zeros(len(targets), dtype=zcat.dtype) + for col in altZCat.dtype.names: + altZCatfromtargs[col] = targets[col] + # ADM note that we'll retain the TIMESTAMPed order of the old ledger + # ADM entries and new redshifts will (deliberately) be listed last. + allaltZCat = np.concatenate([altZCatfromtargs, altZCat]) + log.info("Assembled a altZCat of {} total observations...t={:.1f}s" + .format(len(allaltZCat), time()-t0)) + + # ADM determine the FINAL observation for each TILED-TARGETID combo. + # ADM must flip first as np.unique finds the FIRST unique entries. + allaltZCat = np.flip(allaltZCat) + # ADM create a unique hash of TILEID and TARGETID. + tiletarg = [str(tt["ZTILEID"]) + "-" + str(tt["TARGETID"]) for tt in allaltZCat] + # ADM find the final unique combination of TILEID and TARGETID. + _, ii = np.unique(tiletarg, return_index=True) + # ADM make sure to retain exact reverse-ordering. + ii = sorted(ii) + # ADM condition on indexes-of-uniqueness and flip back. + allaltZCat = np.flip(allaltZCat[ii]) + log.info("Found {} final TARGETID/TILEID combinations...t={:.1f}s" + .format(len(allaltZCat), time()-t0)) + + # ADM mock up a dictionary of timestamps in advance. This is faster + # ADM as no delays need to be built into the code. + now = get_utc_date(survey="main") + timestamps = {t: desitarget.mtl.add_to_iso_date(now, s) for s, t in enumerate(orderedtiles)} + + # ADM make_mtl() expects altZCats to be in Table form. + allaltZCat = Table(allaltZCat) + # ADM a merged target list to track and record the final states. + mtl = Table(unobs) + # ADM to hold the final list of updates per-tile. + donemtl = [] + + # ADM loop through the tiles in order and update the MTL state. + for tileid in orderedtiles: + # ADM the timestamp for this tile. + timestamp = timestamps[tileid] + + # ADM restrict to the observations on this tile. + altZCatmini = allaltZCat[allaltZCat["ZTILEID"] == tileid] + # ADM check there are only unique TARGETIDs on each tile! + if len(set(altZCatmini["TARGETID"])) != len(altZCatmini): + msg = "There are duplicate TARGETIDs on tile {}".format(tileid) + log.critical(msg) + raise RuntimeError(msg) + + # ADM update NUMOBS in the altZCat using previous MTL totals. + mii, zii = desitarget.geomask.match(mtl["TARGETID"], altZCatmini["TARGETID"]) + altZCatmini["NUMOBS"][zii] = mtl["NUMOBS"][mii] + 1 + + # ADM restrict to just objects in the altZCat that match an UNOBS + # ADM target (i,e that match something in the MTL). + log.info("Processing {}/{} observations from altZCat on tile {}...t={:.1f}s" + .format(len(zii), len(altZCatmini), tileid, time()-t0)) + log.info("(i.e. removed secondaries-if-running-primaries or vice versa)") + altZCatmini = altZCatmini[zii] + + # ADM ------ + # ADM NOTE: We could use trimtozcat=False without matching, and + # ADM just continually update the overall mtl list. But, make_mtl + # ADM doesn't track NUMOBS just NUMOBS_MORE, so we need to add + # ADM complexity somewhere, hence trimtozcat=True/matching-back. + # ADM ------ + # ADM push the observations on this tile through MTL. + zmtl = desitarget.mtl.make_mtl(mtl, oc, zcat=altZCatmini, trimtozcat=True, trimcols=True) + + # ADM match back to overall merged target list to update states. + mii, zii = desitarget.geomask.match(mtl["TARGETID"], zmtl["TARGETID"]) + # ADM update the overall merged target list. 
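
The mtl[col][mii] = zmtl[col][zii] update in the loop below follows the usual matched-index pattern; with unique ID arrays it can be mimicked in plain numpy, e.g. (a sketch of the pattern only, not the actual desitarget.geomask.match):

    import numpy as np

    mtl_ids = np.array([11, 22, 33, 44])
    zmtl_ids = np.array([33, 11])
    zmtl_z = np.array([3.3, 1.1])
    _, mii, zii = np.intersect1d(mtl_ids, zmtl_ids, return_indices=True)
    z_col = np.zeros(len(mtl_ids))
    z_col[mii] = zmtl_z[zii]  # rows 0 and 2 updated, in mtl order
    print(z_col)              # [1.1 0.  3.3 0. ]
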
+ for col in mtl.dtype.names: + if col.upper() == 'RA': + continue + elif col.upper() == 'DEC': + continue + mtl[col][mii] = zmtl[col][zii] + # ADM also update the TIMESTAMP for changes on this tile. + mtl["TIMESTAMP"][mii] = timestamp + + # ADM trimtozcat=True discards BAD observations. Retain these. + tidmiss = list(set(altZCatmini["TARGETID"]) - set(zmtl["TARGETID"])) + tii = desitarget.geomask.match_to(altZCatmini["TARGETID"], tidmiss) + zbadmiss = altZCatmini[tii] + # ADM check all of the missing observations are, indeed, bad. + if np.any(zbadmiss["ZWARN"] & zwarn_mask.mask(Mxbad) == 0): + msg = "Some objects skipped by make_mtl() on tile {} are not BAD!!!" + msg = msg.format(tileid) + log.critical(msg) + raise RuntimeError(msg) + log.info("Adding back {} bad observations from altZCat...t={:.1f}s" + .format(len(zbadmiss), time()-t0)) + + # ADM update redshift information in MTL for bad observations. + mii, zii = desitarget.geomask.match(mtl["TARGETID"], zbadmiss["TARGETID"]) + # ADM update the overall merged target list. + # ADM Never update NUMOBS or NUMOBS_MORE using bad observations. + for col in set(zbadmiss.dtype.names) - set(["NUMOBS", "NUMOBS_MORE", "RA", "DEC"]): + if col.upper() == 'RA': + continue + elif col.upper() == 'DEC': + continue + mtl[col][mii] = zbadmiss[col][zii] + # ADM also update the TIMESTAMP for changes on this tile. + mtl["TIMESTAMP"][mii] = timestamp + + # ADM record the information to add to the output ledgers... + donemtl.append(mtl[mtl["ZTILEID"] == tileid]) + + # ADM if this tile was actually reprocessed (rather than being a + # ADM later overlapping tile) record the TIMESTAMP... + if tileid in reproctiles: + timedict[tileid] = timestamp + + # ADM collect the results. + mtl = Table(np.concatenate(donemtl)) + + # ADM re-collect everything on pixels for writing to ledgers. + nside = desitarget.mtl._get_mtl_nside() + theta, phi = np.radians(90-mtl["DEC"]), np.radians(mtl["RA"]) + pixnum = hp.ang2pix(nside, theta, phi, nest=True) + + # ADM loop through the pixels and update the ledger, depending + # ADM on whether we're working with .fits or .ecsv files. + ender = get_mtl_ledger_format() + for pix in set(pixnum): + # ADM grab the targets in the pixel. + ii = pixnum == pix + mtlpix = mtl[ii] + + # ADM the correct filenames for this pixel number. + fn = fileform.format(pix) + overfn = overrideff.format(pix) + + # ADM if an override ledger exists, update it and recover its + # ADM relevant MTL entries. + if os.path.exists(overfn): + overmtl = process_overrides(overfn) + # ADM add any override entries TO THE END OF THE LEDGER. + mtlpix = vstack([mtlpix, overmtl]) + + # ADM if we're working with .ecsv, simply append to the ledger. + if ender == 'ecsv': + f = open(fn, "a") + astropy.io.ascii.write(mtlpix, f, format='no_header', formats=mtlformatdict) + f.close() + # ADM otherwise, for FITS, we'll have to read in the whole file. + else: + ledger, hd = fitsio.read(fn, extname="MTL", header=True) + done = np.concatenate([ledger, mtlpix.as_array()]) + fitsio.write(fn+'.tmp', done, extname='MTL', header=hd, clobber=True) + os.rename(fn+'.tmp', fn) + retval = write_amtl_tile_tracker(altmtldir, [action], obscon = obscon, survey = survey) + return timedict + + +def write_amtl_tile_tracker(dirname, tiles, obscon = 'dark', survey = 'main'): + """Write AMTL Processing times into TileTrackers + + Parameters + ---------- + dirname : :class:`str` + The path to the AMTL directory. 
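
For orientation, the tracker this routine edits is an ECSV table carrying at least TILEID, ACTIONTYPE, and DONEFLAG columns; the core update amounts to the following (a toy table, not a real tracker file):

    from astropy.table import Table

    tracker = Table({'TILEID': [1000, 1000, 1001],
                     'ACTIONTYPE': ['fa', 'update', 'fa'],
                     'DONEFLAG': [False, False, False]})
    cond = (tracker['TILEID'] == 1000) & (tracker['ACTIONTYPE'] == 'update')
    tracker['DONEFLAG'][cond] = True  # mirrors the loop over tiles below
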
+    tiles : :class:`~astropy.table.Table` or :class:`~numpy.recarray`
+        The tiles which were processed in this AMTL loop iteration.
+    obscon : :class:`str`
+        The observing conditions of the tiles that were processed. "dark" or "bright"
+    survey : :class:`str`
+        The survey of the tiles that were processed. "main" or "sv3"
+
+    Returns
+    -------
+    :class:`str`
+        A status string ('done') once the updated tracker has been written
+        back to disk.
+    """
+    #if len(tiles) == 1:
+    #    tiles = [tiles]
+    TileTrackerFN = makeTileTrackerFN(dirname, survey, obscon)
+    log.info(TileTrackerFN)
+    if os.path.isfile(TileTrackerFN):
+        TileTracker = Table.read(TileTrackerFN, format = 'ascii.ecsv')
+
+    #if mode.lower() == 'update':
+    #    dateKey = 'ALTARCHIVEDATE'
+    #elif mode.lower() == 'fa':
+    #    dateKey = 'ALTFADATE'
+    #elif mode.lower() == 'endofday':
+    #    TileTracker.meta['Today'] = today
+    #    TileTracker.write(TileTrackerFN, format = 'ascii.ecsv', overwrite = True)
+    #    return 'only wrote today in metadata'
+    for t in tiles:
+        log.info('t = {0}'.format(t))
+        tileid = t['TILEID']
+        #reprocFlag = t['REPROCFLAG']
+        actionType = t['ACTIONTYPE']
+        cond = (TileTracker['TILEID'] == tileid) & (TileTracker['ACTIONTYPE'] == actionType)
+        log.info('for tile {0}, number of matching tiles = {1}'.format(tileid, np.sum(cond)))
+        #debugTrap = np.copy(TileTracker[dateKey])
+        TileTracker['DONEFLAG'][cond] = True
+
+    # at least one action should now be marked done
+    assert(not (np.all(np.invert(TileTracker['DONEFLAG']))))
+
+    #if mode == 'update':
+    #    todaysTiles = TileTracker[TileTracker['ORIGMTLDATE'] == today]
+    #    #if np.sum(todaysTiles['ALTARCHIVEDATE'] == None) == 0:
+
+    TileTracker.write(TileTrackerFN, format = 'ascii.ecsv', overwrite = True)
+    return 'done'
diff --git a/scripts/mock_tools/add_extra_realizations.py b/scripts/mock_tools/add_extra_realizations.py
new file mode 100755
index 000000000..e5d2ed42d
--- /dev/null
+++ b/scripts/mock_tools/add_extra_realizations.py
@@ -0,0 +1,28 @@
+import numpy as np
+from astropy.table import Table,vstack
+import os
+
+program = 'dark'
+
+rmin = 0
+rmax = 256
+
+#path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000'
+path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl0_R256/Univ{MOCKNUM}'
+
+extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv')
+
+tileref = extratiles['TILEID'][-1]
+print(tileref)
+
+for i in range(rmin, rmax):
+    input_track = os.path.join(path, 'mainsurvey-{PRG}obscon-TileTracker.ecsv').format(MOCKNUM = "%03d" % i, PRG=program.upper())
+    tiles = Table.read(input_track, format='ascii.ecsv')
+    # keep the same zero padding as input_track so amtldir points at the real directory
+    tiles.meta['amtldir'] = path.format(MOCKNUM = "%03d" % i)
+    if tiles['TILEID'][-1] != tileref:
+        print('merging for mock', i)
+        newtable = vstack([tiles, extratiles])
+        newtable.meta = tiles.meta
+        newtable.write(input_track, overwrite=True)
+    else:
+        print(i, 'it has already been merged')
diff --git a/scripts/mock_tools/add_extra_tilesTracker.py b/scripts/mock_tools/add_extra_tilesTracker.py
new file mode 100644
index 000000000..4b814949a
--- /dev/null
+++ b/scripts/mock_tools/add_extra_tilesTracker.py
@@ -0,0 +1,28 @@
+import numpy as np
+from astropy.table import Table,vstack
+import os
+
+program = 'dark'
+
+rmin = 10
+rmax = 11
+
+path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000'
+#path = 
'/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM}/Univ000'
+
+extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv')
+
+tileref = extratiles['TILEID'][-1]
+print(tileref)
+
+for i in range(rmin, rmax):
+    input_track = os.path.join(path, 'mainsurvey-{PRG}obscon-TileTracker.ecsv').format(MOCKNUM = i, PRG=program.upper())
+    tiles = Table.read(input_track, format='ascii.ecsv')
+    tiles.meta['amtldir'] = path.format(MOCKNUM = i)
+    if tiles['TILEID'][-1] != tileref:
+        print('merging for mock', i)
+        newtable = vstack([tiles, extratiles])
+        newtable.meta = tiles.meta
+        newtable.write(input_track, overwrite=True)
+    else:
+        print(i, 'it has already been merged')
diff --git a/scripts/mock_tools/mkCat_SecondGen_amtl.py b/scripts/mock_tools/mkCat_SecondGen_amtl.py
index 57a5a5fa9..9022603d9 100644
--- a/scripts/mock_tools/mkCat_SecondGen_amtl.py
+++ b/scripts/mock_tools/mkCat_SecondGen_amtl.py
@@ -48,6 +48,7 @@ def test_dir(value):
 
 parser.add_argument("--mockver", help="type of mock to use",default='ab_firstgen')
 parser.add_argument("--mocknum", help="number for the realization",default=1,type=int)
+parser.add_argument("--ccut", help="extra-cut",default=None)
 parser.add_argument("--base_output", help="base directory for output",default=os.getenv('SCRATCH')+'/SecondGen/')
 parser.add_argument("--outmd", help="whether to write in scratch",default='scratch')
 parser.add_argument("--targDir", help="base directory for target file",default=None)
@@ -183,8 +184,8 @@ def test_dir(value):
 if args.tracer != 'dark' and args.tracer != 'bright':
     if args.tracer == 'BGS_BRIGHT':
         bit = targetmask.bgs_mask[args.tracer]
-        desitarg='DESI_TARGET'
-        ##desitarg='BGS_TARGET'
+        #desitarg='DESI_TARGET'
+        desitarg='BGS_TARGET'
     else:
         bit = targetmask.desi_mask[args.tracer]
         desitarg='DESI_TARGET'
@@ -210,7 +211,11 @@ def test_dir(value):
     #if using alt MTL that should have ZWARN_MTL, put that in here
     asn['ZWARN_MTL'] = np.copy(asn['ZWARN'])
     print('entering common.combtiles_wdup_altmtl for FAVAIL')
-    pa = common.combtiles_wdup_altmtl('FAVAIL', tiles, fbadir, os.path.join(outdir, 'datcomb_' + pdir + 'wdup.fits'), tarf, addcols=['TARGETID','RA','DEC','PRIORITY_INIT','DESI_TARGET'])
+
+    cols = ['TARGETID','RA','DEC','PRIORITY_INIT','DESI_TARGET']
+    if pdir == 'bright':
+        cols.extend(['BGS_TARGET', 'R_MAG_ABS'])
+    pa = common.combtiles_wdup_altmtl('FAVAIL', tiles, fbadir, os.path.join(outdir, 'datcomb_' + pdir + 'wdup.fits'), tarf, addcols=cols)
     fcoll = os.path.join(lssdir, 'collision_'+pdir+'_mock%d.fits' % mocknum)
 
     if args.joindspec == 'y':
@@ -448,29 +453,51 @@ def _parfun2(rann):
         nztl.append('')
         #fin = os.path.join(dirout, args.tracer + notqso + '_full' + args.use_map_veto + '.dat.fits')
         #ct.mkclusdat(os.path.join(dirout,args.tracer+notqso),tp=args.tracer,dchi2=None,tsnrcut=0,zmin=zmin,zmax=zmax)#,ntilecut=ntile)
-        ct.mkclusdat(os.path.join(readdir, args.tracer + notqso), tp = args.tracer, dchi2 = None, tsnrcut = 0, zmin = zmin, zmax = zmax, use_map_veto = args.use_map_veto,subfrac=subfrac,zsplit=zsplit)#,ntilecut=ntile,ccut=ccut)
+
+        if args.ccut is not None:
+            targets = Table(fitsio.read(os.path.join(args.targDir, 'forFA{MOCKNUM}.fits').format(MOCKNUM=mocknum).replace('global','dvs_ro'), columns=['TARGETID', 'R_MAG_ABS']))
+            ffile = Table.read(os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits').replace('global','dvs_ro'))
+            if 
'R_MAG_ABS' not in ffile.columns: + nm = Table(join(ffile, targets, keys=['TARGETID'])) + #print(nm) + common.write_LSS(nm, os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits')) + #nm.write(ffile, overwrite=True) + + + ct.mkclusdat(os.path.join(readdir, args.tracer + notqso), tp = args.tracer, dchi2 = None, tsnrcut = 0, zmin = zmin, zmax = zmax, use_map_veto = args.use_map_veto,subfrac=subfrac,zsplit=zsplit, ismock=True, ccut=args.ccut)#,ntilecut=ntile,ccut=ccut) #ct.mkclusdat(os.path.join(dirout, args.tracer + notqso), tp = args.tracer, dchi2 = None, splitNS='y', tsnrcut = 0, zmin = zmin, zmax = zmax, use_map_veto = args.use_map_veto)#,ntilecut=ntile,ccut=ccut) print('*** END WITH MKCLUSDAT ***') +finaltracer = args.tracer + notqso #+ '_' +if args.tracer[:3] == 'BGS': + if args.ccut is not None: + finaltracer = args.tracer + str(args.ccut) #+ '_' + rcols=['Z','WEIGHT','WEIGHT_SYS','WEIGHT_COMP','WEIGHT_ZFAIL','TARGETID_DATA'] if args.mkclusran == 'y': print('--- START MKCLUSRAN ---') if len(nztl) == 0: nztl.append('') - + tsnrcol = 'TSNR2_ELG' if args.tracer[:3] == 'BGS': tsnrcol = 'TSNR2_BGS' - fl = os.path.join(dirout, args.tracer + notqso + '_') + if args.ccut is not None: + for rn in range(rannum[0], rannum[1]): + if not os.path.isfile('%s%s_%d_full_HPmapcut.ran.fits'% (os.path.join(dirout, args.tracer), str(args.ccut), rn)): + os.system('cp %s_%d_full_HPmapcut.ran.fits %s%s_%d_full_HPmapcut.ran.fits' %(os.path.join(dirout, args.tracer), rn, os.path.join(dirout, args.tracer), str(args.ccut), rn)) + #print('cp %s_%d_full_HPmapcut.ran.fits %s%s_%d_full_HPmapcut.ran.fits' %(os.path.join(dirout, args.tracer), rn, os.path.join(dirout, args.tracer), str(args.ccut), rn)) + os.system('cp %s_frac_tlobs.fits %s%s_frac_tlobs.fits' %(os.path.join(dirout, args.tracer), os.path.join(dirout, args.tracer), str(args.ccut))) + fl = os.path.join(dirout, finaltracer) + '_' print('adding tlobs to randoms with ', fl) clus_arrays = [fitsio.read(fl.replace('global','dvs_ro')+'clustering.dat.fits')] global _parfun4 def _parfun4(rann): #ct.add_tlobs_ran(fl, rann, hpmapcut = args.use_map_veto) - ct.mkclusran(os.path.join(readdir, args.tracer + notqso + '_'), os.path.join(dirout, args.tracer + notqso + '_'), rann, rcols = rcols, tsnrcut = -1, tsnrcol = tsnrcol, use_map_veto = args.use_map_veto,clus_arrays=clus_arrays,add_tlobs='y')#,ntilecut=ntile,ccut=ccut) + ct.mkclusran(os.path.join(readdir, finaltracer) + '_', os.path.join(dirout, finaltracer) + '_', rann, rcols = rcols, tsnrcut = -1, tsnrcol = tsnrcol, use_map_veto = args.use_map_veto,clus_arrays=clus_arrays,add_tlobs='y')#,ntilecut=ntile,ccut=ccut) #ct.mkclusran(os.path.join(dirout, args.tracer + notqso + '_'), os.path.join(dirout, args.tracer + notqso + '_'), rann, rcols = rcols, nosplit='n', tsnrcut = 0, tsnrcol = tsnrcol, use_map_veto = args.use_map_veto)#,ntilecut=ntile,ccut=ccut) #for clustering, make rannum start from 0 if args.par == 'n': @@ -490,7 +517,8 @@ def _parfun4(rann): print('*** END WITH MKCLUSRAN ***') nproc = 18 -fb = os.path.join(dirout, tracer_clus) +fb = os.path.join(dirout, finaltracer) +##fb = os.path.join(dirout, tracer_clus) nran = rx-rm if args.nz == 'y': #this calculates the n(z) and then adds nbar(completeness) and FKP weights to the catalogs diff --git a/scripts/mock_tools/prepare_mocks_Y1_bright.py b/scripts/mock_tools/prepare_mocks_Y1_bright.py index e386a1810..0b7e2c4fc 100644 --- a/scripts/mock_tools/prepare_mocks_Y1_bright.py +++ b/scripts/mock_tools/prepare_mocks_Y1_bright.py @@ -263,6 
+263,10 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): print('size of BRIGHT', len(dat_bright)) print('size of FAINT', len(dat_faint)) + + + + dat_bright['BGS_TARGET'] = 2**1 dat_faint['BGS_TARGET'] = 2**0 @@ -274,12 +278,16 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0): datat.append(dat_bright) + SubFracFaint=0.556 + ran_faint = np.random.uniform(size = len(dat_faint)) + dat_faint_subfrac = dat_faint[(ran_faint<=SubFracFaint)] + PromoteFracBGSFaint=0.2 - ran_hip = np.random.uniform(size = len(dat_faint)) + ran_hip = np.random.uniform(size = len(dat_faint_subfrac)) - dat_faint_f = dat_faint[(ran_hip>PromoteFracBGSFaint)] - dat_faint_hip = dat_faint[(ran_hip<=PromoteFracBGSFaint)] + dat_faint_f = dat_faint_subfrac[(ran_hip>PromoteFracBGSFaint)] + dat_faint_hip = dat_faint_subfrac[(ran_hip<=PromoteFracBGSFaint)] dat_faint_hip['BGS_TARGET'] += 2**3 diff --git a/scripts/mock_tools/prepare_script_bright.sh b/scripts/mock_tools/prepare_script_bright.sh new file mode 100755 index 000000000..64b7f3454 --- /dev/null +++ b/scripts/mock_tools/prepare_script_bright.sh @@ -0,0 +1,9 @@ +#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 1 --realmax 4 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 4 --realmax 7 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 0 --realmax 4 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 11 --realmax 14 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 7 --realmax 11 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 14 --realmax 17 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 17 --realmax 21 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 21 --realmax 23 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 23 --realmax 25 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 From 5ae546ceda6cca2fd74949fda4e83ecb76a3f4bc Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 18 Jan 2024 12:26:04 -0500 Subject: [PATCH 046/297] Update cattools.py --- py/LSS/main/cattools.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index f27876ab3..e88afeb51 100644 --- a/py/LSS/main/cattools.py 
+++ b/py/LSS/main/cattools.py
@@ -3607,6 +3607,9 @@ def mkclusdat(fl,weighttileloc=True,zmask=False,tp='',dchi2=9,tsnrcut=80,rcut=No
         kl.append('WEIGHT_SN')
     if 'WEIGHT_RF' in cols:
         kl.append('WEIGHT_RF')
+    if 'WEIGHT_IMLIN' in cols:
+        kl.append('WEIGHT_IMLIN')
+
     if tp[:3] == 'BGS':
         #ff['flux_r_dered'] = ff['FLUX_R']/ff['MW_TRANSMISSION_R']
         #kl.append('flux_r_dered')
@@ -3717,17 +3720,20 @@ def add_tlobs_ran_array(ranf,tlf):
     nt = 0
     utls = np.unique(ranf['TILES'])
     gtls = np.isin(utls,tlf['TILES'])
+    nnf = 0
     for tls in ranf['TILES']:
         try:
             fr = tldic[tls]
         except:
-            fr = 0
+            fr = 1
+            nnf += 1
         tlarray.append(fr)
         if nt%100000 == 0:
             print(nt,len(ranf))
         nt += 1
     tlarray = np.array(tlarray)
     sel = tlarray == 0
+    print('number of random rows whose TILES combination was not found in the data '+str(nnf))
     print(len(tlarray[sel]),' number with 0 frac')
     ranf['FRAC_TLOBS_TILES'] = tlarray
     return ranf
From 52841148576008adc73beecf8307988dcb655f5e Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Thu, 18 Jan 2024 16:55:50 -0500
Subject: [PATCH 047/297] fix altmtl v3_1 LSS catalogs

---
 scripts/mock_tools/abv31amtl_cat_sbatch.sh     | 4 ++--
 scripts/mock_tools/run1_AMTLmock_LSS_3_1fix.sh | 9 +++++++++
 2 files changed, 11 insertions(+), 2 deletions(-)
 create mode 100755 scripts/mock_tools/run1_AMTLmock_LSS_3_1fix.sh

diff --git a/scripts/mock_tools/abv31amtl_cat_sbatch.sh b/scripts/mock_tools/abv31amtl_cat_sbatch.sh
index 4b181c381..1e0356cf1 100755
--- a/scripts/mock_tools/abv31amtl_cat_sbatch.sh
+++ b/scripts/mock_tools/abv31amtl_cat_sbatch.sh
@@ -3,10 +3,10 @@
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
-#SBATCH --array=2-24
+#SBATCH --array=1-24
 #SBATCH --account=desi
 
 source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main
 PYTHONPATH=$PYTHONPATH:$HOME/LSS/py
 
-srun scripts/mock_tools/run1_AMTLmock_LSS_3_1.sh $SLURM_ARRAY_TASK_ID
\ No newline at end of file
+srun scripts/mock_tools/run1_AMTLmock_LSS_3_1fix.sh $SLURM_ARRAY_TASK_ID
\ No newline at end of file
diff --git a/scripts/mock_tools/run1_AMTLmock_LSS_3_1fix.sh b/scripts/mock_tools/run1_AMTLmock_LSS_3_1fix.sh
new file mode 100755
index 000000000..79de8cfb8
--- /dev/null
+++ b/scripts/mock_tools/run1_AMTLmock_LSS_3_1fix.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+OUTBASE='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl{MOCKNUM}'
+
+python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch'
+
+python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer LRG --notqso n --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch'
+
+python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer QSO --notqso n --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch'
+
From da9833f166c3c604ec33396b7e4be3813f780067 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Fri, 19 Jan 2024 15:16:13 -0500
Subject: [PATCH 048/297] Update cattools.py

---
 py/LSS/main/cattools.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py
index 49a0fbdc4..ae4c0c398 100644
--- a/py/LSS/main/cattools.py
+++ b/py/LSS/main/cattools.py
@@ -3641,8 +3641,14 @@ def 
mkclusdat(fl,weighttileloc=True,zmask=False,tp='',dchi2=9,tsnrcut=80,rcut=No ff = ff[sel] wn = ff['PHOTSYS'] == 'N' - - ff.keep_columns(kl) + kll = [] + data_cols = list(ff.dtype.names) + for name in kl: + if name is in data_cols: + kll.append(name) + else: + print(name+' not found in input and will not be in clustering catalog') + ff.keep_columns(kll) print('minimum,maximum weight') print(np.min(ff['WEIGHT']),np.max(ff['WEIGHT'])) From 6059c81465ee42ad2d49637247265a216254db09 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 15:18:38 -0500 Subject: [PATCH 049/297] Update LSSpipe_Y1.txt --- Sandbox/LSSpipe_Y1.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt index 0248651ef..9ffd092de 100644 --- a/Sandbox/LSSpipe_Y1.txt +++ b/Sandbox/LSSpipe_Y1.txt @@ -226,3 +226,16 @@ python scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/su python scripts/main/mkCat_main.py --type ELG_LOP --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_bitweight y --verspec iron --survey Y1 --version v1 python scripts/main/mkCat_main.py --type QSO --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_bitweight y --verspec iron --survey Y1 --version v1 + + +====== Below are steps to make v1.1 + +mkdir /global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/v1.1/ +cp /global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/v1/*full* /global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/v1.1/ + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type ELG_LOP --notqso y --fulld n --survey Y1 --verspec iron --version v1.1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_SN --basedir /global/cfs/cdirs/desi/survey/catalogs/ + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type LRG --fulld n --survey Y1 --verspec iron --version v1.1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_IMLIN --basedir /global/cfs/cdirs/desi/survey/catalogs/ + +add regressis weights to BGS_BRIGHT-21.5: +python scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --version v1 --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --regressis y --add_regressis y --survey Y1 --verspec iron --imsys_zbin y From 31d43095eac28824dd990222d5a03b134d8898ea Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 15:22:23 -0500 Subject: [PATCH 050/297] Update cattools.py --- py/LSS/main/cattools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index ae4c0c398..fe8b92fe3 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -3644,7 +3644,7 @@ def mkclusdat(fl,weighttileloc=True,zmask=False,tp='',dchi2=9,tsnrcut=80,rcut=No kll = [] data_cols = list(ff.dtype.names) for name in kl: - if name is in data_cols: + if name in data_cols: kll.append(name) else: print(name+' not found in input and will not be in clustering catalog') From e6291d8ce69fe0164bc651bb72f1d3093cd75d7b Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 15:39:24 -0500 Subject: [PATCH 051/297] Update validation_cl.py --- scripts/validation/validation_cl.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/validation/validation_cl.py b/scripts/validation/validation_cl.py index 739cca0b8..a2c38914b 100644 --- a/scripts/validation/validation_cl.py +++ 
b/scripts/validation/validation_cl.py @@ -16,6 +16,7 @@ parser.add_argument("--version", help="catalog version",default='test') parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1') parser.add_argument("--tracers", help="all runs all for given survey",default='all') +parser.add_argument("--use_map_veto",help="string to add on the end of full file reflecting if hp maps were used to cut",default='_HPmapcut') parser.add_argument("--verspec",help="version for redshifts",default='iron') parser.add_argument("--data",help="LSS or mock directory",default='LSS') parser.add_argument("--ps",help="point size for density map",default=1,type=float) @@ -144,8 +145,8 @@ def get_delta(dat,ran,racol='RA',decol='DEC',wts=None,wtspix=None,thresh=0,nest= for tp in tps: print('doing '+tp) - dtf = fitsio.read(indir+tp+zdw+'_full.dat.fits') - ran = fitsio.read(indir+tp+zdw+'_0_full.ran.fits') + dtf = fitsio.read(indir+tp+zdw+'_full'+args.use_map_veto+'.dat.fits') + ran = fitsio.read(indir+tp+zdw+'_0_full'+args.use_map_veto+'.ran.fits') fnreg = indir+'/regressis_data/main_'+tp+'_256/RF/main_'+tp+'_imaging_weight_256.npy' rfw = np.load(fnreg,allow_pickle=True) maskreg = rfw.item()['mask_region'] From 209495dbc4928d6f7fb3c3c1d96eb0b51f21a399 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 15:40:43 -0500 Subject: [PATCH 052/297] Update validation_cl.py --- scripts/validation/validation_cl.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/validation/validation_cl.py b/scripts/validation/validation_cl.py index a2c38914b..69ca281d7 100644 --- a/scripts/validation/validation_cl.py +++ b/scripts/validation/validation_cl.py @@ -17,6 +17,7 @@ parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1') parser.add_argument("--tracers", help="all runs all for given survey",default='all') parser.add_argument("--use_map_veto",help="string to add on the end of full file reflecting if hp maps were used to cut",default='_HPmapcut') +parser.add_argument("--weight_col", help="column name for weight",default='WEIGHT_SYS') parser.add_argument("--verspec",help="version for redshifts",default='iron') parser.add_argument("--data",help="LSS or mock directory",default='LSS') parser.add_argument("--ps",help="point size for density map",default=1,type=float) @@ -156,7 +157,7 @@ def get_delta(dat,ran,racol='RA',decol='DEC',wts=None,wtspix=None,thresh=0,nest= sel_gz = common.goodz_infull(tp[:3],dtf) sel_obs = dtf['ZWARN'] != 999999 dtfoz = dtf[sel_obs&sel_gz] - wt = 1./dtfoz['FRACZ_TILELOCID']*dtfoz['WEIGHT_ZFAIL']*dtfoz['WEIGHT_SYS'] + wt = 1./dtfoz['FRACZ_TILELOCID']*dtfoz['WEIGHT_ZFAIL']*dtfoz[args.weight_col] if 'FRAC_TLOBS_TILES' in list(dtfoz.dtype.names): print('using FRAC_TLOBS_TILES') wt *= 1/dtfoz['FRAC_TLOBS_TILES'] From 7c680187c920066d41a5001ad7ba2d249bb69cf5 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 15:49:26 -0500 Subject: [PATCH 053/297] Update validation_cl.py --- scripts/validation/validation_cl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validation/validation_cl.py b/scripts/validation/validation_cl.py index 69ca281d7..caa6e44fd 100644 --- a/scripts/validation/validation_cl.py +++ b/scripts/validation/validation_cl.py @@ -148,7 +148,7 @@ def get_delta(dat,ran,racol='RA',decol='DEC',wts=None,wtspix=None,thresh=0,nest= print('doing '+tp) dtf = fitsio.read(indir+tp+zdw+'_full'+args.use_map_veto+'.dat.fits') ran = 
fitsio.read(indir+tp+zdw+'_0_full'+args.use_map_veto+'.ran.fits') - fnreg = indir+'/regressis_data/main_'+tp+'_256/RF/main_'+tp+'_imaging_weight_256.npy' + fnreg = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/v0.1/regressis_data/main_LRG_256/RF/main_LRG_imaging_weight_256.npy' #region definitions should be static, could have loaded from regressis code... rfw = np.load(fnreg,allow_pickle=True) maskreg = rfw.item()['mask_region'] From 6e86c76baee3a7888a9fd5ba4284c1bc2e4bce67 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 16:14:54 -0500 Subject: [PATCH 054/297] Update validation_cl.py --- scripts/validation/validation_cl.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/validation/validation_cl.py b/scripts/validation/validation_cl.py index caa6e44fd..57a0cb7b7 100644 --- a/scripts/validation/validation_cl.py +++ b/scripts/validation/validation_cl.py @@ -146,6 +146,10 @@ def get_delta(dat,ran,racol='RA',decol='DEC',wts=None,wtspix=None,thresh=0,nest= for tp in tps: print('doing '+tp) + + dtf_raw = fitsio.read(indir+tp+zdw+'_full'+'.dat.fits') + ran_raw = fitsio.read(indir+tp+zdw+'_0_full'+'.ran.fits') + dtf = fitsio.read(indir+tp+zdw+'_full'+args.use_map_veto+'.dat.fits') ran = fitsio.read(indir+tp+zdw+'_0_full'+args.use_map_veto+'.ran.fits') fnreg = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/v0.1/regressis_data/main_LRG_256/RF/main_LRG_imaging_weight_256.npy' #region definitions should be static, could have loaded from regressis code... @@ -179,7 +183,7 @@ def get_delta(dat,ran,racol='RA',decol='DEC',wts=None,wtspix=None,thresh=0,nest= sel_zr = dtfoz['Z_not4clus'] > zmin sel_zr &= dtfoz['Z_not4clus'] < zmax - delta_raw,fsky,frac = get_delta(dtf,ran,maskreg=maskreg) + delta_raw,fsky,frac = get_delta(dtf_raw,ran_raw,maskreg=maskreg) cl_raw = hp.anafast(delta_raw) ell = np.arange(len(cl_raw)) delta_allz,_,_ = get_delta(dtfoz,ran,wts=wt,maskreg=maskreg) From 884353004fb23e96868d875fa8bff05a84bb6442 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 16:26:25 -0500 Subject: [PATCH 055/297] Update validation_cl.py --- scripts/validation/validation_cl.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/validation/validation_cl.py b/scripts/validation/validation_cl.py index 57a0cb7b7..ecf9d18c7 100644 --- a/scripts/validation/validation_cl.py +++ b/scripts/validation/validation_cl.py @@ -206,6 +206,7 @@ def get_delta(dat,ran,racol='RA',decol='DEC',wts=None,wtspix=None,thresh=0,nest= print('doing w(theta)') sel = delta_raw != hp.UNSEEN angl,wth_raw = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_raw[sel],frac[sel]) + sel = delta_allz != hp.UNSEEN _,wth_allz = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_allz[sel],frac[sel]) _,wth_zr = get_wtheta_auto(sindec[sel],cosdec[sel],sinra[sel],cosra[sel],delta_zr[sel],frac[sel]) From 6b04030fdbda99d33c256ce43a49926fe57134c3 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 16:43:28 -0500 Subject: [PATCH 056/297] Update LSSpipe_Y1.txt --- Sandbox/LSSpipe_Y1.txt | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt index 9ffd092de..e366fc863 100644 --- a/Sandbox/LSSpipe_Y1.txt +++ b/Sandbox/LSSpipe_Y1.txt @@ -239,3 +239,15 @@ srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/mai add regressis weights to BGS_BRIGHT-21.5: python scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --version v1 --basedir 
/global/cfs/cdirs/desi/survey/catalogs/ --fulld n --regressis y --add_regressis y --survey Y1 --verspec iron --imsys_zbin y + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --fulld n --survey Y1 --verspec iron --version v1.1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_IMLIN --basedir /global/cfs/cdirs/desi/survey/catalogs/ + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type QSO --fulld n --survey Y1 --verspec iron --version v1.1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_RF --basedir /global/cfs/cdirs/desi/survey/catalogs/ + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type BGS_BRIGHT --fulld n --survey Y1 --verspec iron --version v1.1 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/ + +#get BGS_ANY catalogs that were requested +srun -N 1 -C cpu -t 04:00:00 -q interactive python mkCat_main.py --type BGS_ANY --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_veto y --verspec iron --survey Y1 --version v1.1 + + + From 7a9e782bf821e95ebd838719cee62e2734decc17 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 17:04:48 -0500 Subject: [PATCH 057/297] Update LSSpipe_Y1.txt --- Sandbox/LSSpipe_Y1.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt index e366fc863..f13ce4edc 100644 --- a/Sandbox/LSSpipe_Y1.txt +++ b/Sandbox/LSSpipe_Y1.txt @@ -247,7 +247,10 @@ srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/mai srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type BGS_BRIGHT --fulld n --survey Y1 --verspec iron --version v1.1 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/ #get BGS_ANY catalogs that were requested -srun -N 1 -C cpu -t 04:00:00 -q interactive python mkCat_main.py --type BGS_ANY --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_veto y --verspec iron --survey Y1 --version v1.1 + +# + +srun -N 1 -C cpu -t 04:00:00 -q interactive python scripts/main/mkCat_main.py --type BGS_ANY --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_map_veto y --verspec iron --survey Y1 --version v1.1 From ed8c23e3efe299a80fed470b174ed0f5b60cc9f4 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 17:09:02 -0500 Subject: [PATCH 058/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index 974623641..f2fbce3e6 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -406,6 +406,8 @@ def _parfun(rn): lssmapdir = '/global/cfs/cdirs/desi/survey/catalogs/external_input_maps/' rancatname = dirout+tracer_clus+'_*_full.ran.fits' rancatlist = sorted(glob.glob(rancatname)) + print(dirout) + print(rancatlist) fieldslist = allmapcols masklist = list(np.zeros(len(fieldslist),dtype=int)) From 1f29007a16111a123c8780c5f683e56bd12bd435 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 19 Jan 2024 20:14:26 -0500 Subject: [PATCH 059/297] Update LSSpipe_Y1.txt --- Sandbox/LSSpipe_Y1.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt index f13ce4edc..952af3afd 100644 
--- a/Sandbox/LSSpipe_Y1.txt +++ b/Sandbox/LSSpipe_Y1.txt @@ -248,8 +248,10 @@ srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/mai #get BGS_ANY catalogs that were requested -# +#need to make healpix maps +python scripts/main/mkCat_main.py --type BGS_ANY --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --survey Y1 --mkHPmaps y --version v1.1 +#apply healpix map vetos srun -N 1 -C cpu -t 04:00:00 -q interactive python scripts/main/mkCat_main.py --type BGS_ANY --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_map_veto y --verspec iron --survey Y1 --version v1.1 From 9afe24a54705fea0645e7ddb3b4270e9bb351dfc Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sat, 20 Jan 2024 18:17:18 -0500 Subject: [PATCH 060/297] Update LSSpipe_Y1.txt --- Sandbox/LSSpipe_Y1.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt index 952af3afd..e756d2016 100644 --- a/Sandbox/LSSpipe_Y1.txt +++ b/Sandbox/LSSpipe_Y1.txt @@ -254,5 +254,16 @@ python scripts/main/mkCat_main.py --type BGS_ANY --basedir /global/cfs/cdirs/des #apply healpix map vetos srun -N 1 -C cpu -t 04:00:00 -q interactive python scripts/main/mkCat_main.py --type BGS_ANY --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_map_veto y --verspec iron --survey Y1 --version v1.1 +#add zfail weights +python scripts/main/mkCat_main.py --type BGS_ANY --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --version v1.1 --add_weight_zfail y --survey Y1 --use_map_veto _HPmapcut +# produce blinded catalogs +module swap pyrecon/main pyrecon/mpi +srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type BGS_BRIGHT-21.5 --wsyscol WEIGHT_IMLIN --version v1.1 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y + +srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type LRG --wsyscol WEIGHT_IMLIN --version v1.1 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y + +srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type ELG_LOPnotqso --wsyscol WEIGHT_SN --version v1.1 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y + +srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type QSO --wsyscol WEIGHT_RF --version v1.1 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y From 7b83f5bf982fef251a80397970de70da0920aff5 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sat, 20 Jan 2024 19:17:19 -0500 Subject: [PATCH 061/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index f2fbce3e6..1ea4f66f1 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -1042,6 +1042,16 @@ def _wrapper(N): #print(np.sum(sel)) dd['WEIGHT_SN'][sel&selz] = hpmap[dpix[sel&selz]] + if tracer_clus == 'ELG_LOPnotqso': + if zl[0] == 0.8: + selz = dd['Z_not4clus'] <= zl[0] + if zl[1] == 1.6: + selz = 
dd['Z_not4clus'] > zl[1] + dd['WEIGHT_SN'][sel&selz] = hpmap[dpix[sel&selz]] + #assign weights to galaxies outside the z ranges + if tracer_clus == 'ELG_LOPnotqso': + zwl = '0.8_1.1' + #print(np.min(dd['WEIGHT_SYS']),np.max(dd['WEIGHT_SYS']),np.std(dd['WEIGHT_SYS'])) comments = [] comments.append("Using sysnet for WEIGHT_SYS") From a03ae8c22525e5b795094e16805cff9fafdb6993 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 22 Jan 2024 12:53:17 -0500 Subject: [PATCH 062/297] Update LSSpipe_Y1.txt --- Sandbox/LSSpipe_Y1.txt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt index e756d2016..60e0a2fbc 100644 --- a/Sandbox/LSSpipe_Y1.txt +++ b/Sandbox/LSSpipe_Y1.txt @@ -267,3 +267,13 @@ srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scri srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type QSO --wsyscol WEIGHT_RF --version v1.1 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y +#copy sysnet weights over +cp -r /global/cfs/cdirs/desi/survey/catalogs//Y1/LSS/iron/LSScats/v1//sysnet/ /global/cfs/cdirs/desi/survey/catalogs//Y1/LSS/iron/LSScats/v1.1/ + +#re-add sysnet weights, so that lower/higher redshift z get weights (so blinded data outside of bounds will get weights) +python scripts/main/mkCat_main.py --type ELG_LOP --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --version v1.1 --add_sysnet y --survey Y1 --use_map_veto _HPmapcut + +#re-run blinding for ELG_LOP +srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type ELG_LOPnotqso --wsyscol WEIGHT_SN --version v1.1 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y + + From e23fc7278ce8b6d07bc3882be681adc4c105807f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 22 Jan 2024 17:26:28 -0500 Subject: [PATCH 063/297] Update ffa2clus_fast.py --- scripts/mock_tools/ffa2clus_fast.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/ffa2clus_fast.py b/scripts/mock_tools/ffa2clus_fast.py index e29ebef91..7757376d4 100644 --- a/scripts/mock_tools/ffa2clus_fast.py +++ b/scripts/mock_tools/ffa2clus_fast.py @@ -257,7 +257,7 @@ def apply_imaging_veto(ff,reccircmasks,ebits): subfrac = 0.66 if args.mockver == 'AbacusSummit_v3': subfrac = 0.66 - elif tracer == 'BGS_BRIGHT-21.5': + elif tracer[:3] == 'BGS'#_BRIGHT-21.5': zmin = 0.1 zmax = 0.4 mainp = main(tracer,'iron','Y1') From 5cd76e1523a8626794e90ab569213c982ae3b72f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 22 Jan 2024 17:27:17 -0500 Subject: [PATCH 064/297] Update ffa2clus_fast.py --- scripts/mock_tools/ffa2clus_fast.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/ffa2clus_fast.py b/scripts/mock_tools/ffa2clus_fast.py index 7757376d4..a056a4c8c 100644 --- a/scripts/mock_tools/ffa2clus_fast.py +++ b/scripts/mock_tools/ffa2clus_fast.py @@ -257,7 +257,7 @@ def apply_imaging_veto(ff,reccircmasks,ebits): subfrac = 0.66 if args.mockver == 'AbacusSummit_v3': subfrac = 0.66 - elif tracer[:3] == 'BGS'#_BRIGHT-21.5': + elif tracer[:3] == 'BGS':#_BRIGHT-21.5': zmin = 0.1 zmax = 0.4 mainp = main(tracer,'iron','Y1') From 50a70352438a4d7a4f0ca644dd32988d9326aea3 Mon Sep 17 00:00:00 2001 From: 
ashleyjross Date: Mon, 22 Jan 2024 20:40:02 -0500 Subject: [PATCH 065/297] Update testEZcov.py --- scripts/mock_tools/testEZcov.py | 104 ++++++++++++++++++++++++++++++-- 1 file changed, 98 insertions(+), 6 deletions(-) diff --git a/scripts/mock_tools/testEZcov.py b/scripts/mock_tools/testEZcov.py index 43ab0183b..82200f6cf 100644 --- a/scripts/mock_tools/testEZcov.py +++ b/scripts/mock_tools/testEZcov.py @@ -101,28 +101,28 @@ def get_xi_desipipe_ab_baseline(mockn,zr='0.4-0.6',tp='LRG',rec='recon_recsym',n result = np.loadtxt(fn).transpose() return result[0],result[2:5] -def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4]): +def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4],thetacut=''): indmin = smin//4 indmax = smax//4 zr = str(zmin)+'-'+str(zmax) twa = '' if tracer == 'ELG_LOP': twa = 'notqso' - xiave,cov = get_xi_cov_desipipe_baseline_txt(zr=zr,tp=tracer,smin=smin,smax=smax,rec=rec,ells=ells) - sep,xiave_abamtl = get_xiave_desipipe_ab_baseline(zr=zr,tp=tracer+twa,rec=rec,flavor='altmtl',mockversion='v3_1') - _,xiave_abffa = get_xiave_desipipe_ab_baseline(zr=zr,tp=tracer,rec=rec,flavor='ffa',mockversion='v3') + xiave,cov = get_xi_cov_desipipe_baseline_txt(zr=zr,tp=tracer,smin=smin,smax=smax,rec=rec,ells=ells,thetacut=thetacut) + sep,xiave_abamtl = get_xiave_desipipe_ab_baseline(zr=zr,tp=tracer+twa,rec=rec,flavor='altmtl',mockversion='v3_1',thetacut=thetacut) + _,xiave_abffa = get_xiave_desipipe_ab_baseline(zr=zr,tp=tracer,rec=rec,flavor='ffa',mockversion='v3',thetacut=thetacut) xiave_abamtl_cut = cat_ells(xiave_abamtl[:len(ells)],indrange=[indmin,indmax]) xiave_abffa_cut = cat_ells(xiave_abffa[:len(ells)],indrange=[indmin,indmax]) icov = np.linalg.inv(cov) chi2la = [] chi2lf = [] for i in range(0,25): - _,xi_amtl = get_xi_desipipe_ab_baseline(i,zr=zr,tp=tracer+twa,rec=rec,flavor='altmtl',mockversion='v3_1') + _,xi_amtl = get_xi_desipipe_ab_baseline(i,zr=zr,tp=tracer+twa,rec=rec,flavor='altmtl',mockversion='v3_1',thetacut=thetacut) xi_amtl_cut = cat_ells(xi_amtl[:len(ells)],indrange=[indmin,indmax]) damtl = xi_amtl_cut-xiave_abamtl_cut chi2_amtl = np.dot(damtl,np.dot(damtl,icov)) chi2la.append(chi2_amtl) - _,xi_ffa = get_xi_desipipe_ab_baseline(i,zr=zr,tp=tracer,rec=rec,flavor='ffa',mockversion='v3') + _,xi_ffa = get_xi_desipipe_ab_baseline(i,zr=zr,tp=tracer,rec=rec,flavor='ffa',mockversion='v3',thetacut=thetacut) xi_ffa_cut = cat_ells(xi_ffa[:len(ells)],indrange=[indmin,indmax]) dffa = xi_ffa_cut-xiave_abffa_cut chi2_ffa = np.dot(dffa,np.dot(dffa,icov)) @@ -141,6 +141,8 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4],thetacut=''): titl = tracer+' '+str(zmin)+'<z<'+str(zmax) [...] From: ashleyjross Date: Mon, 22 Jan 2024 22:20:38 -0500 Subject: [PATCH 066/297] Update testEZcov.py --- scripts/mock_tools/testEZcov.py | 34 ++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/scripts/mock_tools/testEZcov.py b/scripts/mock_tools/testEZcov.py index 82200f6cf..b3d7c221e 100644 --- a/scripts/mock_tools/testEZcov.py +++ b/scripts/mock_tools/testEZcov.py @@ -201,7 +201,9 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4],thetacut=''): smin=20 smax=200 -recl = ['recon_recsym',''] + +recl = [''] +thetacut='_thetacut0.05' ellsl = [[0,2,4],[0,2],[0]] tp = 'QSO' @@ -209,11 +211,11 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4],thetacut=''): zrl = [(0.8,2.1)] for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) + fig =
compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells,thetacut=thetacut) figs.append(fig) outdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/' -with PdfPages(outdir+'testEZmockcov_'+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: +with PdfPages(outdir+'testEZmockcov_'+tp+thetacut+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: for fig in figs: pdf.savefig(fig) plt.close() @@ -224,9 +226,9 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4],thetacut=''): for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) + fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells,thetacut=thetacut) figs.append(fig) -with PdfPages(outdir+'testEZmockcov_'+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: +with PdfPages(outdir+'testEZmockcov_'+tp+thetacut+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: for fig in figs: pdf.savefig(fig) plt.close() @@ -237,16 +239,17 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4],thetacut=''): for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) + fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells,thetacut=thetacut) figs.append(fig) -with PdfPages(outdir+'testEZmockcov_'+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: +with PdfPages(outdir+'testEZmockcov_'+thetacut+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: for fig in figs: pdf.savefig(fig) plt.close() -recl = [''] -thetacut='_thetacut0.5' +thetacut='' + +recl = ['recon_recsym',''] ellsl = [[0,2,4],[0,2],[0]] tp = 'QSO' @@ -254,11 +257,11 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4],thetacut=''): for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells,thetacut=thetacut) + fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) figs.append(fig) outdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/' -with PdfPages(outdir+'testEZmockcov_'+tp+thetacut+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: +with PdfPages(outdir+'testEZmockcov_'+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: for fig in figs: pdf.savefig(fig) plt.close() @@ -269,9 +272,9 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4],thetacut=''): for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells,thetacut=thetacut) + fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) figs.append(fig) -with PdfPages(outdir+'testEZmockcov_'+tp+thetacut+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: +with PdfPages(outdir+'testEZmockcov_'+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: for fig in figs: pdf.savefig(fig) plt.close() @@ -282,10 +285,11 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4],thetacut=''): for rec in recl: for zr in zrl: for ells in ellsl: - fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells,thetacut=thetacut) + fig = compchi2stats(tp,zr[0],zr[1],smin,smax,rec=rec,ells=ells) figs.append(fig) -with PdfPages(outdir+'testEZmockcov_'+thetacut+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: +with PdfPages(outdir+'testEZmockcov_'+tp+'_smin'+str(smin)+'smax'+str(smax)+'.pdf') as pdf: for fig in figs: pdf.savefig(fig) plt.close() + From 90cd22658576cf5e60036fd96581841000c7b383 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 22 Jan 2024 22:29:37 -0500 Subject: [PATCH 067/297] 
Update testEZcov.py --- scripts/mock_tools/testEZcov.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/mock_tools/testEZcov.py b/scripts/mock_tools/testEZcov.py index b3d7c221e..fe50e24d7 100644 --- a/scripts/mock_tools/testEZcov.py +++ b/scripts/mock_tools/testEZcov.py @@ -206,6 +206,7 @@ def compchi2stats(tracer,zmin,zmax,smin,smax,rec='',ells=[0,2,4],thetacut=''): thetacut='_thetacut0.05' ellsl = [[0,2,4],[0,2],[0]] +figs = [] tp = 'QSO' zrl = [(0.8,2.1)] for rec in recl: From 7964313654756fdd3e25ea880ad50c0b91e7c1d5 Mon Sep 17 00:00:00 2001 From: Jiaxi-Yu Date: Tue, 23 Jan 2024 04:12:06 -0800 Subject: [PATCH 068/297] added focal-plane SSR chi2 information --- scripts/validation/validation_ssr_plot.py | 43 +++++++++++++++-------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/scripts/validation/validation_ssr_plot.py b/scripts/validation/validation_ssr_plot.py index e6f5da852..6641aed16 100644 --- a/scripts/validation/validation_ssr_plot.py +++ b/scripts/validation/validation_ssr_plot.py @@ -319,28 +319,43 @@ def SSR_chi2(goodz, allz, err): f8l.append(fibf8_dict[fib]) # plot the fibre-wise SSR + fig = plt.figure(figsize=(8,4)) + spec = gridspec.GridSpec(nrows=1,ncols=2, left = 0.1,right = 0.99,bottom=0.12,top = 0.98, wspace=0,width_ratios=[0.85,1]) + ax = np.empty((1,2), dtype=type(plt.axes)) + plt.rc('font', family='serif', size=12) for cp,split in enumerate(['N','S']): + ax[0,cp] = fig.add_subplot(spec[0,cp]) if split == 'N': selection = sel_obs&seln - elif split == 'S': - selection = sel_obs&~seln - selection_gz = selection&selz&gz - - ALL, GOOD, BIN, err, bin = SSR(full, 'FIBER', selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz], fiberbins=FIB) - ssrmodel = GOOD/ALL - # fibrewise SSR and the correction - plt.scatter(xll,yll,c=ssrmodel/np.nanmean(ssrmodel),s=2,vmin=1-dv,vmax=1+dv) - plt.colorbar() - plt.title(f'fibrewise SSR on {photos[cp]}') - plt.savefig(outdir+'{}_focalplane_success_rate_z{}z{}_{}_{}.png'.format(tp,zmin,zmax,split,args.version)) - plt.close() - - if split == 'N': ssr_wtN = 1./(ssrmodel/np.nanmean(ssrmodel)) ssr_wtN[np.isnan(ssr_wtN)] = 1. elif split == 'S': + selection = sel_obs&~seln ssr_wtS = 1./(ssrmodel/np.nanmean(ssrmodel)) ssr_wtS[np.isnan(ssr_wtS)] = 1. 
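# The chi2 that PATCH 068 prints just below for each focal-plane half
# reduces to the following. A minimal sketch, assuming GOOD and ALL are
# the (ZFAIL-weighted) good-redshift and observed counts per fiber and
# err the per-fiber uncertainty returned by SSR, as in this hunk; the
# function name is illustrative and not part of the script.
import numpy as np

def focalplane_ssr_chi2(GOOD, ALL, err):
    ssrmodel = GOOD / ALL                 # per-fiber success rate
    ssrmean = np.sum(GOOD) / np.sum(ALL)  # mean rate over the half
    chi2s = (ssrmodel - ssrmean) / err    # normalized residual per fiber
    # unobserved fibers give nan/inf residuals and are excluded
    chi2 = np.sum(chi2s[np.isfinite(chi2s)] ** 2)
    dof = np.sum(np.isfinite(ssrmodel))
    return chi2, dof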
+ selection_gz = selection&selz&gz + + ALL, GOOD, BIN, err, _ = SSR(full, 'FIBER', selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz], fiberbins=FIB) + ssrmodel = GOOD/ALL + ssrmean= np.sum(GOOD)/np.sum(ALL) + # fibrewise SSR and the correction + hb = ax[0,cp].scatter(xll,yll,c=ssrmodel/ssrmean,s=2,vmin=1-dv,vmax=1+dv) + if cp == 1: + cb = fig.colorbar(hb, ax=ax[0,1]) + cb.set_label('rescaled SSR',fontsize=12) + plt.text(-150,410,f'{photos[cp]}',fontsize=15,weight='bold') + plt.yticks(alpha=0) + else: + plt.text(-190,410,f'{photos[cp]}',fontsize=15,weight='bold') + plt.ylabel('Y (mm)') + plt.xlabel('X (mm)') + plt.xlim(-470,470) + plt.ylim(-420,470) + chi2s = (ssrmodel-ssrmean)/err + print(f'chi2 in {split}'+' is {:.1f}/{}'.format(np.sum(chi2s[np.isfinite(chi2s)]**2),np.sum(np.isfinite(ssrmodel)))) + + plt.savefig(outdir+'{}_focalplane_success_rate_z{}z{}_{}_{}.png'.format(tp,zmin,zmax,split,args.version)) + plt.close() #print('ssr_wt',ssr_wt) #print('BIN',list(BIN)) #print('FIB',list(FIB)) From 514089df142cc24a7b5702e1723b13ef6da9c805 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 24 Jan 2024 18:34:53 -0500 Subject: [PATCH 069/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index 1ea4f66f1..876d9c510 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -87,6 +87,7 @@ parser.add_argument("--imsys_zbin",help="if yes, do imaging systematic regressions in z bins",default='y') parser.add_argument("--imsys",help="add weights for imaging systematics using eboss method?",default='n') +parser.add_argument("--nran4imsys",help="number of random files to using for linear regression",default=4,type=int) parser.add_argument("--regressis",help="RF weights for imaging systematics?",default='n') parser.add_argument("--add_regressis",help="add RF weights for imaging systematics?",default='n') @@ -731,7 +732,7 @@ def _wrapper(N): selobs = dat['ZWARN'] != 999999 dat = dat[selgood&selobs] ranl = [] - for i in range(0,1):#int(args.maxr)): + for i in range(0,args.nran4imsys):#int(args.maxr)): ran = fitsio.read(os.path.join(dirout, tpstr+'_'+str(i)+'_full'+args.use_map_veto+'.ran.fits'), columns=['RA', 'DEC','PHOTSYS']) ranl.append(ran) rands = np.concatenate(ranl) From 9a1b23b96a1a46f6f6d337fbd506165a31c2d269 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 24 Jan 2024 18:37:12 -0500 Subject: [PATCH 070/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index 876d9c510..db3efd753 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -87,7 +87,7 @@ parser.add_argument("--imsys_zbin",help="if yes, do imaging systematic regressions in z bins",default='y') parser.add_argument("--imsys",help="add weights for imaging systematics using eboss method?",default='n') -parser.add_argument("--nran4imsys",help="number of random files to using for linear regression",default=4,type=int) +parser.add_argument("--nran4imsys",help="number of random files to using for linear regression",default=1,type=int) parser.add_argument("--regressis",help="RF weights for imaging systematics?",default='n') parser.add_argument("--add_regressis",help="add RF weights for imaging systematics?",default='n') From 476004cca18790cdfdb1443e634eaab8c6469a98 Mon Sep 17 00:00:00 2001 From: Jiaxi-Yu Date: Sun, 28 Jan 2024 
08:21:01 -0800 Subject: [PATCH 071/297] plot chi2 histograms on the focal plane --- scripts/validation/validation_ssr_plot.py | 115 +++++++++++++--------- 1 file changed, 69 insertions(+), 46 deletions(-) diff --git a/scripts/validation/validation_ssr_plot.py b/scripts/validation/validation_ssr_plot.py index 6641aed16..50fb9a3fc 100644 --- a/scripts/validation/validation_ssr_plot.py +++ b/scripts/validation/validation_ssr_plot.py @@ -22,6 +22,8 @@ parser.add_argument("--tracers", help="only ELG_LOPnotqso is available",default='all') parser.add_argument("--zmin", help="minimum redshift",default=-0.1) parser.add_argument("--zmax", help="maximum redshift",default=1.5) +parser.add_argument("--focalplane_SSR_plot", help="plot 2D SSR on the focal plane or uts chi2 histogram",type=bool,default=True) +parser.add_argument("--focalplane_SSR_LSS", help="add WEIGHT_focal to the full data or not",type=bool,default=None) args = parser.parse_args() @@ -98,7 +100,7 @@ def SSR_chi2(goodz, allz, err): # list all tracers tps = [args.tracers] if args.tracers == 'all': - tps = ['BGS_BRIGHT']#,'ELG_LOPnotqso','QSO','LRG'] + tps = ['BGS_BRIGHT','ELG_LOPnotqso','QSO','LRG'] if args.survey == 'SV3' and args.tracers == 'all': tps = ['QSO','LRG','BGS_ANY','BGS_BRIGHT','ELG','ELG_HIP','ELG_HIPnotqso','ELGnotqso'] @@ -248,13 +250,18 @@ def SSR_chi2(goodz, allz, err): ## the histogram of valid samples w.r.t quantities ## and that of the weighted good-redshift samples w.r.t quantities + weight_type = ': ZFAIL' if args.data == 'mock': bins = np.loadtxt('/global/cfs/cdirs/desi/survey/catalogs//Y1/LSS/iron/LSScats/test/plots/ssr/'+'{}_TSNR2_success_rate_z{}z{}_{}_{}_bins.txt'.format(tp,zmin,zmax,split,args.version)) ALL, GOOD, BIN, err, bins = SSR(full, quantity, selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz], binsbins=bins) elif args.data == 'LSS': - ALL, GOOD, BIN, err, bins = SSR(full, quantity, selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz]*full['WEIGHT_focal'][selection_gz]) + if (not 'WEIGHT_focal' in full.colnames): + ALL, GOOD, BIN, err, bins = SSR(full, quantity, selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz]) + else: + ALL, GOOD, BIN, err, bins = SSR(full, quantity, selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz]*full['WEIGHT_focal'][selection_gz]) + weight_type = r': ZFAIL*$\epsilon_{\rm focal}$' meanssr = np.sum(GOOD)/np.sum(ALL) - ax[i,j].errorbar(BIN,GOOD/ALL/meanssr,err/meanssr,label=split+r': ZFAIL $\chi^2/dof={:.1f}/{}$'.format(SSR_chi2(GOOD,ALL,err),len(ALL)),fmt=fmt) + ax[i,j].errorbar(BIN,GOOD/ALL/meanssr,err/meanssr,label=split+weight_type+r'$\chi^2/dof={:.1f}/{}$'.format(SSR_chi2(GOOD,ALL,err),len(ALL)),fmt=fmt) print('GOOD/ALL/meanssr',GOOD/ALL/meanssr) plt.xlabel(f'{quantity} at {zmin}<z<{zmax}') [...] - if full['FIBER'][i] != 999999: - if full['PHOTSYS'][i] == 'N': - if len(ssr_wtN[np.where(FIB==full['FIBER'][i])]) > 0: - - print("ssr wt[FIB==full['FIBER'][i]]",ssr_wtN[np.where(FIB==full['FIBER'][i])]) - full['WEIGHT_focal'][i] = ssr_wtN[np.where(FIB==full['FIBER'][i])] - print(i) - elif full['PHOTSYS'][i] == 'S': - if len(ssr_wtS[np.where(FIB==full['FIBER'][i])]) > 0: - - print("ssr wt[FIB==full['FIBER'][i]]",ssr_wtS[np.where(FIB==full['FIBER'][i])]) - full['WEIGHT_focal'][i] = ssr_wtS[np.where(FIB==full['FIBER'][i])] - print(i) + if (not 'WEIGHT_focal' in full.colnames)&(args.focalplane_SSR_LSS): + full['WEIGHT_focal'] = np.ones_like(full['WEIGHT_ZFAIL']) + for i in range(len(full['FIBER'])): + #print('fiber',full['FIBER'][i]) + #print('fib == fiber',np.where(FIB==full['FIBER'][i])) + #print('len ssr_wt',len(ssr_wt)) + #print('len fib',len(FIB)) +
#print('len BIN',len(BIN)) + + if full['FIBER'][i] != 999999: + if full['PHOTSYS'][i] == 'N': + if len(ssr_wtN[np.where(FIB==full['FIBER'][i])]) > 0: + + print("ssr wt[FIB==full['FIBER'][i]]",ssr_wtN[np.where(FIB==full['FIBER'][i])]) + full['WEIGHT_focal'][i] = ssr_wtN[np.where(FIB==full['FIBER'][i])] + print(i) + elif full['PHOTSYS'][i] == 'S': + if len(ssr_wtS[np.where(FIB==full['FIBER'][i])]) > 0: + + print("ssr wt[FIB==full['FIBER'][i]]",ssr_wtS[np.where(FIB==full['FIBER'][i])]) + full['WEIGHT_focal'][i] = ssr_wtS[np.where(FIB==full['FIBER'][i])] + print(i) - full.write(indir + tp+'_full.dat.2.fits',overwrite=True)''' + full.write(indir + tp+'_full.dat.2.fits',overwrite=True) \ No newline at end of file From a057e07be3d428e401059ecf64663d98e320870a Mon Sep 17 00:00:00 2001 From: Jiaxi-Yu Date: Sun, 28 Jan 2024 08:39:34 -0800 Subject: [PATCH 072/297] minor corrections on figures --- scripts/validation/validation_ssr_plot.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/scripts/validation/validation_ssr_plot.py b/scripts/validation/validation_ssr_plot.py index 50fb9a3fc..0ee5d3ffb 100644 --- a/scripts/validation/validation_ssr_plot.py +++ b/scripts/validation/validation_ssr_plot.py @@ -23,7 +23,7 @@ parser.add_argument("--zmin", help="minimum redshift",default=-0.1) parser.add_argument("--zmax", help="maximum redshift",default=1.5) parser.add_argument("--focalplane_SSR_plot", help="plot 2D SSR on the focal plane or uts chi2 histogram",type=bool,default=True) -parser.add_argument("--focalplane_SSR_LSS", help="add WEIGHT_focal to the full data or not",type=bool,default=None) +parser.add_argument("--focalplane_SSR_LSS", help="add WEIGHT_focal to the full data or not",type=bool,default=False) args = parser.parse_args() @@ -225,7 +225,7 @@ def SSR_chi2(goodz, allz, err): nrows = len(quantities)//ncols if len(quantities)%ncols ==0 else len(quantities)//ncols+1 plt.rc('font', family='serif', size=12) fig = plt.figure(figsize=(ncols*5,nrows*5)) - spec = gridspec.GridSpec(nrows=nrows,ncols=ncols,left = 0.05,right = 0.98,bottom=0.1,top = 0.98,wspace=0.2)#,hspace=0.15,wspace=0) + spec = gridspec.GridSpec(nrows=nrows,ncols=ncols,left = 0.05,right = 0.99,bottom=0.1,top = 0.98,wspace=0.25)#,hspace=0.15,wspace=0) ax = np.empty((nrows,ncols), dtype=type(plt.axes)) for q in range(len(quantities)): i,j = q//ncols,q%ncols @@ -259,9 +259,9 @@ def SSR_chi2(goodz, allz, err): ALL, GOOD, BIN, err, bins = SSR(full, quantity, selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz]) else: ALL, GOOD, BIN, err, bins = SSR(full, quantity, selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz]*full['WEIGHT_focal'][selection_gz]) - weight_type = r': ZFAIL*$\epsilon_{\rm focal}$' + weight_type = r': ZFAIL*$\epsilon_{\rm focal}$' meanssr = np.sum(GOOD)/np.sum(ALL) - ax[i,j].errorbar(BIN,GOOD/ALL/meanssr,err/meanssr,label=split+weight_type+r'$\chi^2/dof={:.1f}/{}$'.format(SSR_chi2(GOOD,ALL,err),len(ALL)),fmt=fmt) + ax[i,j].errorbar(BIN,GOOD/ALL/meanssr,err/meanssr,label=split+weight_type+r', $\chi^2/dof={:.1f}/{}$'.format(SSR_chi2(GOOD,ALL,err),len(ALL)),fmt=fmt) print('GOOD/ALL/meanssr',GOOD/ALL/meanssr) plt.xlabel(f'{quantity} at {zmin}<z<{zmax}') [...] From: Jiaxi-Yu Date: Sun, 28 Jan 2024 08:49:36 -0800 Subject: [PATCH 073/297] minor corrections in figures --- scripts/validation/validation_ssr_plot.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/scripts/validation/validation_ssr_plot.py b/scripts/validation/validation_ssr_plot.py index 0ee5d3ffb..bae6f4f56
100644 --- a/scripts/validation/validation_ssr_plot.py +++ b/scripts/validation/validation_ssr_plot.py @@ -277,13 +277,11 @@ def SSR_chi2(goodz, allz, err): ax[i,j].plot(BIN,GOOD_uncorr/ALL/meanssr_uncorr,label=split+r': unweighted, $\chi^2/dof={:.1f}/{}$'.format(SSR_chi2(GOOD_uncorr,ALL,err_uncorr),len(ALL)),color=fmt_model,alpha=0.5) ax[i,j].axhline(1,c='k') - handles, labels = plt.gca().get_legend_handles_labels() - order = [2,0,3,1] - plt.legend([handles[idx] for idx in order],[labels[idx] for idx in order],frameon=False) - + handles, labels = plt.gca().get_legend_handles_labels() + order = [2,0,3,1] + plt.legend([handles[idx] for idx in order],[labels[idx] for idx in order],frameon=False) plt.grid(True) - plt.legend() plt.ylabel('{} z success rate'.format(tp)) plt.savefig(outdir+'{}_success_rate_z{}z{}_{}.png'.format(tp,zmin,zmax,args.version)) From 05e50c5b4f3f2aedfadc8c2af5f7908f8e2c85c6 Mon Sep 17 00:00:00 2001 From: Jiaxi-Yu Date: Sun, 28 Jan 2024 14:06:10 -0800 Subject: [PATCH 074/297] provide chi2 for 2D SSR --- scripts/validation/validation_ssr_plot.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/scripts/validation/validation_ssr_plot.py b/scripts/validation/validation_ssr_plot.py index bae6f4f56..522c811ab 100644 --- a/scripts/validation/validation_ssr_plot.py +++ b/scripts/validation/validation_ssr_plot.py @@ -22,8 +22,8 @@ parser.add_argument("--tracers", help="only ELG_LOPnotqso is available",default='all') parser.add_argument("--zmin", help="minimum redshift",default=-0.1) parser.add_argument("--zmax", help="maximum redshift",default=1.5) -parser.add_argument("--focalplane_SSR_plot", help="plot 2D SSR on the focal plane or uts chi2 histogram",type=bool,default=True) -parser.add_argument("--focalplane_SSR_LSS", help="add WEIGHT_focal to the full data or not",type=bool,default=False) +parser.add_argument("--focalplane_SSR_chi2", help="plot the chi2 histogram of 2D SSR? 
otherwise 2D SSR plot",action='store_true',default=False) +parser.add_argument("--focalplane_SSR_LSS", help="add WEIGHT_focal to the full data or not",action='store_true',default=False) args = parser.parse_args() @@ -329,7 +329,7 @@ def SSR_chi2(goodz, allz, err): # plot the fibre-wise SSR fig = plt.figure(figsize=(8,4)) - spec = gridspec.GridSpec(nrows=1,ncols=2, left = 0.1,right = 0.99,bottom=0.12,top = 0.98, wspace=0,width_ratios=[0.85,1]) + spec = gridspec.GridSpec(nrows=1,ncols=2, left = 0.1,right = 0.99,bottom=0.12,top = 0.93, wspace=0,width_ratios=[0.85,1]) ax = np.empty((1,2), dtype=type(plt.axes)) plt.rc('font', family='serif', size=12) for cp,split in enumerate(['N','S']): @@ -343,8 +343,10 @@ def SSR_chi2(goodz, allz, err): ALL, GOOD, BIN, err, _ = SSR(full, 'FIBER', selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz], fiberbins=FIB) ssrmodel = GOOD/ALL ssrmean= np.sum(GOOD)/np.sum(ALL) + chi2s = (ssrmodel-ssrmean)/err + plt.title('chi2 = {:.1f}/{}'.format(np.sum(chi2s[np.isfinite(chi2s)]**2),np.sum(np.isfinite(ssrmodel)))) - if args.focalplane_SSR_plot: + if not args.focalplane_SSR_chi2: # fibrewise SSR and the correction hb = ax[0,cp].scatter(xll,yll,c=ssrmodel/ssrmean,s=2,vmin=1-dv,vmax=1+dv) if cp == 1: @@ -359,8 +361,6 @@ def SSR_chi2(goodz, allz, err): plt.xlim(-470,470) plt.ylim(-420,470) else: - chi2s = (ssrmodel-ssrmean)/err - print(f'chi2 in {split}'+' is {:.1f}/{}'.format(np.sum(chi2s[np.isfinite(chi2s)]**2),np.sum(np.isfinite(ssrmodel)))) plt.hist(chi2s[np.isfinite(chi2s)],density=True,label=f'{tp} in {split}') plt.xlabel('chi2') if cp ==0: @@ -373,11 +373,11 @@ def SSR_chi2(goodz, allz, err): elif split == 'S': ssr_wtS = 1./(ssrmodel/np.nanmean(ssrmodel)) ssr_wtS[np.isnan(ssr_wtS)] = 1. - if args.focalplane_SSR_plot: + if not args.focalplane_SSR_chi2: plt.savefig(outdir+'{}_focalplane_success_rate_z{}z{}_{}.png'.format(tp,zmin,zmax,args.version)) plt.close('all') else: - plt.savefig(f'{tp}_chi2_hist.png') + plt.savefig(outdir+f'{tp}_chi2_hist.png') plt.close() #print('ssr_wt',ssr_wt) From abafcb46b25c8d96860cd3b99eeaca8f5a21546f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 30 Jan 2024 15:54:27 -0500 Subject: [PATCH 075/297] Update pota2clus_fast.py --- scripts/mock_tools/pota2clus_fast.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/scripts/mock_tools/pota2clus_fast.py b/scripts/mock_tools/pota2clus_fast.py index 007e24150..815b17809 100644 --- a/scripts/mock_tools/pota2clus_fast.py +++ b/scripts/mock_tools/pota2clus_fast.py @@ -268,14 +268,16 @@ def apply_imaging_veto(ff,reccircmasks,ebits): for tracer in tracers: - mainp = main(tracer,'iron','Y1') - bit = bittest[tracer]#targetmask.desi_mask[tracer] - seltar = mock_data[desitarg] & bit > 0 - mock_data_tr = mock_data[seltar] - lmockdat_noveto = len(mock_data_tr) - logger.info('length before/after cut to target type '+tracer+' using bit '+str(bit)+' and column '+desitarg) - logger.info(str(ndattot)+','+str(len(mock_data_tr))) - + if args.prog == 'DARK': + mainp = main(tracer,'iron','Y1') + bit = bittest[tracer]#targetmask.desi_mask[tracer] + seltar = mock_data[desitarg] & bit > 0 + mock_data_tr = mock_data[seltar] + lmockdat_noveto = len(mock_data_tr) + logger.info('length before/after cut to target type '+tracer+' using bit '+str(bit)+' and column '+desitarg) + logger.info(str(ndattot)+','+str(len(mock_data_tr))) + else: + mock_data_tr = mock_data tracerd = tracer if tracer == 'BGS_BRIGHT-21.5': From 602bf305a3e3956506347faa80cff970c63d6311 Mon Sep 
17 00:00:00 2001 From: ashleyjross Date: Tue, 30 Jan 2024 16:01:06 -0500 Subject: [PATCH 076/297] Create process2genab_BGS215_pota2clus.sh --- scripts/mock_tools/process2genab_BGS215_pota2clus.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100755 scripts/mock_tools/process2genab_BGS215_pota2clus.sh diff --git a/scripts/mock_tools/process2genab_BGS215_pota2clus.sh b/scripts/mock_tools/process2genab_BGS215_pota2clus.sh new file mode 100755 index 000000000..bf2b1a509 --- /dev/null +++ b/scripts/mock_tools/process2genab_BGS215_pota2clus.sh @@ -0,0 +1,12 @@ +#!/bin/bash +source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main +PYTHONPATH=$PYTHONPATH:$HOME/LSS/py +for (( i=$1;i<=$2;i++ )) +do + srun -N 1 -C cpu -t 00:45:00 --qos interactive --account desi python scripts/mock_tools/pota2clus_fast.py --mockver AbacusSummitBGS --tracer BGS_BRIGHT-21.5 --realization $i + mv $SCRATCH/AbacusSummitBGS/mock$i/*GC* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/mock$i/ + mv $SCRATCH/AbacusSummitBGS/mock$i/*nz* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/mock$i/ + rm $SCRATCH/AbacusSummitBGS/mock$i/* + chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/mock$i/*clustering* + chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/mock$i/*nz* +done \ No newline at end of file From 6300b93c81557fdf63123a5149040cabc6a3300f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 30 Jan 2024 16:02:23 -0500 Subject: [PATCH 077/297] Update process2genab_BGS215_pota2clus.sh --- scripts/mock_tools/process2genab_BGS215_pota2clus.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/process2genab_BGS215_pota2clus.sh b/scripts/mock_tools/process2genab_BGS215_pota2clus.sh index bf2b1a509..3fbf87519 100755 --- a/scripts/mock_tools/process2genab_BGS215_pota2clus.sh +++ b/scripts/mock_tools/process2genab_BGS215_pota2clus.sh @@ -3,7 +3,7 @@ source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main PYTHONPATH=$PYTHONPATH:$HOME/LSS/py for (( i=$1;i<=$2;i++ )) do - srun -N 1 -C cpu -t 00:45:00 --qos interactive --account desi python scripts/mock_tools/pota2clus_fast.py --mockver AbacusSummitBGS --tracer BGS_BRIGHT-21.5 --realization $i + srun -N 1 -C cpu -t 00:45:00 --qos interactive --account desi python scripts/mock_tools/pota2clus_fast.py --mockver AbacusSummitBGS --tracer BGS_BRIGHT-21.5 --realization $i --prog BRIGHT mv $SCRATCH/AbacusSummitBGS/mock$i/*GC* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/mock$i/ mv $SCRATCH/AbacusSummitBGS/mock$i/*nz* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/mock$i/ rm $SCRATCH/AbacusSummitBGS/mock$i/* From 972fda387e837246f299d2ed5f29c4256ddb92ad Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 30 Jan 2024 16:06:02 -0500 Subject: [PATCH 078/297] Update pota2clus_fast.py --- scripts/mock_tools/pota2clus_fast.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/mock_tools/pota2clus_fast.py b/scripts/mock_tools/pota2clus_fast.py index 815b17809..18a33b9b9 100644 --- a/scripts/mock_tools/pota2clus_fast.py +++ b/scripts/mock_tools/pota2clus_fast.py @@ -234,6 +234,8 @@ def apply_imaging_veto(ff,reccircmasks,ebits): 'MASKBITS','ZWARN', 'COLLISION', 'TILEID'] +if args.prog == 'BRIGHT': + cols.append('R_MAG_ABS') mock_data = fitsio.read(in_data_fn,columns=cols) selcoll = 
mock_data['COLLISION'] == False mock_data = mock_data[selcoll] From 4191e36a6821bb6cad062948d79787a975d2f5df Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 30 Jan 2024 16:10:03 -0500 Subject: [PATCH 079/297] Update pota2clus_fast.py --- scripts/mock_tools/pota2clus_fast.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock_tools/pota2clus_fast.py b/scripts/mock_tools/pota2clus_fast.py index 18a33b9b9..866969c02 100644 --- a/scripts/mock_tools/pota2clus_fast.py +++ b/scripts/mock_tools/pota2clus_fast.py @@ -269,9 +269,9 @@ def apply_imaging_veto(ff,reccircmasks,ebits): for tracer in tracers: - + mainp = main(tracer,'iron','Y1') if args.prog == 'DARK': - mainp = main(tracer,'iron','Y1') + bit = bittest[tracer]#targetmask.desi_mask[tracer] seltar = mock_data[desitarg] & bit > 0 mock_data_tr = mock_data[seltar] From cefde56b622e19c67681c79298cc66439769f74c Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 11:30:44 -0500 Subject: [PATCH 080/297] Update addsys.py --- scripts/mock_tools/addsys.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/addsys.py b/scripts/mock_tools/addsys.py index 209d7b416..95fd370a5 100644 --- a/scripts/mock_tools/addsys.py +++ b/scripts/mock_tools/addsys.py @@ -32,7 +32,7 @@ parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1') parser.add_argument("--verspec",help="version for redshifts",default='iron') parser.add_argument("--data_version",help="version for redshifts",default='v0.6') -parser.add_argument("--mockcatver", default=None, help = "if not None, gets added to the output path") +parser.add_argument("--mockcatver", default='v2', help = "if not None, gets added to the output path") parser.add_argument("--minr", help="minimum number for random files",default=0) parser.add_argument("--maxr", help="maximum for random files, 18 are available (use parallel script for all)",default=18) parser.add_argument("--prepsysnet",help="prepare data to get sysnet weights for imaging systematics?",default='n') From afd5e4db0b7c6e1367dcfd32a089777fb8523f68 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 11:34:24 -0500 Subject: [PATCH 081/297] Update addsys.py --- scripts/mock_tools/addsys.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/mock_tools/addsys.py b/scripts/mock_tools/addsys.py index 95fd370a5..d97bb7d7b 100644 --- a/scripts/mock_tools/addsys.py +++ b/scripts/mock_tools/addsys.py @@ -484,9 +484,10 @@ def addrancol(rn): for rn in range(rm,rx): addrancol(rn) if args.par == 'y': - nproc = 9 + #nproc = 9 nran = rx-rm - inds = np.arange(nran) + nproc = nran + inds = np.arange(rm,rx) from multiprocessing import Pool with Pool(processes=nproc) as pool: res = pool.map(addrancol, inds) From 568e035f7f03bc7fefae22b8919fca4f0dcfff5d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 12:33:14 -0500 Subject: [PATCH 082/297] Update addsys.py --- scripts/mock_tools/addsys.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/addsys.py b/scripts/mock_tools/addsys.py index d97bb7d7b..d23a34bfd 100644 --- a/scripts/mock_tools/addsys.py +++ b/scripts/mock_tools/addsys.py @@ -32,7 +32,7 @@ parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1') parser.add_argument("--verspec",help="version for redshifts",default='iron') parser.add_argument("--data_version",help="version for redshifts",default='v0.6') 
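# PATCH 081 above switches addsys.py from a fixed pool of 9 workers to
# one worker per random file. A self-contained sketch of that pattern,
# assuming the --minr/--maxr defaults of 0 and 18; addrancol here is a
# stand-in for the real per-random worker in addsys.py.
from multiprocessing import Pool

import numpy as np

def addrancol(rann):
    # the real worker adds weight columns to random file number rann
    return rann

if __name__ == '__main__':
    rm, rx = 0, 18
    inds = np.arange(rm, rx)
    # one process per random file, so no index queues behind another
    with Pool(processes=len(inds)) as pool:
        res = pool.map(addrancol, inds)
    print(res)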
-parser.add_argument("--mockcatver", default='v2', help = "if not None, gets added to the output path") +parser.add_argument("--mockcatver", default=None, help = "if not None, gets added to the output path") parser.add_argument("--minr", help="minimum number for random files",default=0) parser.add_argument("--maxr", help="maximum for random files, 18 are available (use parallel script for all)",default=18) parser.add_argument("--prepsysnet",help="prepare data to get sysnet weights for imaging systematics?",default='n') From bac938fba8b380b37a401324d2f9a1bb52bd6b76 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 12:34:13 -0500 Subject: [PATCH 083/297] Update add_linweight_abBGSffa.sh --- scripts/mock_tools/add_linweight_abBGSffa.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/add_linweight_abBGSffa.sh b/scripts/mock_tools/add_linweight_abBGSffa.sh index 25d6d7541..ae0497bf2 100755 --- a/scripts/mock_tools/add_linweight_abBGSffa.sh +++ b/scripts/mock_tools/add_linweight_abBGSffa.sh @@ -3,7 +3,7 @@ source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main PYTHONPATH=$PYTHONPATH:$HOME/LSS/py for (( i=$1;i<=$2;i++ )) do - python scripts/mock_tools/addsys.py --tracer BGS_BRIGHT-21.5_ffa --imsys y --add_imsys_ran y --par y --realization $i --base_dir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/SecondGenMocks/AbacusSummit/ + python scripts/mock_tools/addsys.py --tracer BGS_BRIGHT-21.5_ffa --imsys y --add_imsys_ran y --par y --realization $i --base_dir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/FFA done From 52b7d65640a8f2563195de252de3041e8893608a Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 12:34:33 -0500 Subject: [PATCH 084/297] Update add_linweight_abBGSffa.sh --- scripts/mock_tools/add_linweight_abBGSffa.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/add_linweight_abBGSffa.sh b/scripts/mock_tools/add_linweight_abBGSffa.sh index ae0497bf2..e45b0d5d8 100755 --- a/scripts/mock_tools/add_linweight_abBGSffa.sh +++ b/scripts/mock_tools/add_linweight_abBGSffa.sh @@ -3,7 +3,7 @@ source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main PYTHONPATH=$PYTHONPATH:$HOME/LSS/py for (( i=$1;i<=$2;i++ )) do - python scripts/mock_tools/addsys.py --tracer BGS_BRIGHT-21.5_ffa --imsys y --add_imsys_ran y --par y --realization $i --base_dir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/FFA + python scripts/mock_tools/addsys.py --tracer BGS_BRIGHT-21.5_ffa --imsys y --add_imsys_ran y --par y --realization $i --base_dir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/FFA/ done From 9487c8a6f27283595b4dc0e69d113aa2d7093cdd Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 12:38:27 -0500 Subject: [PATCH 085/297] Update addsys.py --- scripts/mock_tools/addsys.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/scripts/mock_tools/addsys.py b/scripts/mock_tools/addsys.py index d23a34bfd..a49ccb039 100644 --- a/scripts/mock_tools/addsys.py +++ b/scripts/mock_tools/addsys.py @@ -168,19 +168,19 @@ def splitGC_wo(flroot,datran='.dat',rann=0): use_maps = fit_maps print('in imsys loop') - #datn = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_clustering.dat.fits')) - #dats = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , 
tp+'_SGC'+'_clustering.dat.fits')) + datn = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_clustering.dat.fits')) + dats = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_SGC'+'_clustering.dat.fits')) #dat = np.concatenate((datn,dats)) #dat = common.addNS(Table(dat)) - dat = Table(fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_clustering.dat.fits'))) + #dat = Table(fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_clustering.dat.fits'))) print(len(dat)) ranl = [] for i in range(0,1): - #rann = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP']) - #rans = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_SGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP']) - #ran = np.concatenate((rann,rans)) + rann = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP']) + rans = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_SGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP']) + ran = np.concatenate((rann,rans)) #ran = common.addNS(Table(ran)) - ran = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP','PHOTSYS']) + #ran = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP','PHOTSYS']) ranl.append(ran) rands = np.concatenate(ranl) print(len(rands)) From 4f68438fb0f009498a3bf88da807566b6bf67734 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 12:38:56 -0500 Subject: [PATCH 086/297] Update addsys.py --- scripts/mock_tools/addsys.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/addsys.py b/scripts/mock_tools/addsys.py index a49ccb039..4f32d2fa6 100644 --- a/scripts/mock_tools/addsys.py +++ b/scripts/mock_tools/addsys.py @@ -170,7 +170,7 @@ def splitGC_wo(flroot,datran='.dat',rann=0): print('in imsys loop') datn = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_clustering.dat.fits')) dats = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_SGC'+'_clustering.dat.fits')) - #dat = np.concatenate((datn,dats)) + dat = np.concatenate((datn,dats)) #dat = common.addNS(Table(dat)) #dat = Table(fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_clustering.dat.fits'))) print(len(dat)) From 9b9e911b7318fb262c02a5a458e29e4c13461f6a Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 12:39:52 -0500 Subject: [PATCH 087/297] Update addsys.py --- scripts/mock_tools/addsys.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/addsys.py b/scripts/mock_tools/addsys.py index 4f32d2fa6..9657e1942 100644 --- a/scripts/mock_tools/addsys.py +++ b/scripts/mock_tools/addsys.py @@ -170,7 +170,7 @@ def splitGC_wo(flroot,datran='.dat',rann=0): print('in imsys loop') datn = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_clustering.dat.fits')) dats = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_SGC'+'_clustering.dat.fits')) - dat = np.concatenate((datn,dats)) + dat = Table(np.concatenate((datn,dats))) #dat = common.addNS(Table(dat)) #dat = 
Table(fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_clustering.dat.fits'))) print(len(dat)) From efa7b39ebcb508396e375fcf6d5574ba7274ad90 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 12:40:49 -0500 Subject: [PATCH 088/297] Update addsys.py --- scripts/mock_tools/addsys.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock_tools/addsys.py b/scripts/mock_tools/addsys.py index 9657e1942..191927e74 100644 --- a/scripts/mock_tools/addsys.py +++ b/scripts/mock_tools/addsys.py @@ -176,8 +176,8 @@ def splitGC_wo(flroot,datran='.dat',rann=0): print(len(dat)) ranl = [] for i in range(0,1): - rann = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP']) - rans = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_SGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP']) + rann = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP','PHOTSYS']) + rans = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_SGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP','PHOTSYS']) ran = np.concatenate((rann,rans)) #ran = common.addNS(Table(ran)) #ran = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP','PHOTSYS']) From b57dde26b3cf16d393fb5ab57b0ccb59aa5e0617 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 12:41:26 -0500 Subject: [PATCH 089/297] Update addsys.py --- scripts/mock_tools/addsys.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/mock_tools/addsys.py b/scripts/mock_tools/addsys.py index 191927e74..7bdb16a05 100644 --- a/scripts/mock_tools/addsys.py +++ b/scripts/mock_tools/addsys.py @@ -176,10 +176,10 @@ def splitGC_wo(flroot,datran='.dat',rann=0): print(len(dat)) ranl = [] for i in range(0,1): - rann = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP','PHOTSYS']) - rans = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_SGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP','PHOTSYS']) + rann = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP']) + rans = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_SGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP']) ran = np.concatenate((rann,rans)) - #ran = common.addNS(Table(ran)) + ran = common.addNS(Table(ran)) #ran = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP','PHOTSYS']) ranl.append(ran) rands = np.concatenate(ranl) From efa5ed5c2913ac9cce2002fd918d654d26c6b397 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 14:26:42 -0500 Subject: [PATCH 090/297] Create add_RFweight_abLRGELGffa.sh --- scripts/mock_tools/add_RFweight_abLRGELGffa.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100755 scripts/mock_tools/add_RFweight_abLRGELGffa.sh diff --git a/scripts/mock_tools/add_RFweight_abLRGELGffa.sh b/scripts/mock_tools/add_RFweight_abLRGELGffa.sh new file mode 
100755 index 000000000..56aa34c8b --- /dev/null +++ b/scripts/mock_tools/add_RFweight_abLRGELGffa.sh @@ -0,0 +1,10 @@ +#!/bin/bash +source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main +PYTHONPATH=$PYTHONPATH:$HOME/LSS/py +for (( i=$1;i<=$2;i++ )) +do + srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/mock_tools/addsys.py --tracer LRG_ffa --regressis y --add_regressis y --add_regressis_ran y --par y --realization $i --mockcatver v2 + srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/mock_tools/addsys.py --tracer ELG_LOP_ffa --regressis y --add_regressis y --add_regressis_ran y --par y --realization 1 --mockcatver v2 + +done + From 353a5a5fa9ae91430d1145e46e119ff5e0270cb9 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 31 Jan 2024 14:27:11 -0500 Subject: [PATCH 091/297] Update add_RFweight_abLRGELGffa.sh --- scripts/mock_tools/add_RFweight_abLRGELGffa.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/add_RFweight_abLRGELGffa.sh b/scripts/mock_tools/add_RFweight_abLRGELGffa.sh index 56aa34c8b..bfdb9c35c 100755 --- a/scripts/mock_tools/add_RFweight_abLRGELGffa.sh +++ b/scripts/mock_tools/add_RFweight_abLRGELGffa.sh @@ -4,7 +4,7 @@ PYTHONPATH=$PYTHONPATH:$HOME/LSS/py for (( i=$1;i<=$2;i++ )) do srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/mock_tools/addsys.py --tracer LRG_ffa --regressis y --add_regressis y --add_regressis_ran y --par y --realization $i --mockcatver v2 - srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/mock_tools/addsys.py --tracer ELG_LOP_ffa --regressis y --add_regressis y --add_regressis_ran y --par y --realization 1 --mockcatver v2 + srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/mock_tools/addsys.py --tracer ELG_LOP_ffa --regressis y --add_regressis y --add_regressis_ran y --par y --realization $i --mockcatver v2 done From b1791338476526bd711cb2fb6c2a327948ed0f56 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 1 Feb 2024 10:59:36 -0500 Subject: [PATCH 092/297] Update pota2clus_fast.py --- scripts/mock_tools/pota2clus_fast.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock_tools/pota2clus_fast.py b/scripts/mock_tools/pota2clus_fast.py index 866969c02..f00582bbf 100644 --- a/scripts/mock_tools/pota2clus_fast.py +++ b/scripts/mock_tools/pota2clus_fast.py @@ -282,8 +282,8 @@ def apply_imaging_veto(ff,reccircmasks,ebits): mock_data_tr = mock_data tracerd = tracer - if tracer == 'BGS_BRIGHT-21.5': - tracerd = 'BGS' + #if tracer == 'BGS_BRIGHT-21.5': + # tracerd = 'BGS' out_data_fn = outdir+tracerd+'_complete_clustering.dat.fits' out_data_froot = outdir+tracerd+'_complete_' From 068b524157d0b9e387bd88c41ad4b7318aae2153 Mon Sep 17 00:00:00 2001 From: Jiaxi-Yu Date: Sat, 3 Feb 2024 09:20:24 -0800 Subject: [PATCH 093/297] full.dat.fits: plot SSR on the focal plane without ZFAIL. 
similar to that with ZFAIL --- scripts/validation/validation_ssr_plot.py | 126 +++++++++++++--------- 1 file changed, 75 insertions(+), 51 deletions(-) diff --git a/scripts/validation/validation_ssr_plot.py b/scripts/validation/validation_ssr_plot.py index 522c811ab..b1fcb2173 100644 --- a/scripts/validation/validation_ssr_plot.py +++ b/scripts/validation/validation_ssr_plot.py @@ -22,7 +22,6 @@ parser.add_argument("--tracers", help="only ELG_LOPnotqso is available",default='all') parser.add_argument("--zmin", help="minimum redshift",default=-0.1) parser.add_argument("--zmax", help="maximum redshift",default=1.5) -parser.add_argument("--focalplane_SSR_chi2", help="plot the chi2 histogram of 2D SSR? otherwise 2D SSR plot",action='store_true',default=False) parser.add_argument("--focalplane_SSR_LSS", help="add WEIGHT_focal to the full data or not",action='store_true',default=False) @@ -100,7 +99,7 @@ def SSR_chi2(goodz, allz, err): # list all tracers tps = [args.tracers] if args.tracers == 'all': - tps = ['BGS_BRIGHT','ELG_LOPnotqso','QSO','LRG'] + tps = ['ELG_LOPnotqso']#['BGS_BRIGHT','ELG_LOPnotqso','QSO','LRG'] if args.survey == 'SV3' and args.tracers == 'all': tps = ['QSO','LRG','BGS_ANY','BGS_BRIGHT','ELG','ELG_HIP','ELG_HIPnotqso','ELGnotqso'] @@ -288,7 +287,6 @@ def SSR_chi2(goodz, allz, err): plt.close('all') # obtain the fibre-wise SSR - dv = 0.05 photos = ['BASS/MzLS','DECaLS'] # the fibreIDs dl = np.loadtxt(f'/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/v0.1/{tp}_zsuccess_fromfull.txt').transpose() @@ -327,65 +325,91 @@ def SSR_chi2(goodz, allz, err): f6l.append(fibf6_dict[fib]) f8l.append(fibf8_dict[fib]) - # plot the fibre-wise SSR - fig = plt.figure(figsize=(8,4)) - spec = gridspec.GridSpec(nrows=1,ncols=2, left = 0.1,right = 0.99,bottom=0.12,top = 0.93, wspace=0,width_ratios=[0.85,1]) - ax = np.empty((1,2), dtype=type(plt.axes)) - plt.rc('font', family='serif', size=12) - for cp,split in enumerate(['N','S']): - ax[0,cp] = fig.add_subplot(spec[0,cp]) - if split == 'N': - selection = sel_obs&seln - elif split == 'S': - selection = sel_obs&~seln - selection_gz = selection&selz&gz - - ALL, GOOD, BIN, err, _ = SSR(full, 'FIBER', selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz], fiberbins=FIB) - ssrmodel = GOOD/ALL - ssrmean= np.sum(GOOD)/np.sum(ALL) - chi2s = (ssrmodel-ssrmean)/err - plt.title('chi2 = {:.1f}/{}'.format(np.sum(chi2s[np.isfinite(chi2s)]**2),np.sum(np.isfinite(ssrmodel)))) + # plot the SSR and chi2 on the focal plane (fibre-wise SSR) + for ptype in ['noZFAIL','SSR','chi2','chi2hist']: + if ptype != 'chi2hist': + right = 0.93 + else: + right = 0.99 + fig = plt.figure(figsize=(9,4)) + spec = gridspec.GridSpec(nrows=1,ncols=2, left = 0.1,right = right,bottom=0.12,top = 0.93, wspace=0,width_ratios=[0.85,1]) + ax = np.empty((1,2), dtype=type(plt.axes)) + plt.rc('font', family='serif', size=12) + for cp,split in enumerate(['N','S']): + ax[0,cp] = fig.add_subplot(spec[0,cp]) + if split == 'N': + selection = sel_obs&seln + elif split == 'S': + selection = sel_obs&~seln + selection_gz = selection&selz&gz + + if ptype == 'noZFAIL': + ALL, GOOD, BIN, err, _ = SSR(full, 'FIBER', selection, selection_gz, weights=np.ones(np.sum(selection_gz)), fiberbins=FIB) + ptypetl = 'no ZFAIL weight' + else: + ALL, GOOD, BIN, err, _ = SSR(full, 'FIBER', selection, selection_gz, weights=full['WEIGHT_ZFAIL'][selection_gz], fiberbins=FIB) + ptypetl = 'ZFAIL weight' + ssrmodel = GOOD/ALL + ssrmean = np.sum(GOOD)/np.sum(ALL) + err[err==0]= 1 + chi2s = 
(ssrmodel-ssrmean)/err + plt.title('{} chi2 = {:.1f}/{}'.format(ptypetl,np.sum(chi2s[np.isfinite(chi2s)]**2),np.sum(np.isfinite(ssrmodel))),fontsize=10) - if not args.focalplane_SSR_chi2: - # fibrewise SSR and the correction - hb = ax[0,cp].scatter(xll,yll,c=ssrmodel/ssrmean,s=2,vmin=1-dv,vmax=1+dv) - if cp == 1: - cb = fig.colorbar(hb, ax=ax[0,1]) - cb.set_label('rescaled SSR',fontsize=12) - plt.text(-150,410,f'{photos[cp]}',fontsize=15,weight='bold') - plt.yticks(alpha=0) + if ptype != 'chi2hist': + if (ptype == 'SSR')|(ptype == 'noZFAIL'): + # fibrewise SSR + dv = 0.05 + vmin = 1-dv + vmax = 1+dv + value = ssrmodel/ssrmean + cblabel = 'rescaled SSR' + # mock weight, don't need to repeat with plots + if split == 'N': + ssr_wtN = 1./(ssrmodel/np.nanmean(ssrmodel)) + ssr_wtN[np.isnan(ssr_wtN)] = 1. + elif split == 'S': + ssr_wtS = 1./(ssrmodel/np.nanmean(ssrmodel)) + ssr_wtS[np.isnan(ssr_wtS)] = 1. + elif ptype == 'chi2': + # fibrewise SSR chi2 + dv = 2 + vmin = -dv + vmax = +dv + value = chi2s + cblabel = r'SSR $\chi^2$' + hb = ax[0,cp].scatter(xll,yll,c=value,s=2,vmin=vmin,vmax=vmax) + if cp == 1: + cb = fig.colorbar(hb, ax=ax[0,1]) + cb.set_label(cblabel,fontsize=12) + plt.text(-150,410,f'{photos[cp]}',fontsize=15,weight='bold') + plt.yticks(alpha=0) + else: + plt.text(-190,410,f'{photos[cp]}',fontsize=15,weight='bold') + plt.ylabel('Y (mm)') + plt.xlabel('X (mm)') + plt.xlim(-470,470) + plt.ylim(-420,470) else: - plt.text(-190,410,f'{photos[cp]}',fontsize=15,weight='bold') - plt.ylabel('Y (mm)') - plt.xlabel('X (mm)') - plt.xlim(-470,470) - plt.ylim(-420,470) + # the histogram of fibrewise SSR chi2 + plt.hist(chi2s[np.isfinite(chi2s)],density=True,label=f'{tp} in {split}') + plt.xlabel('chi2') + if cp ==0: + plt.ylabel('normalised counts') + plt.legend() + + if ptype == 'SSR': + plt.savefig(outdir+'{}_focalplane_success_rate_z{}z{}_{}.png'.format(tp,zmin,zmax,args.version)) else: - plt.hist(chi2s[np.isfinite(chi2s)],density=True,label=f'{tp} in {split}') - plt.xlabel('chi2') - if cp ==0: - plt.ylabel('normalised counts') - plt.legend() + plt.savefig(outdir+'{}_focalplane_success_rate_{}_z{}z{}_{}.png'.format(tp,ptype,zmin,zmax,args.version)) - if split == 'N': - ssr_wtN = 1./(ssrmodel/np.nanmean(ssrmodel)) - ssr_wtN[np.isnan(ssr_wtN)] = 1. - elif split == 'S': - ssr_wtS = 1./(ssrmodel/np.nanmean(ssrmodel)) - ssr_wtS[np.isnan(ssr_wtS)] = 1. 
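# The ptype loop above writes one PNG per variant; a sketch of the file
# naming it produces. The values below are placeholders for illustration,
# in the script they come from the tracer loop and the parsed arguments.
outdir, tp, zmin, zmax, version = 'plots/ssr/', 'ELG_LOPnotqso', 0.8, 1.6, 'v1.1'
for ptype in ['noZFAIL', 'SSR', 'chi2', 'chi2hist']:
    if ptype == 'SSR':
        # the SSR variant keeps the pre-existing file name
        fn = outdir + '{}_focalplane_success_rate_z{}z{}_{}.png'.format(tp, zmin, zmax, version)
    else:
        fn = outdir + '{}_focalplane_success_rate_{}_z{}z{}_{}.png'.format(tp, ptype, zmin, zmax, version)
    print(fn)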
- if not args.focalplane_SSR_chi2: - plt.savefig(outdir+'{}_focalplane_success_rate_z{}z{}_{}.png'.format(tp,zmin,zmax,args.version)) plt.close('all') - else: - plt.savefig(outdir+f'{tp}_chi2_hist.png') - plt.close() #print('ssr_wt',ssr_wt) #print('BIN',list(BIN)) #print('FIB',list(FIB)) #print('FIBER',full['FIBER']) if args.data == 'LSS': - full = Table(fitsio.read(indir+tp+'_full.dat.fits')) + full = Table(fitsio.read(indir+tp+'_full_HPmapcut.dat.fits')) elif args.data == 'mock': full = Table(fitsio.read(indir+'ffa_full_' + tp+'.fits')) From c3a3299e67048d884e45c72e29ac969c0ddcf25e Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 14:02:52 -0500 Subject: [PATCH 094/297] Create run1_AMTLmock_LSS_3_1_window.sh --- scripts/mock_tools/run1_AMTLmock_LSS_3_1_window.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100755 scripts/mock_tools/run1_AMTLmock_LSS_3_1_window.sh diff --git a/scripts/mock_tools/run1_AMTLmock_LSS_3_1_window.sh b/scripts/mock_tools/run1_AMTLmock_LSS_3_1_window.sh new file mode 100755 index 000000000..0c2ec44fa --- /dev/null +++ b/scripts/mock_tools/run1_AMTLmock_LSS_3_1_window.sh @@ -0,0 +1,11 @@ +#!/bin/bash +OUTBASE='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1_window/altmtl{MOCKNUM}' + +cp /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl$1/mock$1/LSScats/*_HPmapcut*.ran.fits /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1_window/altmtl$1/mock$1/LSScats/ + +python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' + +python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer LRG --notqso n --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' + +python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer QSO --notqso n --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' + From 17c389a2f2d49eadf605b4f3e819c36c8ad8d282 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 14:13:43 -0500 Subject: [PATCH 095/297] Update run1_AMTLmock_LSS_3_1_window.sh --- scripts/mock_tools/run1_AMTLmock_LSS_3_1_window.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/mock_tools/run1_AMTLmock_LSS_3_1_window.sh b/scripts/mock_tools/run1_AMTLmock_LSS_3_1_window.sh index 0c2ec44fa..0c4220770 100755 --- a/scripts/mock_tools/run1_AMTLmock_LSS_3_1_window.sh +++ b/scripts/mock_tools/run1_AMTLmock_LSS_3_1_window.sh @@ -3,6 +3,8 @@ OUTBASE='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSu cp /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl$1/mock$1/LSScats/*_HPmapcut*.ran.fits /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1_window/altmtl$1/mock$1/LSScats/ +cp /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl$1/mock$1/LSScats/*frac_tlobs.fits /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1_window/altmtl$1/mock$1/LSScats/ + python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer ELG_LOP 
--notqso y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer LRG --notqso n --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' From 0dd068fa6981b27aae84a98b531a28bc24347485 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 15:36:09 -0500 Subject: [PATCH 096/297] Create patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 44 ++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 scripts/main/patch_HPmapcut.py diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py new file mode 100644 index 000000000..2aa2bf359 --- /dev/null +++ b/scripts/main/patch_HPmapcut.py @@ -0,0 +1,44 @@ +import matplotlib.pyplot as plt +from matplotlib.backends.backend_pdf import PdfPages +import numpy as np +import os +import sys +import argparse + +import fitsio +from astropy.table import join,Table + +import LSS.common_tools as common + + +parser = argparse.ArgumentParser() +parser.add_argument("--version", help="catalog version",default='test') +parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1') +parser.add_argument("--tracers", help="all runs all for given survey",default='all') +parser.add_argument("--verspec",help="version for redshifts",default='iron') +parser.add_argument("--data",help="LSS or mock directory",default='LSS') +args = parser.parse_args() + + +indir = '/global/cfs/cdirs/desi/survey/catalogs/'+args.survey+'/'+args.data+'/'+args.verspec+'/LSScats/'+args.version+'/' + +lssmapdirout = indir+'/hpmaps/' + + + +tps = [args.tracers] +if args.tracers == 'all': + tps = ['QSO','LRG','ELG_LOPnotqso']#'BGS_BRIGHT' + +for tp in tps: + mainp = main(tp,args.verspec,survey=args.survey) + df_cutdisk = fitsio.read(indir+tp+'_full_HPmapcut.dat.fits') + df = fitsio.read(indir+tp+'_full.dat.fits') + mapn = fitsio.read(lssmapdirout+tp+'_mapprops_healpix_nested_nside'+str(nside)+'_N.fits') + maps = fitsio.read(lssmapdirout+tp+'_mapprops_healpix_nested_nside'+str(nside)+'_S.fits') + mapcuts = mainp.mapcuts + df_cut = common.apply_map_veto_arrays(df,mapn,maps,mapcuts) + sel_idmatch = np.isin(df_cut['TARGETID'],df_cutdisk['TARGETID']) + df_cutnomatch = df_cut[~sel_idmatch] + df_comb = np.concatenate((df_cutdisk,df_cutnomatch)) + print(tp,len(df_comb),len(np.unique(df_comb['TARGETID']))) From 0cb7e7680f75b2da5f9f3efcc41684e42d652b66 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 15:37:12 -0500 Subject: [PATCH 097/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 2aa2bf359..18f2fd42e 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -9,6 +9,7 @@ from astropy.table import join,Table import LSS.common_tools as common +from LSS.globals import main parser = argparse.ArgumentParser() From c66ab9220ac4e42619b5e9931b788bdb2f88497b Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 15:38:12 -0500 Subject: [PATCH 098/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 18f2fd42e..4f65da308 100644 --- a/scripts/main/patch_HPmapcut.py 
+++ b/scripts/main/patch_HPmapcut.py @@ -12,6 +12,7 @@ from LSS.globals import main + parser = argparse.ArgumentParser() parser.add_argument("--version", help="catalog version",default='test') parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1') @@ -25,7 +26,7 @@ lssmapdirout = indir+'/hpmaps/' - +nside = 256 tps = [args.tracers] if args.tracers == 'all': From c6521de427e2bd847d3c96373f28fc7dac84d728 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 15:43:30 -0500 Subject: [PATCH 099/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 4f65da308..fc0aec38b 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -35,7 +35,16 @@ for tp in tps: mainp = main(tp,args.verspec,survey=args.survey) df_cutdisk = fitsio.read(indir+tp+'_full_HPmapcut.dat.fits') + df_cutdisk_cols = list(df_cutdisk.dtype.names) df = fitsio.read(indir+tp+'_full.dat.fits') + df_cols = list(df.dtype.names) + for name in df_cols: + if name not in df_cutdisk_cols: + print(name+' not in HPmapcut file') + for name in df_cutdisk_cols: + if name not in df_cols: + print(name+' not in not cut file') + mapn = fitsio.read(lssmapdirout+tp+'_mapprops_healpix_nested_nside'+str(nside)+'_N.fits') maps = fitsio.read(lssmapdirout+tp+'_mapprops_healpix_nested_nside'+str(nside)+'_S.fits') mapcuts = mainp.mapcuts From 7b8c13f2247edcc417e6d987decb4ce79b7e1c5e Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 16:02:35 -0500 Subject: [PATCH 100/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index fc0aec38b..b43c85591 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -34,16 +34,18 @@ for tp in tps: mainp = main(tp,args.verspec,survey=args.survey) - df_cutdisk = fitsio.read(indir+tp+'_full_HPmapcut.dat.fits') + df_cutdisk = Table(fitsio.read(indir+tp+'_full_HPmapcut.dat.fits')) + df_cutdisk.remove_columns(['BITWEIGHTS','PROBOBS']) df_cutdisk_cols = list(df_cutdisk.dtype.names) - df = fitsio.read(indir+tp+'_full.dat.fits') + df = Table(fitsio.read(indir+tp+'_full.dat.fits')) df_cols = list(df.dtype.names) for name in df_cols: if name not in df_cutdisk_cols: print(name+' not in HPmapcut file') for name in df_cutdisk_cols: if name not in df_cols: - print(name+' not in not cut file') + df[name] = np.ones(len(df)) + print(name+' added to not file as 1') mapn = fitsio.read(lssmapdirout+tp+'_mapprops_healpix_nested_nside'+str(nside)+'_N.fits') maps = fitsio.read(lssmapdirout+tp+'_mapprops_healpix_nested_nside'+str(nside)+'_S.fits') @@ -53,3 +55,9 @@ df_cutnomatch = df_cut[~sel_idmatch] df_comb = np.concatenate((df_cutdisk,df_cutnomatch)) print(tp,len(df_comb),len(np.unique(df_comb['TARGETID']))) + if tp[:3] != 'BGS': + bitf = fitsio.read(mainp.darkbitweightfile) + else: + bitf = fitsio.read(mainp.brightbitweightfile) + df_comb = join(ff,bitf,keys=['TARGETID'],join_type='left') + print(len(df_comb)) From b149eb0cf8d9306716b4b3989ecc4e348f61d5dc Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 16:03:07 -0500 Subject: [PATCH 101/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/patch_HPmapcut.py 
b/scripts/main/patch_HPmapcut.py index b43c85591..69d953799 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -35,7 +35,7 @@ for tp in tps: mainp = main(tp,args.verspec,survey=args.survey) df_cutdisk = Table(fitsio.read(indir+tp+'_full_HPmapcut.dat.fits')) - df_cutdisk.remove_columns(['BITWEIGHTS','PROBOBS']) + df_cutdisk.remove_columns(['BITWEIGHTS','PROB_OBS']) df_cutdisk_cols = list(df_cutdisk.dtype.names) df = Table(fitsio.read(indir+tp+'_full.dat.fits')) df_cols = list(df.dtype.names) From d3740ef21851e642ad069e13922bfa08a5f6cbe2 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 16:07:29 -0500 Subject: [PATCH 102/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 69d953799..2aa21c362 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -6,7 +6,7 @@ import argparse import fitsio -from astropy.table import join,Table +from astropy.table import join,Table,vstack import LSS.common_tools as common from LSS.globals import main @@ -53,7 +53,8 @@ df_cut = common.apply_map_veto_arrays(df,mapn,maps,mapcuts) sel_idmatch = np.isin(df_cut['TARGETID'],df_cutdisk['TARGETID']) df_cutnomatch = df_cut[~sel_idmatch] - df_comb = np.concatenate((df_cutdisk,df_cutnomatch)) + #df_comb = np.concatenate((df_cutdisk,df_cutnomatch)) + df_comb = vstack((df_cutdisk,df_cutnomatch)) print(tp,len(df_comb),len(np.unique(df_comb['TARGETID']))) if tp[:3] != 'BGS': bitf = fitsio.read(mainp.darkbitweightfile) From 506e73bd0b165946417fea69f3470d4aa06d1041 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 16:07:54 -0500 Subject: [PATCH 103/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 2aa21c362..3b2df5013 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -62,3 +62,4 @@ bitf = fitsio.read(mainp.brightbitweightfile) df_comb = join(ff,bitf,keys=['TARGETID'],join_type='left') print(len(df_comb)) + print(df_comb.dtype.names) From 25ee31e61f3c77f9ff098c1d5d792a065a784ca0 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 16:09:50 -0500 Subject: [PATCH 104/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 3b2df5013..7b1eda23e 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -60,6 +60,6 @@ bitf = fitsio.read(mainp.darkbitweightfile) else: bitf = fitsio.read(mainp.brightbitweightfile) - df_comb = join(ff,bitf,keys=['TARGETID'],join_type='left') + df_comb = join(df,bitf,keys=['TARGETID'],join_type='left') print(len(df_comb)) print(df_comb.dtype.names) From f6048e9d8d0dd82fec40befd340f5cb952546441 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 16:14:22 -0500 Subject: [PATCH 105/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 7b1eda23e..518af68c9 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -61,5 +61,5 @@ else: bitf = fitsio.read(mainp.brightbitweightfile) df_comb = join(df,bitf,keys=['TARGETID'],join_type='left') - 
print(len(df_comb)) + print(len(df_comb['TARGETID'])) print(df_comb.dtype.names) From 6ae76fb8815c3089dc71c77fec3a500b7db6109c Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 16:20:12 -0500 Subject: [PATCH 106/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 518af68c9..871c043fe 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -60,6 +60,6 @@ bitf = fitsio.read(mainp.darkbitweightfile) else: bitf = fitsio.read(mainp.brightbitweightfile) - df_comb = join(df,bitf,keys=['TARGETID'],join_type='left') + df_comb = join(df_comb,bitf,keys=['TARGETID'],join_type='left') print(len(df_comb['TARGETID'])) print(df_comb.dtype.names) From fae99cefaf3cb38906095f393beb8eeeb9363399 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 16:31:55 -0500 Subject: [PATCH 107/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 871c043fe..5d961e96b 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -30,7 +30,7 @@ tps = [args.tracers] if args.tracers == 'all': - tps = ['QSO','LRG','ELG_LOPnotqso']#'BGS_BRIGHT' + tps = ['ELG_LOPnotqso','QSO','LRG']#'BGS_BRIGHT' for tp in tps: mainp = main(tp,args.verspec,survey=args.survey) From 5b23bfdde150f6c978f3d6f234b20f0460d2c5f9 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 16:48:03 -0500 Subject: [PATCH 108/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 5d961e96b..b9d70c0db 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -63,3 +63,4 @@ df_comb = join(df_comb,bitf,keys=['TARGETID'],join_type='left') print(len(df_comb['TARGETID'])) print(df_comb.dtype.names) + common.write_LSS(df_comb,indir+tp+'_full_HPmapcut.dat.fits') From 2b11fed23aeedac062bc079fc836cf3072216ac7 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 16:49:32 -0500 Subject: [PATCH 109/297] Update LSSpipe_Y1.txt --- Sandbox/LSSpipe_Y1.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt index 60e0a2fbc..d781f3bf0 100644 --- a/Sandbox/LSSpipe_Y1.txt +++ b/Sandbox/LSSpipe_Y1.txt @@ -276,4 +276,8 @@ python scripts/main/mkCat_main.py --type ELG_LOP --notqso y --basedir /global/cf #re-run blinding for ELG_LOP srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type ELG_LOPnotqso --wsyscol WEIGHT_SN --version v1.1 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y +#patch HPmapcut files to return bad redshifts that were cut by accident in the IMLIN fitting +#from an interactive node (runs out of memory otherwise) +python scripts/main/patch_HPmapcut.py --version v1.1 + From e9a1090221af1922a11cf23a6951c97d71614a5f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 17:26:46 -0500 Subject: [PATCH 110/297] run BGS EZ mock FFA LSS cats --- scripts/mock_tools/ez_cat_sbatch_BGS.sh | 12 ++++++++++++ scripts/mock_tools/run1_EZmockBGS_LSS.sh | 7 +++++++ 2 files changed, 19 insertions(+) create mode 100755 
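Taken together, PATCHes 096 through 108 leave scripts/main/patch_HPmapcut.py doing the following for each tracer. This condensed sketch reuses the names defined earlier in that script (indir, tp, mapn, maps, mapcuts, mainp) and assumes the LSS helpers behave as they are called in the diffs above:

import numpy as np
import fitsio
from astropy.table import Table, join, vstack
import LSS.common_tools as common

# Drop the bitweight columns from the on-disk HPmapcut catalog; they are
# re-joined from the per-program bitweight file at the end.
df_cutdisk = Table(fitsio.read(indir + tp + '_full_HPmapcut.dat.fits'))
df_cutdisk.remove_columns(['BITWEIGHTS', 'PROB_OBS'])
df = Table(fitsio.read(indir + tp + '_full.dat.fits'))
# Stub columns that exist only in the HPmapcut file so vstack succeeds.
for name in df_cutdisk.dtype.names:
    if name not in df.dtype.names:
        df[name] = np.ones(len(df))
# Re-apply the healpix-map veto to the uncut catalog and keep the rows
# that the on-disk HPmapcut file is missing (the accidentally cut targets).
df_cut = common.apply_map_veto_arrays(df, mapn, maps, mapcuts)
sel = np.isin(df_cut['TARGETID'], df_cutdisk['TARGETID'])
df_comb = vstack((df_cutdisk, df_cut[~sel]))
bitf = fitsio.read(mainp.darkbitweightfile if tp[:3] != 'BGS'
                   else mainp.brightbitweightfile)
df_comb = join(df_comb, bitf, keys=['TARGETID'], join_type='left')
common.write_LSS(df_comb, indir + tp + '_full_HPmapcut.dat.fits')
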
scripts/mock_tools/ez_cat_sbatch_BGS.sh create mode 100755 scripts/mock_tools/run1_EZmockBGS_LSS.sh diff --git a/scripts/mock_tools/ez_cat_sbatch_BGS.sh b/scripts/mock_tools/ez_cat_sbatch_BGS.sh new file mode 100755 index 000000000..73333c3b2 --- /dev/null +++ b/scripts/mock_tools/ez_cat_sbatch_BGS.sh @@ -0,0 +1,12 @@ +#!/bin/bash +#SBATCH --time=00:30:00 +#SBATCH --qos=regular +#SBATCH --nodes=1 +#SBATCH --constraint=cpu +#SBATCH --array=1-200 +#SBATCH --account=desi + +source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main +PYTHONPATH=$PYTHONPATH:$HOME/LSS/py + +srun scripts/mock_tools/run1_EZmockBGS_LSS.sh $SLURM_ARRAY_TASK_ID \ No newline at end of file diff --git a/scripts/mock_tools/run1_EZmockBGS_LSS.sh b/scripts/mock_tools/run1_EZmockBGS_LSS.sh new file mode 100755 index 000000000..c0b06f74c --- /dev/null +++ b/scripts/mock_tools/run1_EZmockBGS_LSS.sh @@ -0,0 +1,7 @@ +#!/bin/bash +python scripts/mock_tools/ffa2clus_fast.py --mockver EZmock/FFA_BGS --tracer BGS --realization $1 +mv $SCRATCH/EZmock/FFA_BGS/mock$1/*GC* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/EZmock/FFA_BGS/mock$1/ +mv $SCRATCH/EZmock/FFA_BGS/mock$1/*nz* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/EZmock/FFA_BGS/mock$1/ +rm $SCRATCH/EZmock/FFA_BGS/mock$1/* +chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/EZmock/FFA_BGS/mock$1/*clustering* +chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/EZmock/FFA_BGS/mock$1/*nz* \ No newline at end of file From 40cdee1c78d43df286b9e42e4e219cfdbedbe0c1 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 5 Feb 2024 17:28:01 -0500 Subject: [PATCH 111/297] Update ez_cat_sbatch_BGS.sh --- scripts/mock_tools/ez_cat_sbatch_BGS.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/ez_cat_sbatch_BGS.sh b/scripts/mock_tools/ez_cat_sbatch_BGS.sh index 73333c3b2..62be5debd 100755 --- a/scripts/mock_tools/ez_cat_sbatch_BGS.sh +++ b/scripts/mock_tools/ez_cat_sbatch_BGS.sh @@ -3,7 +3,7 @@ #SBATCH --qos=regular #SBATCH --nodes=1 #SBATCH --constraint=cpu -#SBATCH --array=1-200 +#SBATCH --array=2-200 #SBATCH --account=desi source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main From 13b9283ecdc3d7ae9392a1e78cce0463b737588e Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Wed, 24 Jan 2024 10:11:03 -0800 Subject: [PATCH 112/297] a --- bin/Y1ALTMTLRealizationsBRIGHT_mock.sh | 2 +- bin/Y1Bitweights128RealizationsDARK_mock.sh | 2 +- bin/dateLoopAltMTLBugFix.sh | 4 ++-- scripts/mock_tools/add_extra_tilesTracker.py | 10 +++++----- scripts/mock_tools/prepare_script_bright.sh | 12 ++++++------ .../run_Y1SecondGen_initialledger_batch.sh | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh b/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh index 159715dba..a789fb76f 100755 --- a/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh +++ b/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh @@ -82,7 +82,7 @@ seed=3593589 #Number of realizations to generate. 
Ideally a multiple of 64 for bitweights #However, you can choose smaller numbers for debugging mockinit=0 -mockend=1 +mockend=5 let ndir=$mockend-$mockinit diff --git a/bin/Y1Bitweights128RealizationsDARK_mock.sh b/bin/Y1Bitweights128RealizationsDARK_mock.sh index 5b0068350..c6ea4f6b5 100755 --- a/bin/Y1Bitweights128RealizationsDARK_mock.sh +++ b/bin/Y1Bitweights128RealizationsDARK_mock.sh @@ -82,7 +82,7 @@ fi seed=3593589 #Number of realizations to generate. Ideally a multiple of 64 for bitweights #However, you can choose smaller numbers for debugging -ndir=256 +ndir=128 #Uncomment second option if you want to clobber already existing files for Alt MTL generation overwrite='' diff --git a/bin/dateLoopAltMTLBugFix.sh b/bin/dateLoopAltMTLBugFix.sh index f83ed5196..16988235e 100755 --- a/bin/dateLoopAltMTLBugFix.sh +++ b/bin/dateLoopAltMTLBugFix.sh @@ -37,7 +37,7 @@ fi if [ $QVal = 'regular' ]; then - srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:17881308 $path2LSS/runAltMTLParallel.py $argstring + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:20481531 $path2LSS/runAltMTLParallel.py $argstring fi if [ $QVal = 'debug' ]; @@ -53,4 +53,4 @@ fi # exit 1234 #fi -#done \ No newline at end of file +#done diff --git a/scripts/mock_tools/add_extra_tilesTracker.py b/scripts/mock_tools/add_extra_tilesTracker.py index 4b814949a..0dd4d7be3 100644 --- a/scripts/mock_tools/add_extra_tilesTracker.py +++ b/scripts/mock_tools/add_extra_tilesTracker.py @@ -2,13 +2,13 @@ from astropy.table import Table,vstack import os -program = 'dark' +program = 'bright' -rmin = 10 -rmax = 11 +rmin = 0 +rmax = 1 -path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000' -#path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM}/Univ000' +#path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000' +path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM}/Univ000' extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv') diff --git a/scripts/mock_tools/prepare_script_bright.sh b/scripts/mock_tools/prepare_script_bright.sh index 64b7f3454..02a8d22d3 100755 --- a/scripts/mock_tools/prepare_script_bright.sh +++ b/scripts/mock_tools/prepare_script_bright.sh @@ -1,9 +1,9 @@ #srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 1 --realmax 4 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 -srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 4 --realmax 7 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 4 --realmax 7 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 0 --realmax 4 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 -srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 11 --realmax 14 
--prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 11 --realmax 14 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 7 --realmax 11 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 -srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 14 --realmax 17 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 -srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 17 --realmax 21 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 -srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 21 --realmax 23 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 -srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 23 --realmax 25 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 14 --realmax 17 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 17 --realmax 21 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 21 --realmax 23 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 +srun -N 1 -C cpu -t 02:00:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 24 --realmax 25 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 diff --git a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh index 724511790..259a7f511 100755 --- a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh +++ b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh @@ -1,9 +1,9 @@ -SeconGenVer=AbacusSummit_v3_1 #AbacusSummit -for j in {4..24} +SeconGenVer=AbacusSummitBGS #AbacusSummit +for j in {0..4} do #j=0 echo $j #echo $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled DARK +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled BRIGHT #python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled DARK done From 2da8634beeb9943b3be9195119caa1d99fcef069 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Tue, 6 Feb 2024 04:26:16 -0800 Subject: [PATCH 113/297] changes --- bin/Y1ALTMTLRealizationsBRIGHT_mock2.sh | 319 ++++++++++++++++++ bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh | 8 +- bin/Y1Bitweights128RealizationsDARK_mock.sh | 15 +- scripts/mock_tools/abBGSamtl_cat_sbatch.sh | 12 + scripts/mock_tools/add_extra_tilesTracker.py | 4 +- scripts/mock_tools/copyfiles_bright.py | 5 + .../mock_tools/getpota_Y1_bright_script.sh | 21 ++ scripts/mock_tools/initAMTL.py | 6 + scripts/mock_tools/mkCat_SecondGen_amtl.py | 5 +- scripts/mock_tools/prepare_mocks_Y1_bright.py | 4 +- scripts/mock_tools/prepare_script_bright.sh | 12 +- scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh | 7 + .../run_Y1SecondGen_initialledger_batch.sh | 2 +- 13 files changed, 394 insertions(+), 26 deletions(-) create mode 100755 bin/Y1ALTMTLRealizationsBRIGHT_mock2.sh create mode 100755 scripts/mock_tools/abBGSamtl_cat_sbatch.sh create mode 100644 scripts/mock_tools/copyfiles_bright.py create mode 100755 scripts/mock_tools/getpota_Y1_bright_script.sh create mode 100644 scripts/mock_tools/initAMTL.py create mode 100755 scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh diff --git a/bin/Y1ALTMTLRealizationsBRIGHT_mock2.sh b/bin/Y1ALTMTLRealizationsBRIGHT_mock2.sh new file mode 100755 index 000000000..4255904df --- /dev/null +++ b/bin/Y1ALTMTLRealizationsBRIGHT_mock2.sh @@ -0,0 +1,319 @@ +#!/bin/bash +start=`date +%s.%N` + +##TEMPrealization=0 + +#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written +#simName=JL_DebugReprocReprod2 +simName="altmtl{mock_number}" +#Location where you have cloned the LSS Repo +path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin/ + +# Flags for debug/verbose mode/profiling code time usage. +# Uncomment second set of options to turn on the modes +#debug='' +#verbose='' +profile='' +debug='--debug' +verbose='--verbose' +#profile='--profile' + +#if [ -z "$debug" ] +#then +# echo "\$debug is empty" +#else +# echo "\$debug is set" +# pwd +# InitWorkingDirectory=`pwd` +# cd $path2LSS +# cd .. +# pwd +# pip install --user . +# cd $InitWorkingDirectory +# pwd +# echo "end of pip in script attempt" +#fi + +#Uncomment second option if running on mocks +#mock='' +mock='--mock' + +#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory +#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" +#However, you should specify your own directory to a. not overwrite the survey alt MTLs +# and b. keep your alt MTLs somewhere that you have control/access + +#Uncomment the following line to set your own/nonscratch directory +#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ +#ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/ + +if [[ "${NERSC_HOST}" == "cori" ]]; then + CVal='haswell' + QVal='interactive' + ProcPerNode=32 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$CSCRATCH + else + echo "ALTMTLHOME Already set. 
ALTMTLHOME=$ALTMTLHOME" + fi +elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then + srunConfig='-C cpu -q regular' + CVal='cpu' + QVal='interactive' + ProcPerNode=128 + if [[ -z "${ALTMTLHOME}" ]]; then + ALTMTLHOME=$PSCRATCH + else + echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" + fi + +else + echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" + exit 1234 +fi + + + + +#Options for InitializeAltMTLs + +#Random seed. Change to any integer you want (or leave the same) +#If seed is different between two otherwise identical runs, the initial MTLs will also be different +#seed is also saved in output directory +#seed=14126579 +seed=3593589 +#Number of realizations to generate. Ideally a multiple of 64 for bitweights +#However, you can choose smaller numbers for debugging +#Mock realization +mockinit=5 +mockend=25 +let ndir=$mockend-$mockinit + + +#Uncomment second option if you want to clobber already existing files for Alt MTL generation +overwrite='' +#overwrite='--overwrite' + +#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") +obscon='BRIGHT' +#obscon='BRIGHT' + +#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) +#survey='sv3' +survey='main' +# options are default None (empty strings). Uncommenting the second options will set them to the Y1 start and end dates. +startDate='' +#endDate='' +#startDate='2021-05-13T08:15:37+00:00' +endDate='2022-06-24T00:00:00+00:00' + +#For rundate formatting in simName, either manually modify the string below +#to be the desired date or comment that line out and uncomment the +#following line to autogenerate date strings. +#To NOT use any date string specification, use the third line, an empty string +#datestring='071322' +#datestring=`date +%y%m%d` +datestring='' + +#Can save time in MTL generation by first writing files to local tmp directory and then copying over later +#uncommenting the second option will directly write to your output directory +usetmp='' +#usetmp='--dontUseTemp' + +if [ -z $usetmp ] +then + outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` +else + outputMTLDirBaseBase=$ALTMTLHOME +fi +printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey +printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey + +#List of healpixels to create Alt MTLs for +#hpListFile="$path2LSS/MainSurveyHPList_mock.txt" +##TEMPhpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$realization/initled/hpxlist_dark.txt" +#hpListFile="$path2LSS/MainSurveyHPList.txt" +#hpListFile="$path2LSS/DebugMainHPList.txt" +#hpListFile="$path2LSS/SV3HPList.txt" + +#These two options only are considered if the obscon is BRIGHT +#First option indicates whether to shuffle the top level priorities +#of BGS_FAINT/BGS_FAINT_HIP. Uncomment section option to turn off shuffling of bright time priorities +#Second option indicates what fraction/percent +#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 + +#shuffleBrightPriorities='--shuffleBrightPriorities' +shuffleBrightPriorities='' + + +shuffleELGPriorities='' +#shuffleELGPriorities='--shuffleELGPriorities' + +#PromoteFracBGSFaint=0.2 +PromoteFracBGSFaint=0.0 +#PromoteFracELG=0.1 +PromoteFracELG=0. + +# location of original MTLs to shuffle. +# Default directory is a read only mount of the CFS filesystem +# You can only access that directory from compute nodes. 
+# Do NOT use the commented out directory (the normal mount of CFS) +# unless the read only mount is broken +##TEMPexampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$realization/initled +#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ +#Options for DateLoopAltMTL and runAltMTLParallel + +#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). +#Default = Empty String/False. Uncomment second option if you want to restart from the first observations +#PLEASE DO NOT CHANGEME +echo "Fix QR resetting for new argparse usage" +qR='' +#qR='-qr' + +#Number of observation dates to loop through +#Defaults to 40 dates for SV3 +NObsDates=99999 + +# Whether to submit a new job with dateLoopAltMTL for each date +# or to submit a single job +# multiDate=0 +multiDate='--multiDate' +echo 'setting QVal here for debug. Fix later.' +#QVal='debug' +QVal='regular' +#QVal='interactive' +# + + + +#Number of nodes to run on. This will launch up to 64*N jobs +#if that number of alternate universes have already been generated +#Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually +NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode )) +#echo $NNodes +#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs +#This should only be turned on for SV testing/debugging purposes +#This should not be required for main survey debugging. +getosubp='' +#getosubp='--getosubp' + +#shuffleSubpriorities(reproducing) must be left as empty strings to ensure +#subpriorities are shuffled. debug mode for main survey +#will only require these flags to be set by uncommenting second options + +dontShuffleSubpriorities='' +reproducing='' +#dontShuffleSubpriorities='--dontShuffleSubpriorities' +#reproducing='--reproducing' +#Include secondary targets? +secondary='' +#secondary='--secondary' + + +#If running from mocks, must set target directory. +#Otherwise this is optional +#targfile='' #CHANGEME IF RUNNING ON MOCKS +#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory +#targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA{mock_number}.fits" +targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/forFA{mock_number}.fits" +#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' + + +#Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger +numobs_from_ledger='' +#numobs_from_ledger='--NumObsNotFromLedger' + +#Uncomment second line to force redo fiber assignment if it has already been done. +redoFA='' +#redoFA='--redoFA' + + +#Options for MakeBitweightsParallel +#True/False(1/0) as to whether to split bitweight calculation +#among nodes by MPI between realizations +#splitByReal=1 + +#Split the calculation of bitweights into splitByChunk +#chunks of healpixels. 
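The flag groups above are what make each universe differ: unless --getosubp or --dontShuffleSubpriorities is set, every alternate MTL re-draws target subpriorities so that fiber-assignment ties resolve differently from realization to realization. Reduced to a sketch (shuffle_subpriorities is an illustrative stand-in, not the repository's implementation):

import numpy as np

def shuffle_subpriorities(mtl, seed, univ):
    # One seeded RNG per universe keeps each realization reproducible
    # while making the realizations differ from one another; nothing
    # else in the ledger is touched.
    rng = np.random.default_rng(seed + univ)
    mtl['SUBPRIORITY'] = rng.permutation(np.asarray(mtl['SUBPRIORITY']))
    return mtl
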
+#splitByChunk=1 + +#Set to true (1) if you want to clobber already existing bitweight files +overwrite2='' +#overwrite2='--overwrite' +#Actual running of scripts + +#Copy this script to output directory for reproducbility +thisFileName=$outputMTLFinalDestination/$0 + +echo $thisFileName + +#if [ -f "$thisFileName" ] +#then +# echo "File is found. Checking to see it is identical to the original." +# cmp $0 $thisFileName +# comp=$? +# if [[ $comp -eq 1 ]] +# then +# echo "Files are not identical." +# echo "If this is intended, please delete or edit the original copied script at $thisFileName" +# echo "If this is unintended, you can reuse the original copied script at that same location" +# echo "goodbye" +# exit 3141 +# elif [[ $comp -eq 0 ]] +# then +# echo "files are same, continuing" +# else +# echo "Something has gone very wrong. Exit code for cmp was $a" +# exit $a +# fi +#else +# echo "Copied script is not found. Copying now, making directories as needed." +# mkdir -p $outputMTLFinalDestination +# cp $SLURM_SUBMIT_DIR $0 $outputMTLFinalDestination/$0 +#fi + +if [ -d "$outputMTLFinalDestination" ] +then + echo "output final directory exists" + echo $outputMTLFinalDestination +else + echo "output final directory does not exist. Creating and copying script there" + mkdir -p $outputMTLFinalDestination + cp $0 $outputMTLFinalDestination +fi + +if [ -z $getosubp ] +then + touch $outputMTLFinalDestination/GetOSubpTrue +fi + +printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring + +runtimeInit=$( echo "$endInit - $start" | bc -l ) +argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate $reproducing --mockmin=$mockinit --mockmax=$mockend" +echo 'argstring for dateloop' +echo $argstring +nohup bash $path2LSS/dateLoopAltMTLBugFix_mock_batch.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL + +endDL=`date +%s.%N` + +if [ $? 
-ne 0 ]; then + runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) + echo "runtime for Dateloop of $NObsDates days" + echo $runtimeDateLoop + exit 12345 +fi +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop +exit 54321 + + + +runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) + +echo "runtime for Dateloop of $NObsDates days" +echo $runtimeDateLoop diff --git a/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh b/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh index faffd575f..e53fc3d02 100755 --- a/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh +++ b/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh @@ -3,7 +3,7 @@ start=`date +%s.%N` #simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written #simName=JL_DebugReprocReprod2 -simName="altmtl0" +simName="altmtl5" #Location where you have cloned the LSS Repo path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin @@ -124,7 +124,7 @@ printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $s #List of healpixels to create Alt MTLs for #hpListFile="$path2LSS/MainSurveyHPList_mock.txt" -hpListFile="/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl0/initled/hpxlist_bright.txt" +hpListFile="/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl5/initled/hpxlist_bright.txt" #hpListFile="$path2LSS/DebugMainHPList.txt" #hpListFile="$path2LSS/SV3HPList.txt" @@ -156,7 +156,7 @@ PromoteFracELG=0.1 #exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ #exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ #Options for DateLoopAltMTL and runAltMTLParallel -exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl0/initled +exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl5/initled #Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). #Default = Empty String/False. Uncomment second option if you want to restart from the first observations @@ -205,7 +205,7 @@ secondary='' #Otherwise this is optional #targfile='' #CHANGEME IF RUNNING ON MOCKS #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory -targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/forFA0.fits" +targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/forFA5.fits" #targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' diff --git a/bin/Y1Bitweights128RealizationsDARK_mock.sh b/bin/Y1Bitweights128RealizationsDARK_mock.sh index c6ea4f6b5..fe51c8306 100755 --- a/bin/Y1Bitweights128RealizationsDARK_mock.sh +++ b/bin/Y1Bitweights128RealizationsDARK_mock.sh @@ -3,7 +3,7 @@ start=`date +%s.%N` #simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written #simName=JL_DebugReprocReprod2 -simName=altmtl0_R256 +simName=altmtl1_R64 #Location where you have cloned the LSS Repo path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin/ @@ -82,7 +82,7 @@ fi seed=3593589 #Number of realizations to generate. 
Ideally a multiple of 64 for bitweights #However, you can choose smaller numbers for debugging -ndir=128 +ndir=64 #Uncomment second option if you want to clobber already existing files for Alt MTL generation overwrite='' @@ -128,7 +128,7 @@ printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $s #hpListFile="$path2LSS/MainSurveyHPList.txt" #hpListFile="$path2LSS/DebugMainHPList.txt" #hpListFile="$path2LSS/SV3HPList.txt" -hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl0/initled/hpxlist_dark.txt" +hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1/initled/hpxlist_dark.txt" #These two options only are considered if the obscon is BRIGHT #First option indicates whether to shuffle the top level priorities @@ -153,7 +153,7 @@ PromoteFracELG=0.0 # You can only access that directory from compute nodes. # Do NOT use the commented out directory (the normal mount of CFS) # unless the read only mount is broken -exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl0/initled/ +exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1/initled/ #exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ #exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ #exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ @@ -209,7 +209,7 @@ secondary='' #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory #targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' -targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA0.fits" +targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA1.fits" #Default is use numobs from ledger. 
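The "multiple of 64" recommendation exists because the downstream bitweight step packs one observed/not-observed bit per realization into 64-bit integers, from which PROB_OBS is the fraction of realizations in which a target was reached. A rough sketch of that packing (pack_bitweights here is illustrative, not the code in bin/MakeBitweights.py):

import numpy as np

def pack_bitweights(obs):
    # obs: bool array of shape (ntarget, nreal); True where the target
    # got a fiber in that alternate realization.
    ntarg, nreal = obs.shape
    assert nreal % 64 == 0  # one int64 word per 64 realizations
    bw = np.zeros((ntarg, nreal // 64), dtype=np.int64)
    for i in range(nreal):
        # bit 63 wraps into the sign bit, which is fine here:
        # only the bit pattern is used downstream.
        bw[:, i // 64] |= obs[:, i].astype(np.int64) << (i % 64)
    prob_obs = obs.mean(axis=1)  # PROB_OBS per target
    return bw, prob_obs
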
Uncomment second option to set numobs NOT from ledger numobs_from_ledger='' @@ -281,8 +281,8 @@ fi printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $date -echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM" -#srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive --dependency=afterany:20412532 $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM +#echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM" +#srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM #cp -r $outputMTLFinalDestination/ "$ALTMTLHOME/BACKUPInitial_$simName/" #exit 1234 @@ -293,7 +293,6 @@ echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclu # echo "runtime for initialization" # echo $runtimeInit #fi - #endInit=`date +%s.%N` #runtimeInit=$( echo "$endInit - $start" | bc -l ) #echo "runtime for initialization" diff --git a/scripts/mock_tools/abBGSamtl_cat_sbatch.sh b/scripts/mock_tools/abBGSamtl_cat_sbatch.sh new file mode 100755 index 000000000..8451b66ac --- /dev/null +++ b/scripts/mock_tools/abBGSamtl_cat_sbatch.sh @@ -0,0 +1,12 @@ +#!/bin/bash +#SBATCH --time=02:30:00 +#SBATCH --qos=regular +#SBATCH --nodes=1 +#SBATCH --constraint=cpu +#SBATCH --array=0-24 +#SBATCH --account=desi + +source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main +PYTHONPATH=$PYTHONPATH:$HOME/LSS/py + 
+srun /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh $SLURM_ARRAY_TASK_ID diff --git a/scripts/mock_tools/add_extra_tilesTracker.py b/scripts/mock_tools/add_extra_tilesTracker.py index 0dd4d7be3..97b4364f8 100644 --- a/scripts/mock_tools/add_extra_tilesTracker.py +++ b/scripts/mock_tools/add_extra_tilesTracker.py @@ -4,8 +4,8 @@ program = 'bright' -rmin = 0 -rmax = 1 +rmin = 1 +rmax = 25 #path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000' path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM}/Univ000' diff --git a/scripts/mock_tools/copyfiles_bright.py b/scripts/mock_tools/copyfiles_bright.py new file mode 100644 index 000000000..a5ff4db16 --- /dev/null +++ b/scripts/mock_tools/copyfiles_bright.py @@ -0,0 +1,5 @@ +import os +for i in range(7,25): + print(i) + os.system('cp -R /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl%d/initled/main /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl%d/Univ000/.' %(i,i)) + diff --git a/scripts/mock_tools/getpota_Y1_bright_script.sh b/scripts/mock_tools/getpota_Y1_bright_script.sh new file mode 100755 index 000000000..04d8d0f60 --- /dev/null +++ b/scripts/mock_tools/getpota_Y1_bright_script.sh @@ -0,0 +1,21 @@ +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --prog BRIGHT --mock_version BGS +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --prog BRIGHT --mock_version BGS diff --git a/scripts/mock_tools/initAMTL.py b/scripts/mock_tools/initAMTL.py new file mode 100644 index 000000000..f8e930dc1 --- /dev/null +++ b/scripts/mock_tools/initAMTL.py @@ -0,0 +1,6 @@ +import os + +path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS' +for i in range(6,25): +# os.system('cp -R %s/altmtl%d/initled/main %s/altmtl%d/Univ000/.' %(path, i, path, i)) + os.system('cp %s/altmtl5/Univ000/*.ecsv %s/altmtl%d/Univ000/.' 
%(path, path, i))

diff --git a/scripts/mock_tools/mkCat_SecondGen_amtl.py b/scripts/mock_tools/mkCat_SecondGen_amtl.py
index 9022603d9..1bcc49329 100644
--- a/scripts/mock_tools/mkCat_SecondGen_amtl.py
+++ b/scripts/mock_tools/mkCat_SecondGen_amtl.py
@@ -214,7 +214,8 @@ def test_dir(value):
     cols = ['TARGETID','RA','DEC','PRIORITY_INIT','DESI_TARGET']
     if pdir == 'bright':
-        cols.append('BGS_TARGET', 'R_MAG_ABS')
+        cols.append('BGS_TARGET')
+        cols.append('R_MAG_ABS')
     pa = common.combtiles_wdup_altmtl('FAVAIL', tiles, fbadir, os.path.join(outdir, 'datcomb_' + pdir + 'wdup.fits'), tarf, addcols=cols)

     fcoll = os.path.join(lssdir, 'collision_'+pdir+'_mock%d.fits' % mocknum)
@@ -455,9 +456,9 @@ def _parfun2(rann):
             #ct.mkclusdat(os.path.join(dirout,args.tracer+notqso),tp=args.tracer,dchi2=None,tsnrcut=0,zmin=zmin,zmax=zmax)#,ntilecut=ntile)

         if args.ccut is not None:
-            targets = Table(fitsio.read(os.path.join(args.targDir, 'forFA{MOCKNUM}.fits').format(MOCKNUM=mocknum).replace('global','dvs_ro'), columns=['TARGETID', 'R_MAG_ABS']))
             ffile = Table.read(os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits').replace('global','dvs_ro'))
             if 'R_MAG_ABS' not in ffile.columns:
+                targets = Table(fitsio.read(os.path.join(args.targDir, 'forFA{MOCKNUM}.fits').format(MOCKNUM=mocknum).replace('global','dvs_ro'), columns=['TARGETID', 'R_MAG_ABS']))
                 nm = Table(join(ffile, targets, keys=['TARGETID']))
                 #print(nm)
                 common.write_LSS(nm, os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits'))

diff --git a/scripts/mock_tools/prepare_mocks_Y1_bright.py b/scripts/mock_tools/prepare_mocks_Y1_bright.py
index 0b7e2c4fc..a435d16b1 100644
--- a/scripts/mock_tools/prepare_mocks_Y1_bright.py
+++ b/scripts/mock_tools/prepare_mocks_Y1_bright.py
@@ -129,7 +129,7 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0):
         mockdir = os.path.join(args.base_output, 'SecondGenMocks', 'AbacusSummit')

     elif args.prog == 'bright':
-        mockdir = os.path.join(args.base_output, 'SecondGenMocks', 'AbacusSummitBGS')
+        mockdir = os.path.join(args.base_output, 'SecondGenMocks', 'AbacusSummitBGS_v2')

     out_file_name = os.path.join(mockdir, 'forFA{real}.fits'.format(real=real))

@@ -172,7 +172,7 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0):
             thepath = os.path.join(mockpath, type_, 'v0.1', zs[type_], file_name.format(TYPE = type_, Z = zs[type_], PH = "%03d" % real))
             print('thepath')
             print(thepath)
-            data = Table(fitsio.read(thepath, columns=['RA', 'DEC', 'Z', 'Z_COSMO', 'R_MAG_APP', 'R_MAG_ABS', 'IN_Y1']))
+            data = Table(fitsio.read(thepath, columns=['RA', 'DEC', 'Z', 'Z_COSMO', 'R_MAG_APP', 'R_MAG_ABS', 'IN_Y1', 'G_R_OBS', 'G_R_REST']))
             print("Length before rbandcut")
             print(len(data))

diff --git a/scripts/mock_tools/prepare_script_bright.sh b/scripts/mock_tools/prepare_script_bright.sh
index 02a8d22d3..6b17d3cc0 100755
--- a/scripts/mock_tools/prepare_script_bright.sh
+++ b/scripts/mock_tools/prepare_script_bright.sh
@@ -1,9 +1,7 @@
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 1 --realmax 4 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 4 --realmax 7 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
 srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 0 --realmax 4 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 11 --realmax 14 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 4 --realmax 7 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 11 --realmax 14 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
 srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 7 --realmax 11 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 14 --realmax 17 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 17 --realmax 21 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 21 --realmax 23 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-srun -N 1 -C cpu -t 02:00:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 24 --realmax 25 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 14 --realmax 17 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 17 --realmax 21 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 21 --realmax 25 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5

diff --git a/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh b/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh
new file mode 100755
index 000000000..ed71bed77
--- /dev/null
+++ b/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+OUTBASE='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM}'
+
+#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer bright --combd y --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS --joindspec y
+#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_ANY --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS --fulld y --fullr y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --apply_veto y --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch'
+#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_BRIGHT --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS --fulld y --fullr y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --apply_veto y --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch'
+python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_BRIGHT --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' --ccut -21.55

diff --git a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh
index 259a7f511..6548b300e 100755
--- a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh
+++ b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh
@@ -1,5 +1,5 @@
 SeconGenVer=AbacusSummitBGS #AbacusSummit
-for j in {0..4}
+for j in {5..24}
 do
     #j=0
     echo $j

From e5de319b5223321b3aca538442335bda4df997ed Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Tue, 6 Feb 2024 16:39:12 -0500
Subject: [PATCH 114/297] Update validation_improp_full.py

---
 scripts/validation/validation_improp_full.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/scripts/validation/validation_improp_full.py b/scripts/validation/validation_improp_full.py
index cef0c0b19..11fcb5cc9 100644
--- a/scripts/validation/validation_improp_full.py
+++ b/scripts/validation/validation_improp_full.py
@@ -38,10 +38,11 @@
 indir = args.basedir+args.survey+'/'+args.data+'/'+args.verspec+'/LSScats/'+args.version+'/'
 outdir = indir+'plots/imaging/'
 outdir = outdir.replace('dvs_ro','global')
+outdir_txt = outdir+'ngalvsysfiles/'

 print('writing to '+outdir)
-if not os.path.exists(outdir):
-    os.makedirs(outdir)
+if not os.path.exists(outdir_txt):
+    os.makedirs(outdir_txt)

 zcol = 'Z_not4clus'

@@ -210,6 +211,12 @@ def plot_reldens(parv,pixlg,pixlgw,pixlr,titl='',cl='k',xlab='',yl = (0.8,1.1),d
     plt.grid()
     plt.ylim(yl[0],yl[1])
     print(xlab,'weighted: '+str(chi2),'unweighted: '+str(chi2nw))
+    fname = outdir_txt + 'ngalvs_'+xlab+titl.replace(' ','')
+    fo = open(fname,'w')
+    for i in range(0,len(bc)):
+        fo.write(str(bc[i])+' '+str(svw[i])+' '+str(sv[i])+' '+str(ep[i])+'\n')
+    fo.close()
+    print('wrote to '+fname)
     return chi2,chi2nw
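For reference, each ngalvs_* file written by the new block above is plain four-column ASCII, in the order of the fo.write() call: bin center, weighted relative density, unweighted relative density, error. A minimal sketch of reading one back (the helper name and example path are illustrative, not part of the patch):

    import numpy as np

    def read_ngalvs(fname):
        # columns follow the fo.write() call above: bc, svw, sv, ep
        bc, svw, sv, ep = np.loadtxt(fname, unpack=True)
        return bc, svw, sv, ep

    # e.g. bc, svw, sv, ep = read_ngalvs('plots/imaging/ngalvsysfiles/ngalvs_EBV0.4z0.6.txt')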
From 5bc66e8178d00172159be5597c16554d394f4d38 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Tue, 6 Feb 2024 16:55:00 -0500
Subject: [PATCH 115/297] Update validation_improp_full.py

---
 scripts/validation/validation_improp_full.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/scripts/validation/validation_improp_full.py b/scripts/validation/validation_improp_full.py
index 11fcb5cc9..530edf448 100644
--- a/scripts/validation/validation_improp_full.py
+++ b/scripts/validation/validation_improp_full.py
@@ -212,6 +212,8 @@ def plot_reldens(parv,pixlg,pixlgw,pixlr,titl='',cl='k',xlab='',yl = (0.8,1.1),d
     plt.ylim(yl[0],yl[1])
     print(xlab,'weighted: '+str(chi2),'unweighted: '+str(chi2nw))
     fname = outdir_txt + 'ngalvs_'+xlab+titl.replace(' ','')
+    fname = fname.replace(' - ','')
+    fname = fname.replace('<','')
     fo = open(fname,'w')
     for i in range(0,len(bc)):
         fo.write(str(bc[i])+' '+str(svw[i])+' '+str(sv[i])+' '+str(ep[i])+'\n')

From 654c1e81bed79b44c3acde3c39f433b22960f286 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Tue, 6 Feb 2024 17:04:35 -0500
Subject: [PATCH 116/297] Update validation_improp_full.py

---
 scripts/validation/validation_improp_full.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/validation/validation_improp_full.py b/scripts/validation/validation_improp_full.py
index 530edf448..793b217c7 100644
--- a/scripts/validation/validation_improp_full.py
+++ b/scripts/validation/validation_improp_full.py
@@ -211,7 +211,7 @@ def plot_reldens(parv,pixlg,pixlgw,pixlr,titl='',cl='k',xlab='',yl = (0.8,1.1),d
     plt.grid()
     plt.ylim(yl[0],yl[1])
     print(xlab,'weighted: '+str(chi2),'unweighted: '+str(chi2nw))
-    fname = outdir_txt + 'ngalvs_'+xlab+titl.replace(' ','')
+    fname = outdir_txt + 'ngalvs_'+xlab+titl.replace(' ','')+'.txt'
     fname = fname.replace(' - ','')
     fname = fname.replace('<','')
     fo = open(fname,'w')

From b0c68a91ea2d440f162262c61a1d41d93297df7f Mon Sep 17 00:00:00 2001
From: Jiaxi-Yu
Date: Wed, 7 Feb 2024 00:19:17 -0800
Subject: [PATCH 117/297] SSR 2D plot without ZFAIL

---
 scripts/validation/validation_ssr_plot.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/scripts/validation/validation_ssr_plot.py b/scripts/validation/validation_ssr_plot.py
index b1fcb2173..2b5ad3591 100644
--- a/scripts/validation/validation_ssr_plot.py
+++ b/scripts/validation/validation_ssr_plot.py
@@ -23,6 +23,7 @@
 parser.add_argument("--zmin", help="minimum redshift",default=-0.1)
 parser.add_argument("--zmax", help="maximum redshift",default=1.5)
 parser.add_argument("--focalplane_SSR_LSS", help="add WEIGHT_focal to the full data or not",action='store_true',default=False)
+parser.add_argument("--fulltype", help="use full_HPmapcut data or not (full data instead)",default='',choices=['','_HPmapcut'])

 args = parser.parse_args()

@@ -55,7 +56,7 @@ def apply_imaging_veto(ff,reccircmasks,ebits):
 else:
     indir = args.basedir+'/'+args.survey+'/'+args.data+'/'+args.verspec+'/LSScats/'+args.version+'/'
     ############
-    outdir = indir+'plots/ssr/'
+    outdir = indir+'plots/ssr'+args.fulltype+'/'

 ############
 # create the susscessful rate vs observation figure

@@ -99,7 +100,7 @@ def SSR_chi2(goodz, allz, err):
 # list all tracers
 tps = [args.tracers]
 if args.tracers == 'all':
-    tps = ['ELG_LOPnotqso']#['BGS_BRIGHT','ELG_LOPnotqso','QSO','LRG']
+    tps = ['BGS_BRIGHT','ELG_LOPnotqso','QSO','LRG']
 if args.survey == 'SV3' and args.tracers == 'all':
     tps = ['QSO','LRG','BGS_ANY','BGS_BRIGHT','ELG','ELG_HIP','ELG_HIPnotqso','ELGnotqso']

@@ -112,20 +113,25 @@ def SSR_chi2(goodz, allz, err):
     if tp[:3] == 'QSO':
         zmin = float(args.zmin)
         zmax = float(args.zmax)
+        dv = 0.08
     elif tp[:3] == 'ELG':
         zmin = 0.01
         zmax = 1.8
         flux = 'G'
+        dv = 0.05
     elif tp[:3] == 'LRG':
         zmin = float(args.zmin)
         zmax = float(args.zmax)
+        dv = 0.02
     elif tp[:3] == 'BGS':
         zmin = 0.01
         zmax = 0.5
         flux = 'Z'
+        dv = 0.02
     # read the full catalogue
     if args.data == 'LSS':
-        full = Table(fitsio.read(indir+tp+'_full.dat.fits'))
+
+        full = Table(fitsio.read(indir+tp+'_full'+args.fulltype+'.dat.fits'))
     elif args.data == 'mock':
         full = Table(fitsio.read(indir+'ffa_full_' + tp+'.fits'))
     # add new deducted observing conditions
@@ -353,12 +359,10 @@ def SSR_chi2(goodz, allz, err):
             ssrmean = np.sum(GOOD)/np.sum(ALL)
             err[err==0]= 1
             chi2s = (ssrmodel-ssrmean)/err
-            plt.title('{} chi2 = {:.1f}/{}'.format(ptypetl,np.sum(chi2s[np.isfinite(chi2s)]**2),np.sum(np.isfinite(ssrmodel))),fontsize=10)

             if ptype != 'chi2hist':
                 if (ptype == 'SSR')|(ptype == 'noZFAIL'):
                     # fibrewise SSR
-                    dv = 0.05
                     vmin = 1-dv
                     vmax = 1+dv
                     value = ssrmodel/ssrmean
@@ -396,6 +400,7 @@ def SSR_chi2(goodz, allz, err):
                 if cp ==0:
                     plt.ylabel('normalised counts')
                 plt.legend()
+            plt.title('{} chi2 = {:.1f}/{}'.format(ptypetl,np.sum(chi2s[np.isfinite(chi2s)]**2),np.sum(np.isfinite(chi2s))),fontsize=10)

         if ptype == 'SSR':
             plt.savefig(outdir+'{}_focalplane_success_rate_z{}z{}_{}.png'.format(tp,zmin,zmax,args.version))

From 9e5734b227ee785247d38611f86e3e250e1c88ca Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Wed, 7 Feb 2024 15:07:37 -0500
Subject: [PATCH 118/297] Update ez_cat_sbatch_BGS.sh

---
 scripts/mock_tools/ez_cat_sbatch_BGS.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/mock_tools/ez_cat_sbatch_BGS.sh b/scripts/mock_tools/ez_cat_sbatch_BGS.sh
index 62be5debd..305b0e1c7 100755
--- a/scripts/mock_tools/ez_cat_sbatch_BGS.sh
+++ b/scripts/mock_tools/ez_cat_sbatch_BGS.sh
@@ -3,7 +3,7 @@
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
-#SBATCH --array=2-200
+#SBATCH --array=201-320
 #SBATCH --account=desi

 source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main

From 29114d80338e1d9fa949602df0c05efa0eed7337 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Thu, 8 Feb 2024 15:14:14 -0500
Subject: [PATCH 119/297] Update cattools.py

---
 py/LSS/main/cattools.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py
index fe8b92fe3..7a60ce42f 100644
--- a/py/LSS/main/cattools.py
+++ b/py/LSS/main/cattools.py
@@ -3749,7 +3749,7 @@ def add_tlobs_ran_array(ranf,tlf):
     return ranf

-def mkclusran(flin,fl,rann,rcols=['Z','WEIGHT'],zmask=False,tsnrcut=80,tsnrcol='TSNR2_ELG',utlid=False,ebits=None,write_cat='y',nosplit='y',return_cat='n',compmd='ran',clus_arrays=None,use_map_veto='',add_tlobs='n'):
+def mkclusran(flin,fl,rann,rcols=['Z','WEIGHT'],zmask=False,tsnrcut=80,tsnrcol='TSNR2_ELG',utlid=False,ebits=None,write_cat='y',nosplit='y',return_cat='n',compmd='ran',clus_arrays=None,use_map_veto='',add_tlobs='y'):
     import LSS.common_tools as common
     #first find tilelocids where fiber was wanted, but none was assigned; should take care of all priority issues
     wzm = ''

From 2b5da86c804eed2b26a4bd0d83a689900ac09acb Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Thu, 8 Feb 2024 18:29:01 -0500
Subject: [PATCH 120/297] Update LSSpipe_Y1.txt

---
 Sandbox/LSSpipe_Y1.txt | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt
index d781f3bf0..02cf1c19d 100644
--- a/Sandbox/LSSpipe_Y1.txt
+++ b/Sandbox/LSSpipe_Y1.txt
@@ -280,4 +280,18 @@ srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scri
 #from an interactive node (runs out of memory otherwise)
 python scripts/main/patch_HPmapcut.py --version v1.1

+==== Steps to make v1.2 (bug with FRACZ_TLOBS_TILES was not actually fixed)
+mkdir /global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/v1.2/
+cp /global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/v1.1/*full* /global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/v1.2/
+
+srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type QSO --fulld n --survey Y1 --verspec iron --version v1.2 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_RF --basedir /global/cfs/cdirs/desi/survey/catalogs/
+
+srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type ELG_LOP --notqso y --fulld n --survey Y1 --verspec iron --version v1.2 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_SN --basedir /global/cfs/cdirs/desi/survey/catalogs/
+
+srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type LRG --fulld n --survey Y1 --verspec iron --version v1.2 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_IMLIN --basedir /global/cfs/cdirs/desi/survey/catalogs/
+
+srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --fulld n --survey Y1 --verspec iron --version v1.2 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_IMLIN --basedir /global/cfs/cdirs/desi/survey/catalogs/
+
+
+

From fc245a07163cf4adc0cbae0b067dfb25abc71b8b Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Thu, 8 Feb 2024 19:58:33 -0500
Subject: [PATCH 121/297] Update LSSpipe_Y1.txt

---
 Sandbox/LSSpipe_Y1.txt | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt
index 02cf1c19d..6376bc122 100644
--- a/Sandbox/LSSpipe_Y1.txt
+++ b/Sandbox/LSSpipe_Y1.txt
@@ -292,6 +292,10 @@ srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/mai
 srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --fulld n --survey Y1 --verspec iron --version v1.2 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_IMLIN --basedir /global/cfs/cdirs/desi/survey/catalogs/

+#run blinding script on each tracer
+srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type QSO --wsyscol WEIGHT_RF --version v1.2 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y
+
+srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type ELG_LOPnotqso --wsyscol WEIGHT_SN --version v1.2 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y

From d046a3316872c8d53835c70cd27c2d6248445258 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Fri, 9 Feb 2024 15:54:04 -0500
Subject: [PATCH 122/297] Update LSSpipe_Y1.txt

---
 Sandbox/LSSpipe_Y1.txt | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt
index 6376bc122..b25768c64 100644
--- a/Sandbox/LSSpipe_Y1.txt
+++ b/Sandbox/LSSpipe_Y1.txt
@@ -298,4 +298,14 @@ srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scri
 srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type ELG_LOPnotqso --wsyscol WEIGHT_SN --version v1.2 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y

+srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type LRG --wsyscol WEIGHT_IMLIN --version v1.2 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y
+
+srun -N 1 -n 128 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/apply_blinding_main_fromfile_fcomp.py --type BGS_BRIGHT-21.5 --wsyscol WEIGHT_IMLIN --version v1.2 --baoblind y --mkclusdat y --mkclusran y --maxr 18 --dorecon y --rsdblind y --fnlblind y --getFKP y --resamp y --mv_out2cfs y
+
+#run BGS_ANY and BGS_BRIGHT
+
+srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type BGS_BRIGHT --fulld n --survey Y1 --verspec iron --version v1.2 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/
+
+srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type BGS_ANY --fulld n --survey Y1 --verspec iron --version v1.2 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/
+

From a1821fa928536810cdb849602cd08492a1713ebe Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Sat, 10 Feb 2024 10:42:53 -0500
Subject: [PATCH 123/297] Update ez_cat_sbatch_BGS.sh

---
 scripts/mock_tools/ez_cat_sbatch_BGS.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/mock_tools/ez_cat_sbatch_BGS.sh b/scripts/mock_tools/ez_cat_sbatch_BGS.sh
index 305b0e1c7..20c00655f 100755
--- a/scripts/mock_tools/ez_cat_sbatch_BGS.sh
+++ b/scripts/mock_tools/ez_cat_sbatch_BGS.sh
@@ -3,7 +3,7 @@
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
-#SBATCH --array=201-320
+#SBATCH --array=321-550
 #SBATCH --account=desi

 source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main

From 2160a65ad75b415cf83511c9dddd6be490c81519 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Sat, 10 Feb 2024 10:52:28 -0500
Subject: [PATCH 124/297] Create process2genabversion_pota2clus.sh

---
 .../mock_tools/process2genabversion_pota2clus.sh | 13 +++++++++++++
 1 file changed, 13 insertions(+)
 create mode 100755 scripts/mock_tools/process2genabversion_pota2clus.sh

diff --git a/scripts/mock_tools/process2genabversion_pota2clus.sh b/scripts/mock_tools/process2genabversion_pota2clus.sh
new file mode 100755
index 000000000..707f15588
--- /dev/null
+++ b/scripts/mock_tools/process2genabversion_pota2clus.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main
+PYTHONPATH=$PYTHONPATH:$HOME/LSS/py
+
+for (( i=$1;i<=$2;i++ ))
+do
+    srun -N 1 -C cpu -t 00:45:00 --qos interactive --account desi python scripts/mock_tools/pota2clus_fast.py --realization $i --mockver AbacusSummit_$3
+    mv $SCRATCH/AbacusSummit_$3/mock$i/*GC* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$3/mock$i/
+    mv $SCRATCH/AbacusSummit_$3/mock$i/*nz* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$3/mock$i/
+    rm $SCRATCH/AbacusSummit_$3/mock$i/*
+    chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$3/mock$i/*clustering*
+    chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$3/mock$i/*nz*
+done
\ No newline at end of file

From 35518db6277a477cf74f6de2e36bfe14b49dcbe2 Mon Sep 17 00:00:00 2001
From: Michael Rashkovetskyi
Date: Sat, 10 Feb 2024 14:48:11 -0500
Subject: [PATCH 125/297] Allow the post-recon filenames from desipipe lacking
 the {rec_type}. part

---
 py/LSS/cosmodesi_io_tools.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py
index ceaf64999..cb1651fe4 100755
--- a/py/LSS/cosmodesi_io_tools.py
+++ b/py/LSS/cosmodesi_io_tools.py
@@ -128,10 +128,10 @@ def catalog_fn(tracer='ELG', region='', ctype='clustering', name='data', ran_sw=
     if name == 'randoms' and tracer == 'LRG_main' and ctype == 'full': tracer = 'LRG'
     if region: region = '_' + region
+    #recon_dir = kwargs['recon_dir']
+    if recon_dir != 'n':
+        tracer = recon_dir+'/'+tracer
     if rec_type:
-        #recon_dir = kwargs['recon_dir']
-        if recon_dir != 'n':
-            tracer = recon_dir+'/'+tracer
         dat_or_ran = '{}.{}'.format(rec_type, dat_or_ran)
     if name == 'data':
         return os.path.join(cat_dir, '{}{}_{}.{}.fits'.format(tracer, region, ctype, dat_or_ran))

From 465f2051bb17b95d3160f42a802d209e3f54e0f1 Mon Sep 17 00:00:00 2001
From: Michael Rashkovetskyi
Date: Sat, 10 Feb 2024 15:06:49 -0500
Subject: [PATCH 126/297] Fixed the shifted randoms flag

---
 scripts/xirunpc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/xirunpc.py b/scripts/xirunpc.py
index a1a71db8a..42b696803 100644
--- a/scripts/xirunpc.py
+++ b/scripts/xirunpc.py
@@ -422,7 +422,7 @@ def compute_correlation_function(corr_type, edges, distance, nthreads=8, gpu=Fal
     catalog_kwargs = kwargs.copy()
     catalog_kwargs['weight_type'] = weight_type
     #catalog_kwargs['recon_dir'] = recon_dir
-    with_shifted = rec_type is not None
+    with_shifted = rec_type is not None or recon_dir != "n"

     if 'angular' in weight_type and wang is None:

         wang = compute_angular_weights(nthreads=nthreads, gpu=gpu, dtype=dtype, weight_type=weight_type, tracer=tracer, tracer2=tracer2, mpicomm=mpicomm, mpiroot=mpiroot, **kwargs)

From a2c9144d374d794567f136a3d7486a16859ca536 Mon Sep 17 00:00:00 2001
From: Michael Rashkovetskyi
Date: Sat, 10 Feb 2024 15:11:00 -0500
Subject: [PATCH 127/297] Updated help on rec_type

---
 scripts/xirunpc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/xirunpc.py b/scripts/xirunpc.py
index 42b696803..a0a8e9073 100644
--- a/scripts/xirunpc.py
+++ b/scripts/xirunpc.py
@@ -597,7 +597,7 @@ def corr_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax
     parser.add_argument('--use_arrays', help = 'use pre-stored arrays rather than reading from memory again', default = 'n')
     parser.add_argument('--write_arrays', help = 'save the pre-stored arrays', default = 'n')
     #only relevant for reconstruction
-    parser.add_argument('--rec_type', help='reconstruction algorithm + reconstruction convention', choices=['IFTPrecsym', 'IFTPreciso','IFTrecsym', 'IFTreciso', 'MGrecsym', 'MGreciso'], type=str, default=None)
+    parser.add_argument('--rec_type', help='reconstruction algorithm + reconstruction convention, but only if included in the catalog filename between dots, otherwise leave blank', choices=['IFTPrecsym', 'IFTPreciso','IFTrecsym', 'IFTreciso', 'MGrecsym', 'MGreciso'], type=str, default=None)
     parser.add_argument('--recon_dir', help='if recon catalogs are in a subdirectory, put that here', type=str, default='n')
     parser.add_argument('--rpcut', help='apply this rp-cut', type=float, default=None)
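Net effect of the catalog_fn change in PATCH 125 above: recon_dir now prefixes the tracer even when rec_type is unset, so post-recon catalogs written without the '{rec_type}.' part (as desipipe names them) are still found. A minimal standalone sketch of the resulting filename logic (an illustration, not the library function itself):

    import os

    def catalog_fn_sketch(cat_dir, tracer, region, ctype, dat_or_ran, recon_dir='n', rec_type=None):
        if region:
            region = '_' + region
        if recon_dir != 'n':
            tracer = recon_dir + '/' + tracer  # now applied whether or not rec_type is set
        if rec_type:
            dat_or_ran = '{}.{}'.format(rec_type, dat_or_ran)  # the optional '{rec_type}.' part
        return os.path.join(cat_dir, '{}{}_{}.{}.fits'.format(tracer, region, ctype, dat_or_ran))

    # catalog_fn_sketch('cats', 'LRG', 'NGC', 'clustering', 'dat', recon_dir='recon1')
    #   -> 'cats/recon1/LRG_NGC_clustering.dat.fits' (no rec_type between the dots)
    # catalog_fn_sketch('cats', 'LRG', 'NGC', 'clustering', 'dat', recon_dir='recon1', rec_type='IFTrecsym')
    #   -> 'cats/recon1/LRG_NGC_clustering.IFTrecsym.dat.fits'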
From c6ac0ce0c2156112ebdf33952aae50327ed63a41 Mon Sep 17 00:00:00 2001
From: Jiaxi-Yu
Date: Wed, 14 Feb 2024 23:55:31 -0800
Subject: [PATCH 128/297] redshift success rate study should use full_HPmapcut
 (corrected on 10 Feb 2024)

---
 scripts/validation/validation_ssr_plot.py | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/scripts/validation/validation_ssr_plot.py b/scripts/validation/validation_ssr_plot.py
index 2b5ad3591..2b7b8a83a 100644
--- a/scripts/validation/validation_ssr_plot.py
+++ b/scripts/validation/validation_ssr_plot.py
@@ -23,7 +23,7 @@
 parser.add_argument("--zmin", help="minimum redshift",default=-0.1)
 parser.add_argument("--zmax", help="maximum redshift",default=1.5)
 parser.add_argument("--focalplane_SSR_LSS", help="add WEIGHT_focal to the full data or not",action='store_true',default=False)
-parser.add_argument("--fulltype", help="use full_HPmapcut data or not (full data instead)",default='',choices=['','_HPmapcut'])
+parser.add_argument("--fullonly", help="use full data instead of full_HPmapcut",action='store_true',default=False)

 args = parser.parse_args()

@@ -130,8 +130,10 @@ def SSR_chi2(goodz, allz, err):
         dv = 0.02
     # read the full catalogue
     if args.data == 'LSS':
-
-        full = Table(fitsio.read(indir+tp+'_full'+args.fulltype+'.dat.fits'))
+        if args.fullonly:
+            full = Table(fitsio.read(indir+tp+'_full.dat.fits'))
+        else:
+            full = Table(fitsio.read(indir+tp+'_full_HPmapcut.dat.fits'))
     elif args.data == 'mock':
         full = Table(fitsio.read(indir+'ffa_full_' + tp+'.fits'))
     # add new deducted observing conditions
@@ -414,7 +416,10 @@ def SSR_chi2(goodz, allz, err):
     #print('FIB',list(FIB))
     #print('FIBER',full['FIBER'])
     if args.data == 'LSS':
-        full = Table(fitsio.read(indir+tp+'_full_HPmapcut.dat.fits'))
+        if args.fullonly:
+            full = Table(fitsio.read(indir+tp+'_full.dat.fits'))
+        else:
+            full = Table(fitsio.read(indir+tp+'_full_HPmapcut.dat.fits'))
     elif args.data == 'mock':
         full = Table(fitsio.read(indir+'ffa_full_' + tp+'.fits'))

From d8f3f2a871116315f0da30f3d50addbe7ffd66be Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Thu, 15 Feb 2024 14:12:39 -0500
Subject: [PATCH 129/297] Update ez_cat_sbatch_BGS.sh

---
 scripts/mock_tools/ez_cat_sbatch_BGS.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/mock_tools/ez_cat_sbatch_BGS.sh b/scripts/mock_tools/ez_cat_sbatch_BGS.sh
index 20c00655f..b9efddf53 100755
--- a/scripts/mock_tools/ez_cat_sbatch_BGS.sh
+++ b/scripts/mock_tools/ez_cat_sbatch_BGS.sh
@@ -3,7 +3,7 @@
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
-#SBATCH --array=321-550
+#SBATCH --array=511-820
 #SBATCH --account=desi

 source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main

From 3172a43b7cf9c716b0bd1bc7e02192256dba0c68 Mon Sep 17 00:00:00 2001
From: Jiaxi-Yu
Date: Sat, 17 Feb 2024 02:00:09 -0800
Subject: [PATCH 130/297] only change 'TARGETID' to 'TARGETID_DATA' once when
 generating randoms sequentially

---
 scripts/mock_tools/ffa2clus_fast.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/scripts/mock_tools/ffa2clus_fast.py b/scripts/mock_tools/ffa2clus_fast.py
index a056a4c8c..35fa81857 100644
--- a/scripts/mock_tools/ffa2clus_fast.py
+++ b/scripts/mock_tools/ffa2clus_fast.py
@@ -118,7 +118,8 @@ def splitGC(flroot,datran='.dat',rann=0):

 def ran_col_assign(randoms,data,sample_columns,tracer):
-    data.rename_column('TARGETID', 'TARGETID_DATA')
+    if (not 'TARGETID_DATA' in data.colnames)&('TARGETID' in data.colnames):
+        data.rename_column('TARGETID', 'TARGETID_DATA')
     def _resamp(selregr,selregd):
         for col in sample_columns:
             randoms[col] = np.zeros(len(randoms))

From 095f74f556c959afb9543362c67d9991b3a490bb Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Mon, 19 Feb 2024 08:58:02 -0500
Subject: [PATCH 131/297] Update ez_cat_sbatch_BGS.sh

---
 scripts/mock_tools/ez_cat_sbatch_BGS.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/mock_tools/ez_cat_sbatch_BGS.sh b/scripts/mock_tools/ez_cat_sbatch_BGS.sh
index b9efddf53..445c026dc 100755
--- a/scripts/mock_tools/ez_cat_sbatch_BGS.sh
+++ b/scripts/mock_tools/ez_cat_sbatch_BGS.sh
@@ -3,7 +3,7 @@
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
-#SBATCH --array=511-820
+#SBATCH --array=511-900
 #SBATCH --account=desi

 source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main

From 0b637e1254746bd7da8aff70d13acefe313ca2a2 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Mon, 19 Feb 2024 14:31:33 -0500
Subject: [PATCH 132/297] Update ssr_tools_new.py

---
 py/LSS/ssr_tools_new.py | 8 ++++++++
 1 file changed, 8 insertions(+)
diff --git a/py/LSS/ssr_tools_new.py b/py/LSS/ssr_tools_new.py
index 523b802a1..df1ddec8e 100644
--- a/py/LSS/ssr_tools_new.py
+++ b/py/LSS/ssr_tools_new.py
@@ -172,6 +172,14 @@ def __init__(self,input_data,tsnr_min=80,tsnr_max=200,tracer='ELG',reg=None,outd
     self.maxz = maxz

     self.res_mod_slp = self.get_slpfunc()
+            fo = open(self.outdir+outfn_root+'slp_wzfac.txt','w')
+            fo.write('self.res_mod_slp')
+            #fo.write('#a b c chi2\n')
+            #for par in pars:
+            #    fo.write(str(par)+' ')
+            #fo.write(str(chi2)+'\n')
+            fo.close()
+
     self.ssrtot = len(self.cat[self.selgz])/len(self.cat)
     self.tracer = tracer

From 209c78a505d92346bb309ddcaf331fcdd7e15cd5 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Mon, 19 Feb 2024 14:42:51 -0500
Subject: [PATCH 133/297] Update ssr_tools_new.py

---
 py/LSS/ssr_tools_new.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/py/LSS/ssr_tools_new.py b/py/LSS/ssr_tools_new.py
index df1ddec8e..7278af910 100644
--- a/py/LSS/ssr_tools_new.py
+++ b/py/LSS/ssr_tools_new.py
@@ -172,13 +172,13 @@ def __init__(self,input_data,tsnr_min=80,tsnr_max=200,tracer='ELG',reg=None,outd
     self.maxz = maxz

     self.res_mod_slp = self.get_slpfunc()
-            fo = open(self.outdir+outfn_root+'slp_wzfac.txt','w')
-            fo.write('self.res_mod_slp')
-            #fo.write('#a b c chi2\n')
-            #for par in pars:
-            #    fo.write(str(par)+' ')
-            #fo.write(str(chi2)+'\n')
-            fo.close()
+    fo = open(self.outdir+outfn_root+'slp_wzfac.txt','w')
+    fo.write('self.res_mod_slp')
+    #fo.write('#a b c chi2\n')
+    #for par in pars:
+    #    fo.write(str(par)+' ')
+    #fo.write(str(chi2)+'\n')
+    fo.close()

     self.ssrtot = len(self.cat[self.selgz])/len(self.cat)
     self.tracer = tracer

From 94706f9998ef8acf2bb2f048c7e22aa8c8dfc608 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Mon, 19 Feb 2024 14:58:50 -0500
Subject: [PATCH 134/297] Update ssr_tools_new.py

---
 py/LSS/ssr_tools_new.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/py/LSS/ssr_tools_new.py b/py/LSS/ssr_tools_new.py
index 7278af910..a1048a6b9 100644
--- a/py/LSS/ssr_tools_new.py
+++ b/py/LSS/ssr_tools_new.py
@@ -172,8 +172,8 @@ def __init__(self,input_data,tsnr_min=80,tsnr_max=200,tracer='ELG',reg=None,outd
     self.maxz = maxz

     self.res_mod_slp = self.get_slpfunc()
-    fo = open(self.outdir+outfn_root+'slp_wzfac.txt','w')
-    fo.write('self.res_mod_slp')
+    fo = open(outdir+outfn_root+'slp_wzfac.txt','w')
+    fo.write(str(self.res_mod_slp))
     #fo.write('#a b c chi2\n')
     #for par in pars:
     #    fo.write(str(par)+' ')

From c9858496e1d0b9236db68d983448619f31369bbe Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Mon, 19 Feb 2024 15:19:03 -0500
Subject: [PATCH 135/297] Update ssr_tools_new.py

---
 py/LSS/ssr_tools_new.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/py/LSS/ssr_tools_new.py b/py/LSS/ssr_tools_new.py
index a1048a6b9..f6410a3fd 100644
--- a/py/LSS/ssr_tools_new.py
+++ b/py/LSS/ssr_tools_new.py
@@ -174,6 +174,11 @@ def __init__(self,input_data,tsnr_min=80,tsnr_max=200,tracer='ELG',reg=None,outd
     self.res_mod_slp = self.get_slpfunc()
     fo = open(outdir+outfn_root+'slp_wzfac.txt','w')
     fo.write(str(self.res_mod_slp))
+    fo = open(outdir+outfn_root+'slp_vszfac.txt','w')
+    for i in range(0,len(self.slpl)):
+        fo.write(str(self.zfacl[i])+' '+str(self.slpl[i])+' ')
+    fo.close()
+
     #fo.write('#a b c chi2\n')
     #for par in pars:
     #    fo.write(str(par)+' ')
@@ -210,6 +215,8 @@ def get_slpfunc(self,pstep=5):
         res = normed_linfit(self.cat,seltot,sel)
         slpl.append(res[0])
         zfacl.append(np.median(self.relzfac[sel]))
+    self.slpl = slpl
+    self.zfacl = zfacl
     res = np.polyfit(zfacl,slpl,1)
     return res

From 43cb6c825f594912165bab62aa2524806a6f2de5 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Mon, 19 Feb 2024 15:20:57 -0500
Subject: [PATCH 136/297] Update ssr_tools_new.py

---
 py/LSS/ssr_tools_new.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/py/LSS/ssr_tools_new.py b/py/LSS/ssr_tools_new.py
index f6410a3fd..287b95e7a 100644
--- a/py/LSS/ssr_tools_new.py
+++ b/py/LSS/ssr_tools_new.py
@@ -176,7 +176,7 @@ def __init__(self,input_data,tsnr_min=80,tsnr_max=200,tracer='ELG',reg=None,outd
     fo.write(str(self.res_mod_slp))
     fo = open(outdir+outfn_root+'slp_vszfac.txt','w')
     for i in range(0,len(self.slpl)):
-        fo.write(str(self.zfacl[i])+' '+str(self.slpl[i])+' ')
+        fo.write(str(self.zfacl[i])+' '+str(self.slpl[i])+'\n')
     fo.close()

From aaa5d255f789ad08bac96707ab29adc21897999e Mon Sep 17 00:00:00 2001
From: AlbertoRosado1
Date: Tue, 20 Feb 2024 10:30:24 -0800
Subject: [PATCH 137/297] added removeIMSYS option

---
 py/LSS/cosmodesi_io_tools.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py
index cb1651fe4..23e9da225 100755
--- a/py/LSS/cosmodesi_io_tools.py
+++ b/py/LSS/cosmodesi_io_tools.py
@@ -185,7 +185,11 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim
         print('multiplying weights by WEIGHT_ZFAIL')
     if 'default' in weight_type and 'bitwise' not in weight_type:
         weights *= catalog['WEIGHT'][mask]
-        print('multiplying weights by WEIGHT')
+        print('multiplying weights by WEIGHT')
+    if 'removeIMSYS' in weight_type:
+        #assumes default already added the rest of the weights and that SYS was used as default weight
+        weights /= catalog['WEIGHT_SYS'][mask]
+        print('dividing weights by WEIGHT_SYS')
     #if 'RF' in weight_type:
     #    weights *= catalog['WEIGHT_RF'][mask]
    #     print('multiplying weights by WEIGHT_RF')
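The removeIMSYS branch added in PATCH 137 above composes with the default weights as a division. A minimal sketch of the resulting arithmetic, with stand-in columns (illustrative only; it assumes WEIGHT already includes the imaging-systematics factor, as the comment in the patch states):

    import numpy as np

    weight_type = 'default_removeIMSYS'  # illustrative weight_type string
    catalog = {'WEIGHT': np.array([1.2, 0.9, 1.1]), 'WEIGHT_SYS': np.array([1.05, 0.98, 1.02])}
    weights = np.ones(3)
    if 'default' in weight_type and 'bitwise' not in weight_type:
        weights *= catalog['WEIGHT']       # the default total weight
    if 'removeIMSYS' in weight_type:
        weights /= catalog['WEIGHT_SYS']   # divide the imaging-systematics factor back out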
From fe6543d63c367b91f570041cb22c98dda944681c Mon Sep 17 00:00:00 2001
From: Jiaxi-Yu
Date: Wed, 21 Feb 2024 01:52:53 -0800
Subject: [PATCH 138/297] enable the theta cut for fibre collision correction
 in 2PCF calculation

---
 scripts/xirunpc.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/scripts/xirunpc.py b/scripts/xirunpc.py
index a0a8e9073..29828bbd3 100644
--- a/scripts/xirunpc.py
+++ b/scripts/xirunpc.py
@@ -416,7 +416,7 @@ def compute_angular_weights(nthreads=8, gpu=False, dtype='f8', tracer='ELG', tra
     return wang

-def compute_correlation_function(corr_type, edges, distance, nthreads=8, gpu=False, dtype='f8', wang=None, split_randoms_above=30., weight_type='default', tracer='ELG', tracer2=None, recon_dir=None, rec_type=None, njack=120, option=None, mpicomm=None, mpiroot=None, cat_read=None, dat_cat=None, ran_cat=None, rpcut=None, **kwargs):
+def compute_correlation_function(corr_type, edges, distance, nthreads=8, gpu=False, dtype='f8', wang=None, split_randoms_above=30., weight_type='default', tracer='ELG', tracer2=None, recon_dir=None, rec_type=None, njack=120, option=None, mpicomm=None, mpiroot=None, cat_read=None, dat_cat=None, ran_cat=None, rpcut=None, thetacut=None, **kwargs):

     autocorr = tracer2 is None
     catalog_kwargs = kwargs.copy()
@@ -474,6 +474,7 @@ def compute_correlation_function(corr_type, edges, distance, nthreads=8, gpu=Fal
     kwargs.update(wang or {})
     selection_attrs = None
     if rpcut is not None: selection_attrs = {'rp': (rpcut, np.inf)}
+    if thetacut is not None: selection_attrs = {'theta': (thetacut, np.inf)}
     randoms_kwargs = dict(randoms_positions1=randoms_positions1, randoms_weights1=randoms_weights1, randoms_samples1=randoms_samples1,
                           randoms_positions2=randoms_positions2, randoms_weights2=randoms_weights2, randoms_samples2=randoms_samples2,
                           shifted_positions1=shifted_positions1, shifted_weights1=shifted_weights1, shifted_samples1=shifted_samples1,
@@ -547,7 +548,7 @@ def get_edges(corr_type='smu', bin_type='lin'):
     return edges

-def corr_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax=np.inf, recon_dir='n',rec_type=False, weight_type='default', bin_type='lin', njack=0, nrandoms=8, split_randoms_above=10, out_dir='.', option=None, wang=None, rpcut=None):
+def corr_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax=np.inf, recon_dir='n',rec_type=False, weight_type='default', bin_type='lin', njack=0, nrandoms=8, split_randoms_above=10, out_dir='.', option=None, wang=None, rpcut=None, thetacut=None):
     if tracer2: tracer += '_' + tracer2
     if rec_type: tracer += '_' + rec_type
     if region: tracer += '_' + region
@@ -560,6 +561,8 @@ def corr_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax
     root = '{}{}_{}_{}_{}_{}_njack{:d}_nran{:d}{}'.format(wang, tracer, zmin, zmax, weight_type, bin_type, njack, nrandoms, split)
     if rpcut is not None:
         root += '_rpcut{}'.format(rpcut)
+    if thetacut is not None:
+        root += '_thetacut{}'.format(thetacut)
     if file_type == 'npy':
         return os.path.join(out_dir, 'allcounts_{}.npy'.format(root))
     return os.path.join(out_dir, '{}_{}.txt'.format(file_type, root))
@@ -600,7 +603,8 @@ def corr_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax
     parser.add_argument('--rec_type', help='reconstruction algorithm + reconstruction convention, but only if included in the catalog filename between dots, otherwise leave blank', choices=['IFTPrecsym', 'IFTPreciso','IFTrecsym', 'IFTreciso', 'MGrecsym', 'MGreciso'], type=str, default=None)
     parser.add_argument('--recon_dir', help='if recon catalogs are in a subdirectory, put that here', type=str, default='n')
-    parser.add_argument('--rpcut', help='apply the rp-cut', type=float, default=None)
+    parser.add_argument('--rpcut', help='apply the rp-cut', type=float, default=None)
+    parser.add_argument('--thetacut', help='apply the theta-cut (more up-to-date fibre collision correction), standard: 0.05', type=float, default=None)

     setup_logging()
     args = parser.parse_args()
@@ -710,7 +714,7 @@ def corr_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax
     logger.info('Computing correlation functions {} in regions {} in redshift ranges {}.'.format(args.corr_type, regions, zlims))

     for zmin, zmax in zlims:
-        base_file_kwargs = dict(tracer=tracer, tracer2=tracer2, zmin=zmin, zmax=zmax, recon_dir=args.recon_dir,rec_type=args.rec_type, weight_type=args.weight_type, bin_type=args.bin_type, njack=args.njack, nrandoms=args.nran, split_randoms_above=args.split_ran_above, option=option, rpcut=args.rpcut)
+        base_file_kwargs = dict(tracer=tracer, tracer2=tracer2, zmin=zmin, zmax=zmax, recon_dir=args.recon_dir,rec_type=args.rec_type, weight_type=args.weight_type, bin_type=args.bin_type, njack=args.njack, nrandoms=args.nran, split_randoms_above=args.split_ran_above, option=option, rpcut=args.rpcut, thetacut=args.thetacut)
         for region in regions:
             if args.use_arrays == 'y':
                 if region == "N":
@@ -724,7 +728,7 @@ def corr_fn(file_type='npy', region='', tracer='ELG', tracer2=None, zmin=0, zmax
             logger.info('Computing correlation function {} in region {} in redshift range {}.'.format(corr_type, region, (zmin, zmax)))
             edges = get_edges(corr_type=corr_type, bin_type=args.bin_type)
-            result, wang = compute_correlation_function(corr_type, edges=edges, distance=distance, nrandoms=args.nran, split_randoms_above=args.split_ran_above, nthreads=nthreads, gpu=gpu, region=region, zlim=(zmin, zmax), maglim=maglims, weight_type=args.weight_type, njack=args.njack, wang=wang, mpicomm=mpicomm, mpiroot=mpiroot, option=option, rpcut=args.rpcut, **catalog_kwargs)
+            result, wang = compute_correlation_function(corr_type, edges=edges, distance=distance, nrandoms=args.nran, split_randoms_above=args.split_ran_above, nthreads=nthreads, gpu=gpu, region=region, zlim=(zmin, zmax), maglim=maglims, weight_type=args.weight_type, njack=args.njack, wang=wang, mpicomm=mpicomm, mpiroot=mpiroot, option=option, rpcut=args.rpcut, thetacut=args.thetacut, **catalog_kwargs)
             # Save pair counts
             if mpicomm is None or mpicomm.rank == mpiroot:
                 result.save(corr_fn(file_type='npy', region=region, out_dir=os.path.join(out_dir, corr_type), **base_file_kwargs))
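The thetacut plumbing above ultimately feeds pycorr's selection_attrs, which keeps only pairs separated by more than the cut angle. A minimal standalone sketch with toy positions (assuming the cosmodesi pycorr API; all values are illustrative):

    import numpy as np
    from pycorr import TwoPointCorrelationFunction

    rng = np.random.default_rng(42)
    # toy (RA, DEC, comoving distance) columns just to make the call runnable
    data_positions = [rng.uniform(0., 30., 1000), rng.uniform(-5., 5., 1000), rng.uniform(1000., 1500., 1000)]
    randoms_positions = [rng.uniform(0., 30., 5000), rng.uniform(-5., 5., 5000), rng.uniform(1000., 1500., 5000)]
    edges = (np.linspace(0., 200., 51), np.linspace(-1., 1., 201))

    result = TwoPointCorrelationFunction('smu', edges,
        data_positions1=data_positions, randoms_positions1=randoms_positions,
        position_type='rdd',                        # RA (deg), DEC (deg), distance
        selection_attrs={'theta': (0.05, np.inf)},  # drop pairs closer than 0.05 deg
        engine='corrfunc', nthreads=4)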
From 8d069c6ec95699d167924e133921e3f9fca97abc Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Thu, 22 Feb 2024 11:26:35 -0500
Subject: [PATCH 139/297] process 4_1

---
 scripts/mock_tools/abv41complete_cat_sbatch.sh       | 12 ++++++++++++
 scripts/mock_tools/process_one_2genab41_pota2clus.sh | 12 ++++++++++++
 2 files changed, 24 insertions(+)
 create mode 100755 scripts/mock_tools/abv41complete_cat_sbatch.sh
 create mode 100755 scripts/mock_tools/process_one_2genab41_pota2clus.sh

diff --git a/scripts/mock_tools/abv41complete_cat_sbatch.sh b/scripts/mock_tools/abv41complete_cat_sbatch.sh
new file mode 100755
index 000000000..19062af36
--- /dev/null
+++ b/scripts/mock_tools/abv41complete_cat_sbatch.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+#SBATCH --time=01:00:00
+#SBATCH --qos=regular
+#SBATCH --nodes=1
+#SBATCH --constraint=cpu
+#SBATCH --array=1-24
+#SBATCH --account=desi
+
+source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main
+PYTHONPATH=$PYTHONPATH:$HOME/LSS/py
+
+srun scripts/mock_tools/process_one_2genab41_pota2clus.sh $SLURM_ARRAY_TASK_ID
\ No newline at end of file

diff --git a/scripts/mock_tools/process_one_2genab41_pota2clus.sh b/scripts/mock_tools/process_one_2genab41_pota2clus.sh
new file mode 100755
index 000000000..7a97f3c03
--- /dev/null
+++ b/scripts/mock_tools/process_one_2genab41_pota2clus.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main
+PYTHONPATH=$PYTHONPATH:$HOME/LSS/py
+
+ver=v4_1
+
+srun -N 1 -C cpu -t 00:45:00 --qos interactive --account desi python scripts/mock_tools/pota2clus_fast.py --realization $1 --mockver AbacusSummit_$ver
+mv $SCRATCH/AbacusSummit_$ver/mock$1/*GC* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$ver/mock$1/
+mv $SCRATCH/AbacusSummit_$ver/mock$1/*nz* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$ver/mock$1/
+rm $SCRATCH/AbacusSummit_$ver/mock$1/*
+chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$ver/mock$1/*clustering*
+chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$ver/mock$1/*nz*

From de6ef4dd0095b72cf6641a272e206c4044417e82 Mon Sep 17 00:00:00 2001
From: Hanyu Zhang
Date: Thu, 22 Feb 2024 13:43:55 -0800
Subject: [PATCH 140/297] update LSS/py/cosmodesi_io_tools.py for Y1 catalogs

---
 py/LSS/cosmodesi_io_tools.py | 39 +++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 12 deletions(-)

diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py
index 23e9da225..850d2c0ac 100755
--- a/py/LSS/cosmodesi_io_tools.py
+++ b/py/LSS/cosmodesi_io_tools.py
@@ -17,7 +17,7 @@
 import numpy as np

-from astropy.table import Table, vstack
+from astropy.table import Table, vstack, join
 from matplotlib import pyplot as plt
 from pycorr import TwoPointCorrelationFunction, TwoPointEstimator, KMeansSubsampler, utils, setup_logging
@@ -87,7 +87,7 @@ def get_zlims(tracer, tracer2=None, option=None):

 def get_regions(survey, rec=False):
-    regions = ['N', 'S']#, '']
+    regions = ['NGC', 'SGC']#, '']
     #if survey in ['main', 'DA02']:
     #    regions = ['DN', 'DS', 'N', 'S']
     #    if rec: regions = ['DN', 'N']
@@ -108,12 +108,12 @@ def select_region(ra, dec, region):
     return mask

-def catalog_dir(survey='main', verspec='guadalupe', version='test', base_dir='/global/cfs/cdirs/desi/survey/catalogs'):
+def catalog_dir(survey='Y1', verspec='iron', version='v1.2', base_dir='/global/cfs/cdirs/desi/survey/catalogs'):
     return os.path.join(base_dir, survey, 'LSS', verspec, 'LSScats', version)

-def catalog_fn(tracer='ELG', region='', ctype='clustering', name='data', ran_sw='',recon_dir='n',rec_type=False, nrandoms=4, cat_dir=None, survey='main', **kwargs):
+def catalog_fn(tracer='ELG', region='', ctype='clustering', name='data', ran_sw='',recon_dir='n',rec_type=False, nrandoms=4, cat_dir=None, survey='Y1', **kwargs):
     #print(kwargs)
     if cat_dir is None:
         cat_dir = catalog_dir(survey=survey, **kwargs)
@@ -121,7 +121,8 @@ def catalog_fn(tracer='ELG', region='', ctype='clustering', name='data', ran_sw=
     #    tracer += 'zdone'
     if 'edav1' in cat_dir:
         cat_dir += ctype
-
+    if ctype == 'clustering':
+        cat_dir += '/unblinded/'
     if ctype == 'full':
         region = ''
     dat_or_ran = name[:3]
@@ -149,6 +150,7 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim
     mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1])
     if maglim is not None:
         mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) & (catalog['ABSMAG_R'] >= maglim[0]) & (catalog['ABSMAG_R'] < maglim[1])
+    mask &= (catalog['FRAC_TLOBS_TILES'] != 0)
     if option:
         if 'noNorth' in option:
             decmask = catalog['DEC'] < 32.375
@@ -241,9 +243,12 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim
             print('dividing weights by WEIGHT_COMP')
         weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights]

-#    if name == 'randoms':
-#        if 'default' in weight_type:
-#            weights *= catalog['WEIGHT'][mask]
+    if name == 'randoms':
+        if 'default' in weight_type:
+            weights *= catalog['WEIGHT'][mask]
+        if 'bitwise' in weight_type:
+            weights /= catalog['FRAC_TLOBS_TILES'][mask]
+            print('dividing weights by FRAC_TLOBS_TILES')
 #        if 'RF' in weight_type:
 #            weights *= catalog['WEIGHT_RF'][mask]*catalog['WEIGHT_COMP'][mask]
 #        if 'zfail' in weight_type:
@@ -282,13 +287,19 @@ def read_positions_weights(name):
         positions, weights = [], []
         for reg in region:
             cat_fns = catalog_fn(ctype='clustering', name=name, region=reg, **kwargs)
+            if name=='data':
+                cat_full = catalog_fn(ctype='full_HPmapcut', name=name, **kwargs)
+#                cat_full = catalog_fn(ctype='full', name=name, **kwargs)
             logger.info('Loading {}.'.format(cat_fns))
             isscalar = not isinstance(cat_fns, (tuple, list))
             if isscalar:
                 cat_fns = [cat_fns]
-            positions_weights = [get_clustering_positions_weights(Table.read(cat_fn), distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns]
+            if name=='data':
+                positions_weights = [get_clustering_positions_weights(join(Table.read(cat_fn), Table.read(cat_full)['TARGETID', 'BITWEIGHTS'], keys='TARGETID', join_type='left'), distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns]
+            else:
+                positions_weights = [get_clustering_positions_weights(Table.read(cat_fn), distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns]
             if isscalar:
                 positions.append(positions_weights[0][0])
@@ -340,7 +351,9 @@ def get_full_positions_weights(catalog, name='data', weight_type='default', fibe
     if region in ['DS', 'DN']:
         mask &= select_region(catalog['RA'], catalog['DEC'], region)
     elif region:
-        mask &= catalog['PHOTSYS'] == region.strip('_')
+        #mask &= catalog['PHOTSYS'] == region.strip('_')
+        mask &= catalog['PHOTSYS'] == region.strip('GC')
+
     if fibered: mask &= catalog['LOCATION_ASSIGNED']
     positions = [catalog['RA'][mask], catalog['DEC'][mask], catalog['DEC'][mask]]
@@ -356,11 +369,13 @@ def get_full_positions_weights(catalog, name='data', weight_type='default', fibe

 def read_full_positions_weights(name='data', weight_type='default', fibered=False, region='', weight_attrs=None, **kwargs):
-
+    if 'GC' in region:
+        region = [region]
     def read_positions_weights(name):
         positions, weights = [], []
         for reg in region:
-            cat_fn = catalog_fn(ctype='full', name=name, **kwargs)
+            cat_fn = catalog_fn(ctype='full_HPmapcut', name=name, **kwargs)
+            #cat_fn = catalog_fn(ctype='full', name=name, **kwargs)
             logger.info('Loading {}.'.format(cat_fn))
             if isinstance(cat_fn, (tuple, list)):
                 catalog = vstack([Table.read(fn) for fn in cat_fn])
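With PATCH 140 above, 'bitwise' weight types join the BITWEIGHTS column from the full_HPmapcut catalog into the clustering catalog and divide the randoms by FRAC_TLOBS_TILES. For orientation, a minimal sketch of turning packed BITWEIGHTS into individual-inverse-probability (IIP) weights, assuming the usual DESI convention that each bit marks assignment in one alternate fiberassign realization and that the weight is (Nreal + 1)/(1 + Nassigned):

    import numpy as np

    def iip_weights(bitweights):
        # bitweights: (ntarget, nchunk) int64 array, e.g. the BITWEIGHTS column
        bits = np.unpackbits(bitweights.astype('<i8').view('u1').reshape(len(bitweights), -1), axis=1)
        nreal = bits.shape[1]          # 64 realizations per int64 chunk
        nassigned = bits.sum(axis=1)   # realizations in which the target was assigned
        return (nreal + 1) / (1 + nassigned)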
From eb737bf06e25c67e9ee8b1fb9f6b11e26ce2dfc2 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Thu, 22 Feb 2024 22:20:07 -0500
Subject: [PATCH 141/297] Update process_one_2genab41_pota2clus.sh

---
 scripts/mock_tools/process_one_2genab41_pota2clus.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/mock_tools/process_one_2genab41_pota2clus.sh b/scripts/mock_tools/process_one_2genab41_pota2clus.sh
index 7a97f3c03..0d59ca440 100755
--- a/scripts/mock_tools/process_one_2genab41_pota2clus.sh
+++ b/scripts/mock_tools/process_one_2genab41_pota2clus.sh
@@ -4,7 +4,7 @@ PYTHONPATH=$PYTHONPATH:$HOME/LSS/py

 ver=v4_1

-srun -N 1 -C cpu -t 00:45:00 --qos interactive --account desi python scripts/mock_tools/pota2clus_fast.py --realization $1 --mockver AbacusSummit_$ver
+python scripts/mock_tools/pota2clus_fast.py --realization $1 --mockver AbacusSummit_$ver
 mv $SCRATCH/AbacusSummit_$ver/mock$1/*GC* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$ver/mock$1/
 mv $SCRATCH/AbacusSummit_$ver/mock$1/*nz*
/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$ver/mock$1/ rm $SCRATCH/AbacusSummit_$ver/mock$1/* From b9f5e0ffd2d43cfd72e54dadeac77ca92590a443 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 22 Feb 2024 23:11:38 -0500 Subject: [PATCH 142/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index 850d2c0ac..7e28dfc85 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -297,7 +297,11 @@ def read_positions_weights(name): if isscalar: cat_fns = [cat_fns] if name=='data': - positions_weights = [get_clustering_positions_weights(join(Table.read(cat_fn), Table.read(cat_full)['TARGETID', 'BITWEIGHTS'], keys='TARGETID', join_type='left'), distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns] + if 'bitwise' in weight_type: + tab = join(Table.read(cat_fn), Table.read(cat_full)['TARGETID', 'BITWEIGHTS'], keys='TARGETID', join_type='left') + else: + tab = Table.read(cat_fn) + positions_weights = [get_clustering_positions_weights(tab, distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns] else: positions_weights = [get_clustering_positions_weights(Table.read(cat_fn), distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns] From 4ee5bcb5796f3d40ac28062100562a97156d76d4 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 22 Feb 2024 23:13:48 -0500 Subject: [PATCH 143/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index 7e28dfc85..cacddcdb0 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -297,11 +297,13 @@ def read_positions_weights(name): if isscalar: cat_fns = [cat_fns] if name=='data': - if 'bitwise' in weight_type: - tab = join(Table.read(cat_fn), Table.read(cat_full)['TARGETID', 'BITWEIGHTS'], keys='TARGETID', join_type='left') - else: - tab = Table.read(cat_fn) - positions_weights = [get_clustering_positions_weights(tab, distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns] + def _get_tab(cat_fn): + if 'bitwise' in weight_type: + tab = join(Table.read(cat_fn), Table.read(cat_full)['TARGETID', 'BITWEIGHTS'], keys='TARGETID', join_type='left') + else: + tab = Table.read(cat_fn) + return tab + positions_weights = [get_clustering_positions_weights(_get_tab(cat_fn), distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns] else: positions_weights = [get_clustering_positions_weights(Table.read(cat_fn), distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns] From ae34a5e5b0f3f985d74d974faf17be9301e56230 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 22 Feb 2024 23:35:52 -0500 Subject: [PATCH 144/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index cacddcdb0..db57471a7 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -244,8 +244,8 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., 
np.inf),maglim weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] if name == 'randoms': - if 'default' in weight_type: - weights *= catalog['WEIGHT'][mask] + #if 'default' in weight_type: + # weights *= catalog['WEIGHT'][mask] if 'bitwise' in weight_type: weights /= catalog['FRAC_TLOBS_TILES'][mask] print('dividing weights by FRAC_TLOBS_TILES') From ff6655b47ffe40c761e53b99cd6154b08926bcbf Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 23 Feb 2024 12:14:15 -0500 Subject: [PATCH 145/297] Update process_one_2genab41_pota2clus.sh --- scripts/mock_tools/process_one_2genab41_pota2clus.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/mock_tools/process_one_2genab41_pota2clus.sh b/scripts/mock_tools/process_one_2genab41_pota2clus.sh index 0d59ca440..c98e7ccd9 100755 --- a/scripts/mock_tools/process_one_2genab41_pota2clus.sh +++ b/scripts/mock_tools/process_one_2genab41_pota2clus.sh @@ -4,6 +4,7 @@ PYTHONPATH=$PYTHONPATH:$HOME/LSS/py ver=v4_1 +python scripts/readwrite_pixel_bitmask.py --tracer lrg --input $1 --cat_type 'Ab2ndgen' --secgen_ver $ver python scripts/mock_tools/pota2clus_fast.py --realization $1 --mockver AbacusSummit_$ver mv $SCRATCH/AbacusSummit_$ver/mock$1/*GC* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$ver/mock$1/ mv $SCRATCH/AbacusSummit_$ver/mock$1/*nz* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$ver/mock$1/ From dbf7e66ff41dd9fdb1740aeb262e09e1191285dd Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 23 Feb 2024 12:33:42 -0500 Subject: [PATCH 146/297] Update process_one_2genab41_pota2clus.sh --- scripts/mock_tools/process_one_2genab41_pota2clus.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/process_one_2genab41_pota2clus.sh b/scripts/mock_tools/process_one_2genab41_pota2clus.sh index c98e7ccd9..6f817d11a 100755 --- a/scripts/mock_tools/process_one_2genab41_pota2clus.sh +++ b/scripts/mock_tools/process_one_2genab41_pota2clus.sh @@ -4,7 +4,7 @@ PYTHONPATH=$PYTHONPATH:$HOME/LSS/py ver=v4_1 -python scripts/readwrite_pixel_bitmask.py --tracer lrg --input $1 --cat_type 'Ab2ndgen' --secgen_ver $ver +python scripts/readwrite_pixel_bitmask.py --tracer lrg --input $1 --cat_type 'Ab2ndgen' --secgen_ver AbacusSummit_$ver python scripts/mock_tools/pota2clus_fast.py --realization $1 --mockver AbacusSummit_$ver mv $SCRATCH/AbacusSummit_$ver/mock$1/*GC* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$ver/mock$1/ mv $SCRATCH/AbacusSummit_$ver/mock$1/*nz* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_$ver/mock$1/ From 6cfe818fd13efd9e730e72d6809ac4ecc8c9635e Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 23 Feb 2024 13:17:09 -0500 Subject: [PATCH 147/297] Update abv41complete_cat_sbatch.sh --- scripts/mock_tools/abv41complete_cat_sbatch.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/abv41complete_cat_sbatch.sh b/scripts/mock_tools/abv41complete_cat_sbatch.sh index 19062af36..2a3789725 100755 --- a/scripts/mock_tools/abv41complete_cat_sbatch.sh +++ b/scripts/mock_tools/abv41complete_cat_sbatch.sh @@ -3,7 +3,7 @@ #SBATCH --qos=regular #SBATCH --nodes=1 #SBATCH --constraint=cpu -#SBATCH --array=1-24 +#SBATCH --array=2-24 #SBATCH --account=desi source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main From b665b1a72304316430be9c6b997c5dd64c1bbf1d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 26 Feb 2024 11:58:29 
-0500 Subject: [PATCH 148/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index db57471a7..43e15c0ef 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -121,10 +121,12 @@ def catalog_fn(tracer='ELG', region='', ctype='clustering', name='data', ran_sw= # tracer += 'zdone' if 'edav1' in cat_dir: cat_dir += ctype - if ctype == 'clustering': - cat_dir += '/unblinded/' + #if ctype == 'clustering': + # cat_dir += '/unblinded/' if ctype == 'full': region = '' + cat_dir = cat_dir.replace('/unblinded','') + cat_dir = cat_dir.replace('/blinded','') dat_or_ran = name[:3] if name == 'randoms' and tracer == 'LRG_main' and ctype == 'full': tracer = 'LRG' From f88462de74949874822291bd0720ded504a301cc Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 27 Feb 2024 18:35:18 -0500 Subject: [PATCH 149/297] Update ez_cat_sbatch_BGS.sh --- scripts/mock_tools/ez_cat_sbatch_BGS.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mock_tools/ez_cat_sbatch_BGS.sh b/scripts/mock_tools/ez_cat_sbatch_BGS.sh index 445c026dc..aae6579d7 100755 --- a/scripts/mock_tools/ez_cat_sbatch_BGS.sh +++ b/scripts/mock_tools/ez_cat_sbatch_BGS.sh @@ -3,7 +3,7 @@ #SBATCH --qos=regular #SBATCH --nodes=1 #SBATCH --constraint=cpu -#SBATCH --array=511-900 +#SBATCH --array=901-1000 #SBATCH --account=desi source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main From 6f7a402b5f41812584a8d86fc822c4c09dca1f86 Mon Sep 17 00:00:00 2001 From: echaussidon Date: Thu, 29 Feb 2024 17:19:57 -0800 Subject: [PATCH 150/297] add cuts for WISE VAR QSO sncd target --- py/LSS/qso_cat_utils.py | 44 +++++++++++++++++++++++++++++++++-------- 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/py/LSS/qso_cat_utils.py b/py/LSS/qso_cat_utils.py index ebf5dfe69..2f16d1dbe 100644 --- a/py/LSS/qso_cat_utils.py +++ b/py/LSS/qso_cat_utils.py @@ -2,8 +2,8 @@ # coding: utf-8 """ -author: edmond chaussidon (CEA saclay) -contact: edmond.chaussidon@cea.fr +author: edmond chaussidon (CEA Saclay --> LBL) +contact: echaussidon@lbl.gov Remarks: * 1) log: @@ -231,6 +231,11 @@ def is_target_in_survey(QSO_cat, DESI_TARGET, target_name): from desitarget.sv1.sv1_targetmask import desi_mask as sv1_mask from desitarget.cmx.cmx_targetmask import cmx_mask + from desitarget.targetmask import scnd_mask + from desitarget.sv3.sv3_targetmask import scnd_mask as sv3_scnd_mask + from desitarget.sv2.sv2_targetmask import scnd_mask as sv2_scnd_mask + from desitarget.sv1.sv1_targetmask import scnd_mask as sv1_scnd_mask + mask_bit = None if target_name == 'BGS': @@ -266,8 +271,24 @@ def is_target_in_survey(QSO_cat, DESI_TARGET, target_name): mask_bit = sv1_mask.mask('QSO') elif DESI_TARGET == 'CMX_TARGET': mask_bit = cmx_mask.mask('SV0_QSO|MINI_SV_QSO') + elif target_name == 'WISE_VAR_QSO': + if DESI_TARGET == 'DESI_TARGET': + DESI_TARGET = 'SCND_TARGET' + mask_bit = scnd_mask.mask('WISE_VAR_QSO') + elif DESI_TARGET == 'SV3_DESI_TARGET': + mask_bit = sv3_scnd_mask.mask('WISE_VAR_QSO') + elif DESI_TARGET == 'SV2_DESI_TARGET': + DESI_TARGET = 'SV2_SCND_TARGET' + mask_bit = sv2_scnd_mask.mask('WISE_VAR_QSO') + elif DESI_TARGET == 'SV1_DESI_TARGET': + DESI_TARGET = 'SV1_SCND_TARGET' + mask_bit = sv1_scnd_mask.mask('WISE_VAR_QSO') + else: + # to avoid error, return one of the column, SV2_SCND_TARGET should be full of 0. 
+ DESI_TARGET = 'SV2_SCND_TARGET' + mask_bit = sv2_scnd_mask.mask('WISE_VAR_QSO') else: - print("not ready for other targets tahan BGS / ELG") + print("not ready for other targets than BGS / ELG / QSO / WISE_VAR_QSO") sys.exit(1) return QSO_cat[DESI_TARGET].values & mask_bit != 0 @@ -347,7 +368,8 @@ def qso_catalog_maker(redrock, mgii, qn, use_old_extname_for_redrock=False, use_ is_BGS = is_target_in_survey(QSO_cat, DESI_TARGET, 'BGS') is_ELG = is_target_in_survey(QSO_cat, DESI_TARGET, 'ELG') is_QSO = is_target_in_survey(QSO_cat, DESI_TARGET, 'QSO') - + is_WAR_WISE_QSO = is_target_in_survey(QSO_cat, DESI_TARGET, 'WISE_VAR_QSO') + is_OK_for_BGS = (QSO_cat['SPECTYPE'] == 'QSO') & (QSO_cat['IS_QSO_QN_06'] | QSO_cat['IS_QSO_MGII']) QSO_cat.loc[is_BGS & (~is_ELG) & (~is_QSO) & is_OK_for_BGS, 'QSO_MASKBITS'] += 2**5 # do not forget to update redshift for QN object ! @@ -375,6 +397,12 @@ def qso_catalog_maker(redrock, mgii, qn, use_old_extname_for_redrock=False, use_ QSO_cat.loc[is_QSO & QSO_cat['IS_QSO_QN_NEW_RR'], 'Z'] = QSO_cat['Z_NEW'][is_QSO & QSO_cat['IS_QSO_QN_NEW_RR']].values QSO_cat.loc[is_QSO & QSO_cat['IS_QSO_QN_NEW_RR'], 'ZERR'] = QSO_cat['ZERR_NEW'][is_QSO & QSO_cat['IS_QSO_QN_NEW_RR']].values + # selection for WISE_VAR_QSO targets (secondary target) --> same as QSO + is_WAR_WISE_QSO = (QSO_cat['SPECTYPE'] == 'QSO') | QSO_cat['IS_QSO_MGII'] | QSO_cat['IS_QSO_QN_095'] + QSO_cat.loc[is_WAR_WISE_QSO & is_WAR_WISE_QSO, 'QSO_MASKBITS'] += 2**7 + QSO_cat.loc[is_WAR_WISE_QSO & QSO_cat['IS_QSO_QN_NEW_RR'], 'Z'] = QSO_cat['Z_NEW'][is_WAR_WISE_QSO & QSO_cat['IS_QSO_QN_NEW_RR']].values + QSO_cat.loc[is_WAR_WISE_QSO & QSO_cat['IS_QSO_QN_NEW_RR'], 'ZERR'] = QSO_cat['ZERR_NEW'][is_WAR_WISE_QSO & QSO_cat['IS_QSO_QN_NEW_RR']].values + # Add quality cuts: no cut on zwarn, cut on fiberstatus QSO_cat.loc[~((QSO_cat['COADD_FIBERSTATUS'] == 0) | (QSO_cat['COADD_FIBERSTATUS'] == 8388608) | (QSO_cat['COADD_FIBERSTATUS'] == 16777216)), 'QSO_MASKBITS'] = 0 @@ -563,10 +591,10 @@ def build_qso_catalog_from_healpix(redux='/global/cfs/cdirs/desi/spectro/redux/' QSO_cat = pd.concat(pool.starmap(qso_catalog_for_a_pixel, arguments), ignore_index=True) logging.getLogger("QSO_CAT_UTILS").setLevel(logging.INFO) - if not keep_all: - # to save computational time - log.info('Compute the TS probas...') - compute_RF_TS_proba(QSO_cat) + # if not keep_all: + # # to save computational time + # log.info('Compute the TS probas...') + # compute_RF_TS_proba(QSO_cat) if keep_qso_targets: log.info('Keep only qso targets...') From 5259635c366b26d5542b3ff04f9fd266bf8666b6 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 29 Feb 2024 23:01:41 -0500 Subject: [PATCH 151/297] Update combdata_main.py --- scripts/main/combdata_main.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/main/combdata_main.py b/scripts/main/combdata_main.py index 17aae1e85..95d6b0c56 100644 --- a/scripts/main/combdata_main.py +++ b/scripts/main/combdata_main.py @@ -545,11 +545,11 @@ #tj.write(outfs,format='fits', overwrite=True) print('joined to spec data and wrote out to '+outfs) - #if uptileloc: - # print('counting tiles') - # tc = ct.count_tiles_better('dat',tp+notqso,specrel=specrel) - # print('writing tile counts') - # tc.write(outtc,format='fits', overwrite=True) + if uptileloc: + print('counting tiles') + tc = ct.count_tiles_better('dat',tp+notqso,specrel=specrel) + print('writing tile counts') + tc.write(outtc,format='fits', overwrite=True) if args.get_petalsky == 'y': From e13932e6acf1b949e32620a2f751d7f983bb9e0b 
Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 1 Mar 2024 10:19:02 -0500 Subject: [PATCH 152/297] Update combdata_main.py --- scripts/main/combdata_main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/main/combdata_main.py b/scripts/main/combdata_main.py index 95d6b0c56..c5045329a 100644 --- a/scripts/main/combdata_main.py +++ b/scripts/main/combdata_main.py @@ -370,8 +370,8 @@ if prog == 'dark': if args.tracer == 'all': - tps = ['LRG','ELG','QSO','ELG_LOP','ELG_LOP'] - notqsos = ['','','','','notqso'] + tps = ['QSO','LRG','ELG_LOP','ELG_LOP','ELG'] #order is not least to most memory intensive + notqsos = ['','','notqso','',''] else: tps = [args.tracer] notqsos = [args.notqso] From 0d893f33881fd92eff7a174762f35167159f17cc Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 4 Mar 2024 14:23:16 -0500 Subject: [PATCH 153/297] Update ssr_tools_new.py --- py/LSS/ssr_tools_new.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/py/LSS/ssr_tools_new.py b/py/LSS/ssr_tools_new.py index 287b95e7a..3db65a09f 100644 --- a/py/LSS/ssr_tools_new.py +++ b/py/LSS/ssr_tools_new.py @@ -32,7 +32,7 @@ def fit_cons(dl,el,minv=0,step=0.01): return oldcost,c -def get_tsnr2z(tracer='ELG',night=20230128,expid=165078,tsnrdir='/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/TSNR2z/'): +def get_tsnr2z(tracer='ELG',night=20230128,expid=165078,tsnrdir='/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/TSNR2z/',dz=0.001): ''' get the relative template signal to noise ^2 for a given tracer type, night, and expid Copied from script from Julien Guy @@ -119,7 +119,7 @@ def get_tsnr2z(tracer='ELG',night=20230128,expid=165078,tsnrdir='/global/cfs/cdi dflux = tflux-smooth_flux # compute tsnr2 on a redshift range - redshift=np.linspace(0,1.7,int(1.700001/0.001)+1) + redshift=np.linspace(0,1.7,int(1.700001/dz)+1) tsnr2=np.zeros(redshift.shape) for i,z in enumerate(redshift): tmp=np.interp(wave,twave*(1+z)/(1+zref),dflux) From 490a993287f8731d287268394826ee88c49e6a4e Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 6 Mar 2024 18:15:43 -0500 Subject: [PATCH 154/297] Update getpota_daily_ran.py --- scripts/getpota_daily_ran.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/getpota_daily_ran.py b/scripts/getpota_daily_ran.py index aa4a3e157..c3529805f 100644 --- a/scripts/getpota_daily_ran.py +++ b/scripts/getpota_daily_ran.py @@ -22,7 +22,7 @@ parser.add_argument("--prog", choices=['DARK','BRIGHT']) parser.add_argument("--getcoll", choices=['n','y'],default='y') parser.add_argument("--minr",default=0,type=int) -parser.add_argument("--maxr",default=18,type=int) +parser.add_argument("--maxr",default=4,type=int) args = parser.parse_args() From 194cc68eec237ba4fcf373ef7cf56e1f5ccef6c7 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 6 Mar 2024 18:26:28 -0500 Subject: [PATCH 155/297] Update mkCat_main_ran.py --- scripts/main/mkCat_main_ran.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/mkCat_main_ran.py b/scripts/main/mkCat_main_ran.py index c9f89e837..0e81762f1 100644 --- a/scripts/main/mkCat_main_ran.py +++ b/scripts/main/mkCat_main_ran.py @@ -79,7 +79,7 @@ parser.add_argument("--maskz", help="apply sky line mask to redshifts?",default='n') parser.add_argument("--faver", help="version of fiberassign code to use for random; versions for main should be 5.0.0 or greater",default='5.0.0') parser.add_argument("--minr", help="minimum number for random files",default=0) -parser.add_argument("--maxr", help="maximum 
for random files, default is 1, but 18 are available (use parallel script for all)",default=18) +parser.add_argument("--maxr", help="maximum for random files, default is 1, but 18 are available (use parallel script for all)",default=4) parser.add_argument("--par", help="run different random number in parallel?",default='y') parser.add_argument("--notqso",help="if y, do not include any qso targets",default='n') From 587b4a38f26068635e7d395cc671eed75e610a68 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 8 Mar 2024 18:24:37 -0500 Subject: [PATCH 156/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index 43e15c0ef..57c5ce813 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -248,7 +248,7 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim if name == 'randoms': #if 'default' in weight_type: # weights *= catalog['WEIGHT'][mask] - if 'bitwise' in weight_type: + if 'bitwise' in weight_type and 'default' in weight_type: weights /= catalog['FRAC_TLOBS_TILES'][mask] print('dividing weights by FRAC_TLOBS_TILES') # if 'RF' in weight_type: From a54e70e2143776027c13f4614aec715a98126929 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 8 Mar 2024 18:32:15 -0500 Subject: [PATCH 157/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index 57c5ce813..253adc531 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -241,8 +241,9 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim print('multiplying weights by WEIGHT_focal') if name == 'data' and 'bitwise' in weight_type: - weights /= catalog['WEIGHT_COMP'][mask] - print('dividing weights by WEIGHT_COMP') + if 'default' in weight_type: + weights /= catalog['WEIGHT_COMP'][mask] + print('dividing weights by WEIGHT_COMP') weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] if name == 'randoms': From a4177b4e207c419ca766afa060e8fb7279f22952 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 8 Mar 2024 18:54:09 -0500 Subject: [PATCH 158/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index 253adc531..d07bd77f6 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -301,10 +301,14 @@ def read_positions_weights(name): cat_fns = [cat_fns] if name=='data': def _get_tab(cat_fn): + tab = Table.read(cat_fn) if 'bitwise' in weight_type: - tab = join(Table.read(cat_fn), Table.read(cat_full)['TARGETID', 'BITWEIGHTS'], keys='TARGETID', join_type='left') - else: - tab = Table.read(cat_fn) + if 'BITWEIGHTS' in list(tab.dtype.names): + pass + else: + tab = join(Table.read(cat_fn), Table.read(cat_full)['TARGETID', 'BITWEIGHTS'], keys='TARGETID', join_type='left') + #else: + return tab positions_weights = [get_clustering_positions_weights(_get_tab(cat_fn), distance, zlim=zlim, maglim=maglim, weight_type=weight_type, name=name, option=option) for cat_fn in cat_fns] else: From 15e6b971fa97ed066869a4c40c51abbd3640f7ce Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 11 Mar 2024 00:05:04 -0400 Subject: [PATCH 159/297] Update cosmodesi_io_tools.py --- 
py/LSS/cosmodesi_io_tools.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index d07bd77f6..91584fb89 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -152,7 +152,8 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) if maglim is not None: mask = (catalog['Z'] >= zlim[0]) & (catalog['Z'] < zlim[1]) & (catalog['ABSMAG_R'] >= maglim[0]) & (catalog['ABSMAG_R'] < maglim[1]) - mask &= (catalog['FRAC_TLOBS_TILES'] != 0) + if 'bitwise' in weight_type and 'default' in weight_type: + mask &= (catalog['FRAC_TLOBS_TILES'] != 0) if option: if 'noNorth' in option: decmask = catalog['DEC'] < 32.375 From 454dce4602256f8d3b1314a5c57362d693fa7b0c Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 11 Mar 2024 08:47:36 -0400 Subject: [PATCH 160/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index db3efd753..322c2f933 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -552,7 +552,7 @@ def _wrapper(N): #if args.test == 'n': common.write_LSS(res,fn,comments=['added k+e corrections']) -if type == 'BGS_BRIGHT-21.5' and args.survey == 'Y1': +if type == 'BGS_BRIGHT-21.5' and args.survey == 'Y1' args.clusd == 'y': ffull = dirout+type+notqso+'_full'+args.use_map_veto+'.dat.fits' if os.path.isfile(ffull) == False or args.redoBGS215 == 'y': logf.write('making BGS_BRIGHT-21.5 full data catalog for '+str(datetime.now())) From a6b3f6bf5e015902dcd9708170e7f780cb8f6ad6 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 11 Mar 2024 08:48:26 -0400 Subject: [PATCH 161/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index 322c2f933..1ef93404a 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -552,7 +552,7 @@ def _wrapper(N): #if args.test == 'n': common.write_LSS(res,fn,comments=['added k+e corrections']) -if type == 'BGS_BRIGHT-21.5' and args.survey == 'Y1' args.clusd == 'y': +if type == 'BGS_BRIGHT-21.5' and args.survey == 'Y1' and args.clusd == 'y': ffull = dirout+type+notqso+'_full'+args.use_map_veto+'.dat.fits' if os.path.isfile(ffull) == False or args.redoBGS215 == 'y': logf.write('making BGS_BRIGHT-21.5 full data catalog for '+str(datetime.now())) From c3cdade1a6fa0ea69db473be9a5d97ef5d00eb6e Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 11 Mar 2024 17:01:26 -0400 Subject: [PATCH 162/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index b9d70c0db..fc4c6b43b 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -35,8 +35,10 @@ for tp in tps: mainp = main(tp,args.verspec,survey=args.survey) df_cutdisk = Table(fitsio.read(indir+tp+'_full_HPmapcut.dat.fits')) - df_cutdisk.remove_columns(['BITWEIGHTS','PROB_OBS']) df_cutdisk_cols = list(df_cutdisk.dtype.names) + if 'BITWEIGHTS' in df_cutdisk_cols: + df_cutdisk.remove_columns(['BITWEIGHTS','PROB_OBS']) + df = Table(fitsio.read(indir+tp+'_full.dat.fits')) df_cols = list(df.dtype.names) for name in df_cols: From b967575aa2bc0322af3a584e126fdb58b3af8efc Mon Sep 17 00:00:00 2001 
From: ashleyjross Date: Mon, 11 Mar 2024 17:49:31 -0400 Subject: [PATCH 163/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index fc4c6b43b..f5d574fd4 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -49,8 +49,8 @@ df[name] = np.ones(len(df)) print(name+' added to not file as 1') - mapn = fitsio.read(lssmapdirout+tp+'_mapprops_healpix_nested_nside'+str(nside)+'_N.fits') - maps = fitsio.read(lssmapdirout+tp+'_mapprops_healpix_nested_nside'+str(nside)+'_S.fits') + mapn = fitsio.read(lssmapdirout+tp.replace('-21.5','')+'_mapprops_healpix_nested_nside'+str(nside)+'_N.fits') + maps = fitsio.read(lssmapdirout+tp.replace('-21.5','')+'_mapprops_healpix_nested_nside'+str(nside)+'_S.fits') mapcuts = mainp.mapcuts df_cut = common.apply_map_veto_arrays(df,mapn,maps,mapcuts) sel_idmatch = np.isin(df_cut['TARGETID'],df_cutdisk['TARGETID']) From 9f6bc10b7f05c80be2db61f32ec0468f973cd1c1 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 11 Mar 2024 17:52:02 -0400 Subject: [PATCH 164/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index f5d574fd4..92bbba17b 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -41,9 +41,13 @@ df = Table(fitsio.read(indir+tp+'_full.dat.fits')) df_cols = list(df.dtype.names) + rem_cols = [] for name in df_cols: if name not in df_cutdisk_cols: print(name+' not in HPmapcut file') + rem_cols.append(name) + if len(rem_cols) > 0: + df_cutdisk.remove_columns(rem_cols) for name in df_cutdisk_cols: if name not in df_cols: df[name] = np.ones(len(df)) From de81949325f8731fc4c1e7dda565a8a1651bee61 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 11 Mar 2024 17:54:03 -0400 Subject: [PATCH 165/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 92bbba17b..de606bd0b 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -47,7 +47,7 @@ print(name+' not in HPmapcut file') rem_cols.append(name) if len(rem_cols) > 0: - df_cutdisk.remove_columns(rem_cols) + df.remove_columns(rem_cols) for name in df_cutdisk_cols: if name not in df_cols: df[name] = np.ones(len(df)) From 3c0e5370b3a0d0ae5c1452dfabd176b6ddee7d16 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 11 Mar 2024 17:56:00 -0400 Subject: [PATCH 166/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index de606bd0b..e2038cbdf 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -60,6 +60,7 @@ sel_idmatch = np.isin(df_cut['TARGETID'],df_cutdisk['TARGETID']) df_cutnomatch = df_cut[~sel_idmatch] #df_comb = np.concatenate((df_cutdisk,df_cutnomatch)) + print(df_cutdisk.dtype.names,df_cutnomatch.dtype.names) df_comb = vstack((df_cutdisk,df_cutnomatch)) print(tp,len(df_comb),len(np.unique(df_comb['TARGETID']))) if tp[:3] != 'BGS': From 9d0d549d2d760123bad166489c04e5730711b529 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 11 Mar 2024 17:59:30 -0400 Subject: [PATCH 167/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 2 ++ 1 file 
changed, 2 insertions(+) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index e2038cbdf..26354c173 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -38,6 +38,8 @@ df_cutdisk_cols = list(df_cutdisk.dtype.names) if 'BITWEIGHTS' in df_cutdisk_cols: df_cutdisk.remove_columns(['BITWEIGHTS','PROB_OBS']) + if 'BITWEIGHTS_1' in df_cutdisk_cols: + df_cutdisk.remove_columns(['BITWEIGHTS_1','PROB_OBS_1','BITWEIGHTS_2','PROB_OBS_2']) df = Table(fitsio.read(indir+tp+'_full.dat.fits')) df_cols = list(df.dtype.names) From 27ece84f690728b0325373bb357ec042f2cf60ed Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 11 Mar 2024 18:04:26 -0400 Subject: [PATCH 168/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 26354c173..4c6726883 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -40,7 +40,7 @@ df_cutdisk.remove_columns(['BITWEIGHTS','PROB_OBS']) if 'BITWEIGHTS_1' in df_cutdisk_cols: df_cutdisk.remove_columns(['BITWEIGHTS_1','PROB_OBS_1','BITWEIGHTS_2','PROB_OBS_2']) - + df_cutdisk_cols = list(df_cutdisk.dtype.names) df = Table(fitsio.read(indir+tp+'_full.dat.fits')) df_cols = list(df.dtype.names) rem_cols = [] From 37b0ab4b25ddecb7eadb4582355f6d04c427aae2 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 11 Mar 2024 18:10:47 -0400 Subject: [PATCH 169/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index 1ef93404a..fe81251de 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -552,7 +552,7 @@ def _wrapper(N): #if args.test == 'n': common.write_LSS(res,fn,comments=['added k+e corrections']) -if type == 'BGS_BRIGHT-21.5' and args.survey == 'Y1' and args.clusd == 'y': +if type == 'BGS_BRIGHT-21.5' and args.survey == 'Y1': #and args.clusd == 'y': ffull = dirout+type+notqso+'_full'+args.use_map_veto+'.dat.fits' if os.path.isfile(ffull) == False or args.redoBGS215 == 'y': logf.write('making BGS_BRIGHT-21.5 full data catalog for '+str(datetime.now())) From 85dba408826ac83479cf41046abff712bf251948 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 13 Mar 2024 11:27:08 -0400 Subject: [PATCH 170/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index 91584fb89..99dcffda3 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -103,8 +103,19 @@ def select_region(ra, dec, region): elif region == 'DS': mask = dec > -25 mask &= ~mask_ra - else: - raise ValueError('Input region must be one of ["DN", "DS"].') + elif 'GC' in region: + from astropy.coordinates import SkyCoord + import astropy.units as u + c = SkyCoord(ra* u.deg,dec* u.deg,frame='icrs') + gc = c.transform_to('galactic') + sel_ngc = gc.b > 0 + if region == 'NGC': + mask = sel_ngc + if region == 'SGC': + mask = ~sel_ngc + + #else: + # raise ValueError('Input region must be one of ["DN", "DS"].') return mask @@ -362,11 +373,11 @@ def get_full_positions_weights(catalog, name='data', weight_type='default', fibe from pycorr.twopoint_counter import get_inverse_probability_weight if weight_attrs is None: weight_attrs = {} mask = np.ones(len(catalog), dtype='?') - if 
region in ['DS', 'DN']: - mask &= select_region(catalog['RA'], catalog['DEC'], region) - elif region: - #mask &= catalog['PHOTSYS'] == region.strip('_') - mask &= catalog['PHOTSYS'] == region.strip('GC') + #if region in ['DS', 'DN']: + mask &= select_region(catalog['RA'], catalog['DEC'], region) + #elif region: + # #mask &= catalog['PHOTSYS'] == region.strip('_') + # mask &= catalog['PHOTSYS'] == region.strip('GC') if fibered: mask &= catalog['LOCATION_ASSIGNED'] From 3cb3d15a1f7259a0091463c5ee932b97eb86f3dc Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Thu, 8 Feb 2024 08:02:07 -0800 Subject: [PATCH 171/297] changes --- Sandbox/LSSpipe_Y1mock_2ndgen_BGS.txt | 117 +++++++ bin/Y1ALTMTLRealizationsBRIGHT_mock.sh | 67 ++-- bin/Y1ALTMTLRealizationsBRIGHT_mock2.sh | 319 ------------------ bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh | 14 +- bin/runAltMTLParallel.py | 3 +- py/LSS/main/mockaltmtltools.py | 18 +- scripts/mock_tools/add_extra_realizations.py | 4 +- scripts/mock_tools/add_extra_tilesTracker.py | 4 +- .../mock_tools/getpota_Y1_bright_script.sh | 42 +-- scripts/mock_tools/initAMTL_bright.py | 18 + scripts/mock_tools/prepare_mocks_Y1_bright.py | 2 +- scripts/mock_tools/prepare_script.sh | 20 +- scripts/mock_tools/prepare_script_bright.sh | 14 +- .../run_Y1SecondGen_initialledger_batch.sh | 4 +- 14 files changed, 234 insertions(+), 412 deletions(-) create mode 100644 Sandbox/LSSpipe_Y1mock_2ndgen_BGS.txt delete mode 100755 bin/Y1ALTMTLRealizationsBRIGHT_mock2.sh create mode 100644 scripts/mock_tools/initAMTL_bright.py diff --git a/Sandbox/LSSpipe_Y1mock_2ndgen_BGS.txt b/Sandbox/LSSpipe_Y1mock_2ndgen_BGS.txt new file mode 100644 index 000000000..f706f68d2 --- /dev/null +++ b/Sandbox/LSSpipe_Y1mock_2ndgen_BGS.txt @@ -0,0 +1,117 @@ +#pseudo pipeline describing how BGS LSS catalogs get created from 2nd gen mocks, very similar to LSSpipe_Y1mock_2ndgen.txt +#Set up +>> source /global/common/software/desi/desi_environment.sh main +In this documentation, $LSS is the directory where you have the LSS repository + +#1), cutsky mocks get created from boxes and have the Y1 tiles list applied to geometry (Alex Smith) + +#2) These are adapted to run LSS and AltMTL pipelines, giving the definition of BGS subtype and have columns added necessary for fiberassign, and have the targeting mask applied via +https://github.com/desihub/LSS/blob/main/scripts/mock_tools/prepare_mocks_Y1_bright.py + +>> ./prepare_script_bright.sh + +#3) CREATE FIBER COLLISION FILE AND CALCULATE POTA FILES +https://github.com/desihub/LSS/blob/main/scripts/getpotaY1_mock.py + +>> ./getpota_Y1_bright_script.sh + +### AltMTL + LSS pipeline INSTRUCTIONS ### + +4) CREATE INITIAL LEDGERS +>> ./run_Y1SecondGen_initialledger_batch.sh (adapt to define the mock path and program dark/bright) + + +*) GOTO LSS/bin + +5) INITIALIZE ALTMTL DIRECTORY STRUCTURE FOR 1 MOCK +>> ./Y1ALTMTLRealizationsBRIGHT_mock_init.sh + +6) MAKE PARENT DIRECTORIES FOR OTHER REALIZATIONS +>> python initAMTL_bright.py + +8) MANUALLY ADD EXTRA TILES TO THE END OF mainsurvey-DARKobscon-TileTracker.ecsv +>> python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/add_extra_tilesTracker.py + +9) RUN ALTMTL PROCESS ON MOCK REALIZATIONS +This process runs on regular queue that takes 12 hours but to complete the operation we need more time. 
Therefore, it needs to be run at least twice +>> nohup ./Y1ALTMTLRealizationsBRIGHT_mock.sh & + +** Once the first process is running, you can queue the second one so that it starts after the first ends, using the option --dependency=afterany: in dateLoopAltMTLBugFix_mock_batch.sh + The job ID of the first process can be found with squeue -u + +10) RUN A SECOND TIME +>> nohup ./Y1ALTMTLRealizationsBRIGHT_mock.sh & + + + + + + + + + +# Now you have several pathways. Make clustering catalogs only from potential assignments (applying all the data masking) or mimic the entire data processing, starting from AltMTL and running through the LSS pipeline. + +### POTENTIAL ASSIGNMENT ONLY INSTRUCTIONS ### + +PYTHONPATH=$PYTHONPATH:$HOME/LSS/py + +#Make complete LSS catalogs with goodhardware and imaging veto masks applied +#Code does randoms in serial, 4 are likely enough +>> python scripts/mock_tools/pota2clus_simp.py --veto _gtlimaging --realization 0 --maxr + +----------------------------------------------- + + +### AltMTL + LSS pipeline INSTRUCTIONS ### +## CAUTION: NO PIP WEIGHTS YET TESTED OR IMPLEMENTED ## + +#The output directory names are just suggestions. +#Generate initial ledgers +# First define an output directory (like in your scratch, or, if in production for obscon=dark, this is $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit). + +# Create initial ledgers and save the hplist to feed MockAltMTL production, both saved in /initial_ledger (~20 minutes) +>> python $LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/forFA.fits /altmtl/initled/ DARK + + You first need to create (mkdir) the directory /altmtl/initled + It will save the HP list in the same directory with the name hpxlist_.txt + +# Run the AltMTL process (without generating bitweights yet) for 1 realization with $LSS/scripts/mock_tools/MockAltMTLScriptMain.sh (~24 hours) +# Modify by hand several options inside MockAltMTLScriptMain.sh +# These are the ones you should worry about (up to line 50; there are other options, but you can ignore them) + * mockNumber= If running over production directories, set the realization here and you can skip the simName and targfile configuration. + * simName="/altmtl/initled/" This is the directory where all the AltMTL products for a given mock# are saved + * path2LSS=/pscratch/sd/a/MYUSER/codes/LSS/bin/ This is the path to the LSS repo bin subdirectory + * ndir=1 Number of alternative ledgers. In principle, we should run a single realization to act as the data, and then another large run with ndir=128 or 256 to calculate PIP weights. If ndir = 1, then BitWeights are not calculated. + * obscon='DARK' Either DARK or BRIGHT + * endDate='--endDate=20220613' End date for the Y1 campaign + * exampleLedgerBase=/initial_ledger This is the directory you indicated when creating the initial ledgers + * hpListFile="/initial_ledger/hpxlist_dark.txt" This is the hp file list created while running the initial ledgers + * targfile='--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/forFA.fits' This is the path to the parent target sample + +# This script takes quite some time to run (~24h). It generates ndir random realizations (with randomized subpriorities) and runs fiber assignment, creating both the potential assignments and the assignments themselves. +# Their outputs will serve as input for the LSS pipeline. + + +# Run the LSS pipeline as follows (a short Python sketch that generates these per-mock commands appears just below). 
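A minimal Python sketch of that per-mock looping, assuming the production AbacusSummit directory layout and the mkCat_SecondGen.py flags quoted in this document; the loop itself is a hypothetical convenience, not a script in the LSS repo. It prints one combd command per mock realization:

    # Hypothetical loop: build one mkCat_SecondGen.py "combd" command per mock.
    # The base path and every flag are copied from the examples in this document.
    base = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit'
    template = ('srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi '
                'python mkCat_SecondGen.py --base_output {d} --simName {d} '
                '--mockver ab_secondgen --mockmin {lo} --mockmax {hi} '
                '--survey Y1 --add_gtl y --specdata iron --tracer dark --combd y')
    for mocknum in range(0, 5):
        # run one mock at a time: mockmin = mocknum, mockmax = mocknum + 1
        print(template.format(d='{0}/altmtl{1}'.format(base, mocknum),
                              lo=mocknum, hi=mocknum + 1))

Whether the printed lines are pasted into a terminal or wrapped with subprocess.run is up to the user; the point is only that mockmin/mockmax advance one mock at a time, matching the caution below.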
+# CAUTION: it has only been tested running one mock at a time, with mockmin = 0 and mockmax = 1 +# Prepare and combine the dark- or bright-time data. Best run in batch mode, since an interactive run might die partway through. +# We don't need randoms at this stage any more. They will be read directly from the data randoms when creating the full catalogs +>> srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python $LSS/scripts/mock_tools/mkCat_SecondGen.py --base_output --simName /altmtl_main_rea{MOCKNUM} --mockver ab_secondgen --mockmin 0 --mockmax 1 --survey Y1 --add_gtl y --specdata iron --tracer dark --combd y + +[40 minutes] + +Example for mock = 1: srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python mkCat_SecondGen.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl1 --simName /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl1 --mockver ab_secondgen --mockmin 1 --mockmax 2 --survey Y1 --add_gtl y --specdata iron --tracer dark --combd y + + + +# Create the full catalogs for data and randoms and apply the vetos. Calculate the FKP weights and make the clustering catalogs. +#LRG +>> srun -N 1 -C cpu -t 02:00:00 --qos interactive --account desi python mkCat_SecondGen.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mockmin 0 --mockmax 1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --minr 0 --maxr 18 --fulld y --fullr y --apply_map_veto y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --resamp y --nz y --getFKP y --mkclusdat y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit + +#ELG (ELG_LOP with notqso) +>> srun -N 1 -C cpu -t 02:00:00 --qos interactive --account desi python mkCat_SecondGen.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mockmin 0 --mockmax 1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_map_veto y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --resamp y --nz y --getFKP y --mkclusdat y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit + +#QSO +>> srun -N 1 -C cpu -t 02:00:00 --qos interactive --account desi python mkCat_SecondGen.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl{MOCKNUM} --mockver ab_secondgen --mockmin 0 --mockmax 1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --minr 0 --maxr 18 --fulld y --fullr y --apply_map_veto y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --resamp y --nz y --getFKP y --mkclusdat y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit diff --git a/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh b/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh index a789fb76f..e611d285f 100755 --- a/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh +++ b/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh @@ -1,11 +1,13 @@ #!/bin/bash start=`date +%s.%N` +##TEMPrealization=0 + #simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written #simName=JL_DebugReprocReprod2 simName="altmtl{mock_number}" #Location where you have cloned the LSS Repo -path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin +path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin/ # Flags for debug/verbose mode/profiling code time usage. 
# Uncomment second set of options to turn on the modes @@ -16,23 +18,24 @@ debug='--debug' verbose='--verbose' #profile='--profile' -if [ -z "$debug" ] -then - echo "\$debug is empty" -else - echo "\$debug is set" - pwd - InitWorkingDirectory=`pwd` - cd $path2LSS - cd .. - pwd - pip install --user . - cd $InitWorkingDirectory - pwd - echo "end of pip in script attempt" -fi +#if [ -z "$debug" ] +#then +# echo "\$debug is empty" +#else +# echo "\$debug is set" +# pwd +# InitWorkingDirectory=`pwd` +# cd $path2LSS +# cd .. +# pwd +# pip install --user . +# cd $InitWorkingDirectory +# pwd +# echo "end of pip in script attempt" +#fi #Uncomment second option if running on mocks +#mock='' mock='--mock' #ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory @@ -42,7 +45,8 @@ mock='--mock' #Uncomment the following line to set your own/nonscratch directory #ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ -ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/ +#ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/ if [[ "${NERSC_HOST}" == "cori" ]]; then CVal='haswell' @@ -81,9 +85,10 @@ fi seed=3593589 #Number of realizations to generate. Ideally a multiple of 64 for bitweights #However, you can choose smaller numbers for debugging +#Mock realization mockinit=0 -mockend=5 -let ndir=$mockend-$mockinit +mockend=25 +let ndir=$mockend-$mockinit #Uncomment second option if you want to clobber already existing files for Alt MTL generation @@ -91,8 +96,8 @@ overwrite='' #overwrite='--overwrite' #Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") -#obscon='DARK' obscon='BRIGHT' +#obscon='BRIGHT' #Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) #survey='sv3' @@ -127,7 +132,8 @@ printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $s #List of healpixels to create Alt MTLs for #hpListFile="$path2LSS/MainSurveyHPList_mock.txt" -hpListFile="/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl0/initled/hpxlist_bright.txt" +##TEMPhpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$realization/initled/hpxlist_dark.txt" +#hpListFile="$path2LSS/MainSurveyHPList.txt" #hpListFile="$path2LSS/DebugMainHPList.txt" #hpListFile="$path2LSS/SV3HPList.txt" @@ -141,25 +147,24 @@ hpListFile="/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/Abacu shuffleBrightPriorities='' -#shuffleELGPriorities='' +shuffleELGPriorities='' #shuffleELGPriorities='--shuffleELGPriorities' #PromoteFracBGSFaint=0.2 PromoteFracBGSFaint=0.0 #PromoteFracELG=0.1 -PromoteFracELG=0.0 +PromoteFracELG=0. # location of original MTLs to shuffle. # Default directory is a read only mount of the CFS filesystem # You can only access that directory from compute nodes. 
# Do NOT use the commented out directory (the normal mount of CFS) # unless the read only mount is broken -#exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ +##TEMPexampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$realization/initled #exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ #exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ #exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ #Options for DateLoopAltMTL and runAltMTLParallel -exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl0/initled #Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). #Default = Empty String/False. Uncomment second option if you want to restart from the first observations @@ -180,6 +185,10 @@ echo 'setting QVal here for debug. Fix later.' #QVal='debug' QVal='regular' #QVal='interactive' +# + + + #Number of nodes to run on. This will launch up to 64*N jobs #if that number of alternate universes have already been generated #Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually @@ -195,8 +204,8 @@ getosubp='' #subpriorities are shuffled. debug mode for main survey #will only require these flags to be set by uncommenting second options -#dontShuffleSubpriorities='' -#reproducing='' +dontShuffleSubpriorities='' +reproducing='' #dontShuffleSubpriorities='--dontShuffleSubpriorities' #reproducing='--reproducing' #Include secondary targets? @@ -208,8 +217,8 @@ secondary='' #Otherwise this is optional #targfile='' #CHANGEME IF RUNNING ON MOCKS #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory -targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/forFA{mock_number}.fits" -#targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' +#targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA{mock_number}.fits" +targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/forFA{mock_number}.fits" #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' diff --git a/bin/Y1ALTMTLRealizationsBRIGHT_mock2.sh b/bin/Y1ALTMTLRealizationsBRIGHT_mock2.sh deleted file mode 100755 index 4255904df..000000000 --- a/bin/Y1ALTMTLRealizationsBRIGHT_mock2.sh +++ /dev/null @@ -1,319 +0,0 @@ -#!/bin/bash -start=`date +%s.%N` - -##TEMPrealization=0 - -#simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written -#simName=JL_DebugReprocReprod2 -simName="altmtl{mock_number}" -#Location where you have cloned the LSS Repo -path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin/ - -# Flags for debug/verbose mode/profiling code time usage. -# Uncomment second set of options to turn on the modes -#debug='' -#verbose='' -profile='' -debug='--debug' -verbose='--verbose' -#profile='--profile' - -#if [ -z "$debug" ] -#then -# echo "\$debug is empty" -#else -# echo "\$debug is set" -# pwd -# InitWorkingDirectory=`pwd` -# cd $path2LSS -# cd .. -# pwd -# pip install --user . 
-# cd $InitWorkingDirectory -# pwd -# echo "end of pip in script attempt" -#fi - -#Uncomment second option if running on mocks -#mock='' -mock='--mock' - -#ALTMTLHOME is a home directory for all of your alternate MTLs. Default is your scratch directory -#There will be an environment variable $ALTMTLHOME for the "survey alt MTLs" -#However, you should specify your own directory to a. not overwrite the survey alt MTLs -# and b. keep your alt MTLs somewhere that you have control/access - -#Uncomment the following line to set your own/nonscratch directory -#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ -#ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/ -ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/ - -if [[ "${NERSC_HOST}" == "cori" ]]; then - CVal='haswell' - QVal='interactive' - ProcPerNode=32 - if [[ -z "${ALTMTLHOME}" ]]; then - ALTMTLHOME=$CSCRATCH - else - echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" - fi -elif [[ "${NERSC_HOST}" == "perlmutter" ]]; then - srunConfig='-C cpu -q regular' - CVal='cpu' - QVal='interactive' - ProcPerNode=128 - if [[ -z "${ALTMTLHOME}" ]]; then - ALTMTLHOME=$PSCRATCH - else - echo "ALTMTLHOME Already set. ALTMTLHOME=$ALTMTLHOME" - fi - -else - echo "This code is only supported on NERSC Cori and NERSC Perlmutter. Goodbye" - exit 1234 -fi - - - - -#Options for InitializeAltMTLs - -#Random seed. Change to any integer you want (or leave the same) -#If seed is different between two otherwise identical runs, the initial MTLs will also be different -#seed is also saved in output directory -#seed=14126579 -seed=3593589 -#Number of realizations to generate. Ideally a multiple of 64 for bitweights -#However, you can choose smaller numbers for debugging -#Mock realization -mockinit=5 -mockend=25 -let ndir=$mockend-$mockinit - - -#Uncomment second option if you want to clobber already existing files for Alt MTL generation -overwrite='' -#overwrite='--overwrite' - -#Observing conditions for generating MTLs (should be all caps "DARK" or "BRIGHT") -obscon='BRIGHT' -#obscon='BRIGHT' - -#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail) -#survey='sv3' -survey='main' -# options are default None (empty strings). Uncommenting the second options will set them to the Y1 start and end dates. -startDate='' -#endDate='' -#startDate='2021-05-13T08:15:37+00:00' -endDate='2022-06-24T00:00:00+00:00' - -#For rundate formatting in simName, either manually modify the string below -#to be the desired date or comment that line out and uncomment the -#following line to autogenerate date strings. 
-#To NOT use any date string specification, use the third line, an empty string -#datestring='071322' -#datestring=`date +%y%m%d` -datestring='' - -#Can save time in MTL generation by first writing files to local tmp directory and then copying over later -#uncommenting the second option will directly write to your output directory -usetmp='' -#usetmp='--dontUseTemp' - -if [ -z $usetmp ] -then - outputMTLDirBaseBase=`mktemp -d /dev/shm/"$USER"_tempdirXXXX` -else - outputMTLDirBaseBase=$ALTMTLHOME -fi -printf -v outputMTLDirBase "$outputMTLDirBaseBase/$simName/" $datestring $ndir $survey -printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $survey - -#List of healpixels to create Alt MTLs for -#hpListFile="$path2LSS/MainSurveyHPList_mock.txt" -##TEMPhpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$realization/initled/hpxlist_dark.txt" -#hpListFile="$path2LSS/MainSurveyHPList.txt" -#hpListFile="$path2LSS/DebugMainHPList.txt" -#hpListFile="$path2LSS/SV3HPList.txt" - -#These two options only are considered if the obscon is BRIGHT -#First option indicates whether to shuffle the top level priorities -#of BGS_FAINT/BGS_FAINT_HIP. Uncomment section option to turn off shuffling of bright time priorities -#Second option indicates what fraction/percent -#of BGS_FAINT to promote to BGS_FAINT_HIP. Default is 20%, same as SV3 - -#shuffleBrightPriorities='--shuffleBrightPriorities' -shuffleBrightPriorities='' - - -shuffleELGPriorities='' -#shuffleELGPriorities='--shuffleELGPriorities' - -#PromoteFracBGSFaint=0.2 -PromoteFracBGSFaint=0.0 -#PromoteFracELG=0.1 -PromoteFracELG=0. - -# location of original MTLs to shuffle. -# Default directory is a read only mount of the CFS filesystem -# You can only access that directory from compute nodes. -# Do NOT use the commented out directory (the normal mount of CFS) -# unless the read only mount is broken -##TEMPexampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$realization/initled -#exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ -#exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ -#exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ -#Options for DateLoopAltMTL and runAltMTLParallel - -#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). -#Default = Empty String/False. Uncomment second option if you want to restart from the first observations -#PLEASE DO NOT CHANGEME -echo "Fix QR resetting for new argparse usage" -qR='' -#qR='-qr' - -#Number of observation dates to loop through -#Defaults to 40 dates for SV3 -NObsDates=99999 - -# Whether to submit a new job with dateLoopAltMTL for each date -# or to submit a single job -# multiDate=0 -multiDate='--multiDate' -echo 'setting QVal here for debug. Fix later.' -#QVal='debug' -QVal='regular' -#QVal='interactive' -# - - - -#Number of nodes to run on. This will launch up to 64*N jobs -#if that number of alternate universes have already been generated -#Calculated automatically from number of sims requested and number of processes per node. Be careful if setting manually -NNodes=$(( ($ndir + $ProcPerNode - 1 )/$ProcPerNode )) -#echo $NNodes -#getosubp: grab subpriorities from the original (exampleledgerbase) MTLs -#This should only be turned on for SV testing/debugging purposes -#This should not be required for main survey debugging. 
-getosubp='' -#getosubp='--getosubp' - -#shuffleSubpriorities(reproducing) must be left as empty strings to ensure -#subpriorities are shuffled. debug mode for main survey -#will only require these flags to be set by uncommenting second options - -dontShuffleSubpriorities='' -reproducing='' -#dontShuffleSubpriorities='--dontShuffleSubpriorities' -#reproducing='--reproducing' -#Include secondary targets? -secondary='' -#secondary='--secondary' - - -#If running from mocks, must set target directory. -#Otherwise this is optional -#targfile='' #CHANGEME IF RUNNING ON MOCKS -#targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory -#targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA{mock_number}.fits" -targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/forFA{mock_number}.fits" -#targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' - - -#Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger -numobs_from_ledger='' -#numobs_from_ledger='--NumObsNotFromLedger' - -#Uncomment second line to force redo fiber assignment if it has already been done. -redoFA='' -#redoFA='--redoFA' - - -#Options for MakeBitweightsParallel -#True/False(1/0) as to whether to split bitweight calculation -#among nodes by MPI between realizations -#splitByReal=1 - -#Split the calculation of bitweights into splitByChunk -#chunks of healpixels. -#splitByChunk=1 - -#Set to true (1) if you want to clobber already existing bitweight files -overwrite2='' -#overwrite2='--overwrite' -#Actual running of scripts - -#Copy this script to output directory for reproducbility -thisFileName=$outputMTLFinalDestination/$0 - -echo $thisFileName - -#if [ -f "$thisFileName" ] -#then -# echo "File is found. Checking to see it is identical to the original." -# cmp $0 $thisFileName -# comp=$? -# if [[ $comp -eq 1 ]] -# then -# echo "Files are not identical." -# echo "If this is intended, please delete or edit the original copied script at $thisFileName" -# echo "If this is unintended, you can reuse the original copied script at that same location" -# echo "goodbye" -# exit 3141 -# elif [[ $comp -eq 0 ]] -# then -# echo "files are same, continuing" -# else -# echo "Something has gone very wrong. Exit code for cmp was $a" -# exit $a -# fi -#else -# echo "Copied script is not found. Copying now, making directories as needed." -# mkdir -p $outputMTLFinalDestination -# cp $SLURM_SUBMIT_DIR $0 $outputMTLFinalDestination/$0 -#fi - -if [ -d "$outputMTLFinalDestination" ] -then - echo "output final directory exists" - echo $outputMTLFinalDestination -else - echo "output final directory does not exist. 
Creating and copying script there" - mkdir -p $outputMTLFinalDestination - cp $0 $outputMTLFinalDestination -fi - -if [ -z $getosubp ] -then - touch $outputMTLFinalDestination/GetOSubpTrue -fi - -printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring - -runtimeInit=$( echo "$endInit - $start" | bc -l ) -argstring="--altMTLBaseDir=$outputMTLFinalDestination --obscon=$obscon --survey=$survey --ProcPerNode=$ProcPerNode $numobs_from_ledger $redoFA $getosubp $debug $verbose $secondary $mock $targfile $multiDate $reproducing --mockmin=$mockinit --mockmax=$mockend" -echo 'argstring for dateloop' -echo $argstring -nohup bash $path2LSS/dateLoopAltMTLBugFix_mock_batch.sh $NObsDates $NNodes $path2LSS $CVal $QVal $qR $argstring >& $OFDL - -endDL=`date +%s.%N` - -if [ $? -ne 0 ]; then - runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) - echo "runtime for Dateloop of $NObsDates days" - echo $runtimeDateLoop - exit 12345 -fi -runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) -echo "runtime for Dateloop of $NObsDates days" -echo $runtimeDateLoop -exit 54321 - - - -runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) - -echo "runtime for Dateloop of $NObsDates days" -echo $runtimeDateLoop diff --git a/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh b/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh index e53fc3d02..303ae77fc 100755 --- a/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh +++ b/bin/Y1ALTMTLRealizationsBRIGHT_mock_init.sh @@ -3,7 +3,7 @@ start=`date +%s.%N` #simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written #simName=JL_DebugReprocReprod2 -simName="altmtl5" +simName="altmtl0" #Location where you have cloned the LSS Repo path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin @@ -42,7 +42,7 @@ mock='--mock' #Uncomment the following line to set your own/nonscratch directory #ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ -ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/ if [[ "${NERSC_HOST}" == "cori" ]]; then CVal='haswell' @@ -124,7 +124,7 @@ printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $s #List of healpixels to create Alt MTLs for #hpListFile="$path2LSS/MainSurveyHPList_mock.txt" -hpListFile="/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl5/initled/hpxlist_bright.txt" +hpListFile="/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/altmtl0/initled/hpxlist_bright.txt" #hpListFile="$path2LSS/DebugMainHPList.txt" #hpListFile="$path2LSS/SV3HPList.txt" @@ -138,8 +138,8 @@ hpListFile="/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/Abacu shuffleBrightPriorities='' -#shuffleELGPriorities='' -shuffleELGPriorities='--shuffleELGPriorities' +shuffleELGPriorities='' +#shuffleELGPriorities='--shuffleELGPriorities' #PromoteFracBGSFaint=0.2 PromoteFracBGSFaint=0.0 @@ -156,7 +156,7 @@ PromoteFracELG=0.1 #exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ #exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ #Options for DateLoopAltMTL and runAltMTLParallel -exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl5/initled +exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/altmtl0/initled #Quick Restart 
(i.e. reset the MTLs by copying the saved original shuffled files). #Default = Empty String/False. Uncomment second option if you want to restart from the first observations @@ -205,7 +205,7 @@ secondary='' #Otherwise this is optional #targfile='' #CHANGEME IF RUNNING ON MOCKS #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory -targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/forFA5.fits" +targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/forFA0.fits" #targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' diff --git a/bin/runAltMTLParallel.py b/bin/runAltMTLParallel.py index f630d4bbd..674b3a192 100755 --- a/bin/runAltMTLParallel.py +++ b/bin/runAltMTLParallel.py @@ -119,7 +119,8 @@ def procFunc(nproc): else: targets = None if args.mock: - retval = mockamt.loop_alt_ledger(args.obscon, survey = args.survey, mtldir = args.mtldir, zcatdir = args.zcatdir, altmtlbasedir = args.altMTLBaseDir, ndirs = ndirs, numobs_from_ledger = args.numobs_from_ledger,secondary = args.secondary, getosubp = args.getosubp, quickRestart = args.quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = args.redoFA, mock = args.mock, targets = targets, debug = args.debug, verbose = args.verbose, reproducing = args.reproducing) + retval = amt.loop_alt_ledger(args.obscon, survey = args.survey, mtldir = args.mtldir, zcatdir = args.zcatdir, altmtlbasedir = args.altMTLBaseDir, ndirs = ndirs, numobs_from_ledger = args.numobs_from_ledger,secondary = args.secondary, getosubp = args.getosubp, quickRestart = args.quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = args.redoFA, mock = args.mock, targets = targets, debug = args.debug, verbose = args.verbose, reproducing = args.reproducing) + #retval = mockamt.loop_alt_ledger(args.obscon, survey = args.survey, mtldir = args.mtldir, zcatdir = args.zcatdir, altmtlbasedir = args.altMTLBaseDir, ndirs = ndirs, numobs_from_ledger = args.numobs_from_ledger,secondary = args.secondary, getosubp = args.getosubp, quickRestart = args.quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = args.redoFA, mock = args.mock, targets = targets, debug = args.debug, verbose = args.verbose, reproducing = args.reproducing) else: retval = amt.loop_alt_ledger(args.obscon, survey = args.survey, mtldir = args.mtldir, zcatdir = args.zcatdir, altmtlbasedir = args.altMTLBaseDir, ndirs = ndirs, numobs_from_ledger = args.numobs_from_ledger,secondary = args.secondary, getosubp = args.getosubp, quickRestart = args.quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = args.redoFA, mock = args.mock, targets = targets, debug = args.debug, verbose = args.verbose, reproducing = args.reproducing) if args.verbose: diff --git a/py/LSS/main/mockaltmtltools.py b/py/LSS/main/mockaltmtltools.py index 97db7dfab..682f6fceb 100644 --- a/py/LSS/main/mockaltmtltools.py +++ b/py/LSS/main/mockaltmtltools.py @@ -13,18 +13,18 @@ ##TEMP -MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py' -MODULE_NAME = 'desitarget' -import importlib -import sys -spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) -module = 
importlib.util.module_from_spec(spec) -sys.modules[spec.name] = module -spec.loader.exec_module(module) +#MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py' +#MODULE_NAME = 'desitarget' +#import importlib +#import sys +#spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) +#module = importlib.util.module_from_spec(spec) +#sys.modules[spec.name] = module +#spec.loader.exec_module(module) ## import desitarget -#from desitarget import io, mtl +from desitarget import io, mtl from desitarget.cuts import random_fraction_of_trues from desitarget.mtl import get_mtl_dir, get_mtl_tile_file_name,get_mtl_ledger_format from desitarget.mtl import get_zcat_dir, get_ztile_file_name, tiles_to_be_processed diff --git a/scripts/mock_tools/add_extra_realizations.py b/scripts/mock_tools/add_extra_realizations.py index e5d2ed42d..0cdab4acd 100644 --- a/scripts/mock_tools/add_extra_realizations.py +++ b/scripts/mock_tools/add_extra_realizations.py @@ -5,10 +5,10 @@ program = 'dark' rmin = 0 -rmax = 256 +rmax = 64 #path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000' -path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl0_R256/Univ{MOCKNUM}' +path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/Univ{MOCKNUM}' extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv') diff --git a/scripts/mock_tools/add_extra_tilesTracker.py b/scripts/mock_tools/add_extra_tilesTracker.py index 97b4364f8..556b0674f 100644 --- a/scripts/mock_tools/add_extra_tilesTracker.py +++ b/scripts/mock_tools/add_extra_tilesTracker.py @@ -4,11 +4,11 @@ program = 'bright' -rmin = 1 +rmin = 0 rmax = 25 #path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000' -path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM}/Univ000' +path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/altmtl{MOCKNUM}/Univ000' extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv') diff --git a/scripts/mock_tools/getpota_Y1_bright_script.sh b/scripts/mock_tools/getpota_Y1_bright_script.sh index 04d8d0f60..aa4fe4c8e 100755 --- a/scripts/mock_tools/getpota_Y1_bright_script.sh +++ b/scripts/mock_tools/getpota_Y1_bright_script.sh @@ -1,21 +1,21 @@ -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --prog BRIGHT --mock_version BGS -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --prog BRIGHT --mock_version BGS -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --prog BRIGHT --mock_version BGS -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --prog BRIGHT --mock_version BGS -srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --prog BRIGHT --mock_version BGS -srun -N 1 -C cpu -t 01:00:00 -q interactive --account 
diff --git a/scripts/mock_tools/add_extra_realizations.py b/scripts/mock_tools/add_extra_realizations.py
index e5d2ed42d..0cdab4acd 100644
--- a/scripts/mock_tools/add_extra_realizations.py
+++ b/scripts/mock_tools/add_extra_realizations.py
@@ -5,10 +5,10 @@
 program = 'dark'
 rmin = 0
-rmax = 256
+rmax = 64
 #path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000'
-path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl0_R256/Univ{MOCKNUM}'
+path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/Univ{MOCKNUM}'
 extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv')
diff --git a/scripts/mock_tools/add_extra_tilesTracker.py b/scripts/mock_tools/add_extra_tilesTracker.py
index 97b4364f8..556b0674f 100644
--- a/scripts/mock_tools/add_extra_tilesTracker.py
+++ b/scripts/mock_tools/add_extra_tilesTracker.py
@@ -4,11 +4,11 @@
 program = 'bright'
-rmin = 1
+rmin = 0
 rmax = 25
 #path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000'
-path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM}/Univ000'
+path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/altmtl{MOCKNUM}/Univ000'
 extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv')
diff --git a/scripts/mock_tools/getpota_Y1_bright_script.sh b/scripts/mock_tools/getpota_Y1_bright_script.sh
index 04d8d0f60..aa4fe4c8e 100755
--- a/scripts/mock_tools/getpota_Y1_bright_script.sh
+++ b/scripts/mock_tools/getpota_Y1_bright_script.sh
@@ -1,21 +1,21 @@
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --prog BRIGHT --mock_version BGS
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --prog BRIGHT --mock_version BGS
+srun -N 1 -C cpu -t 01:00:00 -q regular --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --prog BRIGHT --mock_version BGS_v2
+srun -N 1 -C cpu -t 01:00:00 -q regular --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --prog BRIGHT --mock_version BGS_v2
+srun -N 1 -C cpu -t 01:00:00 -q regular --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --prog BRIGHT --mock_version BGS_v2
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --prog BRIGHT --mock_version BGS_v2
diff --git a/scripts/mock_tools/initAMTL_bright.py b/scripts/mock_tools/initAMTL_bright.py
new file mode 100644
index 000000000..aa672a472
--- /dev/null
+++ b/scripts/mock_tools/initAMTL_bright.py
@@ -0,0 +1,18 @@
+import os
+import errno
+def test_dir(value):
+    if not os.path.exists(value):
+        try:
+            os.makedirs(value, 0o755)
+            print('made ' + value)
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2'
+for i in range(1,25):
+    print(i)
+#    os.system('cp -R %s/altmtl%d/initled/main %s/altmtl%d/Univ000/.' %(path, i, path, i))
+    test_dir('%s/altmtl%d/Univ000'%(path,i))
+    os.system('cp %s/altmtl0/Univ000/*.ecsv %s/altmtl%d/Univ000/.' %(path, path, i))
+    os.system('cp -R %s/altmtl%d/initled/main %s/altmtl%d/Univ000/.' %(path, i, path, i))
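Note: on Python 3.2+ the errno dance in test_dir above can be collapsed into a single call; a sketch of an equivalent helper (behaviour differs only in that it stays silent when the directory already exists):

import os

def test_dir(value):
    # exist_ok=True makes the call a no-op for existing directories,
    # replacing the manual errno.EEXIST check above.
    os.makedirs(value, mode=0o755, exist_ok=True)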
diff --git a/scripts/mock_tools/prepare_mocks_Y1_bright.py b/scripts/mock_tools/prepare_mocks_Y1_bright.py
index a435d16b1..6b72b450e 100644
--- a/scripts/mock_tools/prepare_mocks_Y1_bright.py
+++ b/scripts/mock_tools/prepare_mocks_Y1_bright.py
@@ -278,7 +278,7 @@ def mask_secondgen(nz=0, foot=None, nz_lop=0):
 
     datat.append(dat_bright)
 
-    SubFracFaint=0.556
+    SubFracFaint=0.695
     ran_faint = np.random.uniform(size = len(dat_faint))
     dat_faint_subfrac = dat_faint[(ran_faint<=SubFracFaint)]
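Note: the retention fraction for the faint sample moves from 0.556 to 0.695 here, and the subsample is drawn with a fresh uniform deviate per row. If reruns need to keep the same subsample, a seeded generator does it; a sketch (seed value illustrative, names as in the hunk above):

import numpy as np

rng = np.random.default_rng(12345)  # fixed seed -> reproducible subsample
keep = rng.uniform(size=len(dat_faint)) <= SubFracFaint
dat_faint_subfrac = dat_faint[keep]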
diff --git a/scripts/mock_tools/prepare_script.sh b/scripts/mock_tools/prepare_script.sh
index d8a476c04..b78970e06 100755
--- a/scripts/mock_tools/prepare_script.sh
+++ b/scripts/mock_tools/prepare_script.sh
@@ -1,12 +1,8 @@
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 0 --realmax 2 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 2 --realmax 4 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 4 --realmax 6 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 6 --realmax 8 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 8 --realmax 10 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 10 --realmax 12 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 12 --realmax 14 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 14 --realmax 16 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 16 --realmax 18 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 18 --realmax 20 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 20 --realmax 22 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
-srun -N 1 -C cpu -t 04:00:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v3 --realmin 22 --realmax 25 --isProduction y --split_snapshot y --new_version AbacusSummit_v3_1
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v4 --realmin 0 --realmax 3 --isProduction y --split_snapshot y --new_version AbacusSummit_v4
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v4 --realmin 3 --realmax 6 --isProduction y --split_snapshot y --new_version AbacusSummit_v4
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v4 --realmin 6 --realmax 9 --isProduction y --split_snapshot y --new_version AbacusSummit_v4
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v4 --realmin 9 --realmax 12 --isProduction y --split_snapshot y --new_version AbacusSummit_v4
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v4 --realmin 12 --realmax 15 --isProduction y --split_snapshot y --new_version AbacusSummit_v4
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v4 --realmin 15 --realmax 18 --isProduction y --split_snapshot y --new_version AbacusSummit_v4
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v4 --realmin 18 --realmax 21 --isProduction y --split_snapshot y --new_version AbacusSummit_v4
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v4 --realmin 21 --realmax 25 --isProduction y --split_snapshot y --new_version AbacusSummit_v4
diff --git a/scripts/mock_tools/prepare_script_bright.sh b/scripts/mock_tools/prepare_script_bright.sh
index 6b17d3cc0..250f243b7 100755
--- a/scripts/mock_tools/prepare_script_bright.sh
+++ b/scripts/mock_tools/prepare_script_bright.sh
@@ -1,7 +1,7 @@
-srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 0 --realmax 4 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 4 --realmax 7 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 11 --realmax 14 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 7 --realmax 11 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 14 --realmax 17 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 17 --realmax 21 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 21 --realmax 25 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 0 --realmax 4 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 4 --realmax 7 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 11 --realmax 14 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 7 --realmax 11 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 14 --realmax 17 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 17 --realmax 21 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 22 --realmax 25 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
diff --git a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh
index 6548b300e..029a50c84 100755
--- a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh
+++ b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh
@@ -1,5 +1,5 @@
-SeconGenVer=AbacusSummitBGS #AbacusSummit
-for j in {5..24}
+SeconGenVer=AbacusSummitBGS_v2 #AbacusSummit
+for j in {1..24}
 do
     #j=0
     echo $j

From 9595b15212047e941e9eaf5adf1f6f81a5b10d38 Mon Sep 17 00:00:00 2001
From: Aurelio Carnero Rosell
Date: Thu, 8 Feb 2024 09:58:54 -0800
Subject: [PATCH 172/297] Changes

---
 bin/InitializeAltMTLs.py               |   3 +
 bin/InitializeAltMTLsParallel_mock.py  | 131 ------------------------
 bin/MockAltMTLScript_firstpart.sh      |  74 --------------
 bin/MockAltMTLScript_secondpart.sh     |  85 ----------------
 bin/Y1ALTMTLRealizationsBRIGHT_mock.sh |   2 +-
 bin/dateLoopAltMTL_mock.sh             |  50 ----------
 bin/runAltMTLParallel_mock.py          | 113 ---------------------
 7 files changed, 4 insertions(+), 454 deletions(-)
 delete mode 100755 bin/InitializeAltMTLsParallel_mock.py
 delete mode 100755 bin/MockAltMTLScript_firstpart.sh
 delete mode 100755 bin/MockAltMTLScript_secondpart.sh
 delete mode 100755 bin/dateLoopAltMTL_mock.sh
 delete mode 100755 bin/runAltMTLParallel_mock.py

diff --git a/bin/InitializeAltMTLs.py b/bin/InitializeAltMTLs.py
index 2dd6596d1..b785db0e6 100755
--- a/bin/InitializeAltMTLs.py
+++ b/bin/InitializeAltMTLs.py
@@ -1,4 +1,7 @@
 #!/global/common/software/desi/cori/desiconda/20211217-2.0.0/conda/bin/python -u
+from desiutil.iers import freeze_iers
+freeze_iers()
+
 from astropy.table import Table
 import desitarget.io as io
 import glob
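Note: freeze_iers() is placed before every other import on purpose. As I understand it, it pins astropy's IERS (Earth-rotation) tables to the copies shipped with desiutil, so batch jobs on compute nodes never stall trying to download updated tables. A sketch of the intended ordering:

from desiutil.iers import freeze_iers
freeze_iers()  # pin IERS tables before any astropy.time usage

from astropy.time import Time
t = Time('2024-02-08T09:58:54')  # safe: no network lookup triggered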
diff --git a/bin/InitializeAltMTLsParallel_mock.py b/bin/InitializeAltMTLsParallel_mock.py
deleted file mode 100755
index e87d3a081..000000000
--- a/bin/InitializeAltMTLsParallel_mock.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/global/common/software/desi/cori/desiconda/20211217-2.0.0/conda/bin/python -u
-from astropy.table import Table
-import astropy
-import multiprocessing as mp
-from multiprocessing import Pool
-import logging
-import atexit
-import desitarget.io as io
-import glob
-from LSS.SV3.mockaltmtltools import initializeAlternateMTLs
-import numpy as np
-import os
-from sys import argv
-from desiutil.log import get_logger
-log = get_logger()
-
-#List of healpixels for SV3 (dark and bright are same)
-
-print(argv)
-seed = int(argv[1])
-ndir = int(argv[2])
-try:
-    overwrite = bool(int(argv[3]))
-except:
-    raise ValueError('Invalid non-integer value of overwrite: {0}'.format(overwrite))
-obscon = argv[4]
-survey = argv[5]
-outputMTLDirBase = argv[6]
-HPListFile = argv[7]
-try:
-    shuffleBrightPriorities = bool(int(argv[8]))
-except:
-    if obscon.lower() == 'dark':
-        log.info('Ignoring invalid noninteger value of shuffleBrightPriorities: {0} because dark time MTLs are being initialized'.format(argv[8]))
-        shuffleBrightPriorities = False
-    else:
-        raise ValueError('Invalid non-integer value of shuffleBrightPriorities: {0}'.format(argv[8]))
-
-try:
-    PromoteFracBGSFaint = float(argv[9])
-except:
-    if obscon.lower() == 'dark':
-        log.info('Ignoring invalid nonfloat value of PromoteFracBGSFaint: {0} because dark time MTLs are being initialized'.format(argv[9]))
-        PromoteFracBGSFaint = 0.2
-    else:
-        raise ValueError('Invalid non-float value of PromoteFracBGSFaint: {0}'.format(argv[9]))
-        PromoteFracBGSFaint = 0.2
-
-exampleledgerbase = argv[10] #"/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/{0}/{2}/{3}mtl-{2}-hp-{1}.ecsv"
-NNodes = int(argv[11])
-# If folder doesn't exist, then create it.
-if not os.path.isdir(outputMTLDirBase):
-    os.makedirs(outputMTLDirBase)
-
-with open(outputMTLDirBase + 'SeedFile', 'w') as f:
-    f.write(str(seed))
-
-HPList = np.array(open(HPListFile,'r').readlines()[0].split(',')).astype(int)
-print(HPList)
-NodeID = int(os.getenv('SLURM_NODEID'))
-SlurmNProcs = int(os.getenv('SLURM_NPROCS'))
-
-NProc = int(NNodes*64)
-
-
-
-
-
-#outputMTLDirBase = "/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/altmtl/debug_jl/alt_mtls_mainTest_{0}dirs/".format(ndir)
-outputMTLDir = outputMTLDirBase + "Univ{0:03d}/"
-
-
-HPList = np.array(open(HPListFile,'r').readlines()[0].split(',')).astype(int)
-print(HPList)
-
-# If folder doesn't exist, then create it.
-if not os.path.isdir(outputMTLDirBase):
-    os.makedirs(outputMTLDirBase)
-
-with open(outputMTLDirBase + 'SeedFile', 'w') as f:
-    f.write(str(seed))
-
-def procFunc(nproc):
-    print('starting fxn call')
-    for hpnum in HPList:
-        if 'sv' in survey.lower():
-            mtlprestr = survey.lower()
-        else:
-            mtlprestr = ''
-        exampleledger = exampleledgerbase + '/{0}/{2}/{3}mtl-{2}-hp-{1}.ecsv'.format(survey.lower(),hpnum, obscon.lower(), mtlprestr)
-        if os.path.isfile(exampleledger):
-            initializeAlternateMTLs(exampleledger, outputMTLDir, genSubset = nproc, seed = seed, obscon = obscon, survey = survey, saveBackup = True, hpnum = hpnum, overwrite = overwrite)
-        else:
-            print(hpnum, 'not present')
-    print('ending function call')
-    return 42
-
-inds = []
-start = int(NodeID*NProc/SlurmNProcs)
-end = int((NodeID + 1)*NProc/SlurmNProcs)
-print('start')
-print(start)
-print('end')
-print(end)
-if ndir < start:
-    raise ValueError('ndir is too low for the number of nodes requested. Either request more realizations (ndir) or fewer nodes')
-for i in range(start, end):
-    if i >= ndir:
-        break
-    print('i')
-    print(i)
-    inds.append(i)
-
-
-NProc = len(inds)
-assert(len(inds))
-
-print('b')
-print(inds)
-p = Pool(NProc)
-atexit.register(p.close)
-result = p.map(procFunc,inds)
-print('c')
-
-
-
-
-
-
-
-
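Note: the mock driver scripts deleted in this patch share one fan-out pattern: read SLURM_NODEID/SLURM_NPROCS from the environment, carve the realization indices into a per-node slice, then hand the slice to a multiprocessing Pool. A condensed sketch of that pattern (procs_per_node=64 mirrors the hard-coded NProc above; it assumes the script runs under srun so the SLURM variables exist):

import os
from multiprocessing import Pool

def node_slice(ndir, nnodes, procs_per_node=64):
    # Contiguous slice of realization indices owned by this SLURM node.
    node_id = int(os.getenv('SLURM_NODEID'))
    slurm_nprocs = int(os.getenv('SLURM_NPROCS'))
    nproc = nnodes * procs_per_node
    start = node_id * nproc // slurm_nprocs
    end = (node_id + 1) * nproc // slurm_nprocs
    return [i for i in range(start, end) if i < ndir]

def process_universe(i):
    print('processing realization', i)  # stand-in for the per-universe work
    return 42

if __name__ == '__main__':
    inds = node_slice(ndir=128, nnodes=2)
    assert inds, 'no realizations assigned to this node'
    with Pool(len(inds)) as pool:
        results = pool.map(process_universe, inds)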
diff --git a/bin/MockAltMTLScript_firstpart.sh b/bin/MockAltMTLScript_firstpart.sh
deleted file mode 100755
index 17e9c7d5d..000000000
--- a/bin/MockAltMTLScript_firstpart.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/bash
-#All Boolean True/False parameters are 0 for False or 1 for True
-#So python interprets them correctly
-
-#Options for InitializeAltMTLs
-
-#Random seed. Change to any integer you want (or leave the same)
-seed=12345
-#Number of realizations to generate. Ideally a multiple of 64 for bitweights
-#However, you can choose smaller numbers for debugging
-ndir=256
-#Set to true(1) if you want to clobber already existing files for Alt MTL generation
-overwrite=0
-#Observing conditions to generate MTLs for (should be all caps "DARK" or "BRIGHT")
-obscon='DARK'
-#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail)
-survey='sv3'
-
-###mockrea=0 #THIS IS FOR EACH MOCK REALIZATION
-
-for mockrea in 0 #{1..24}
-do
-#Where to generate MTLs. Automatically formats number of MTLs into directory name but you can change this
-printf -v outputMTLDirBase "$CSCRATCH/alt_mtls_masterScriptTest_%03ddirs_rea%03d/" $ndir $mockrea
-hpListFile='SV3HPList.txt'
-#hpListFile='SV3HPList_mock.txt'
-#These two options only are considered if the obscon is bright
-#First option indicates whether to shuffle the top level priorities
-#of BGS_FAINT/BGS_FAINT_HIP. Second option indicates what fraction/percent
-#of BGS_FAINT to promote. Default is 20%
-shuffleBrightPriorities=0
-PromoteFracBGSFaint=0.2
-#location of original MTLs to shuffle
-printf -v exampleledgerbase "$CSCRATCH/mtl_test/init_mock%03d/" $mockrea
-#Options for DateLoopAltMTL and runAltMTLParallel
-
-#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files).
-#Default = 0/False. Set equal to 1 if you want to restart from the first observations
-qR=0
-#Number of observation dates to loop through
-#Defaults to 40 dates for SV3
-NObsDates=40
-#Number of nodes to run on. This will launch up to 64*N jobs
-#if that number of alternate universes have already been generated
-#Defaults to 1 for 64 directories
-NNodes=4
-
-
-#Include secondary targets?
-secondary=0
-
-numobs_from_ledger=1
-#Force redo fiber assignment if it has already been done.
-redoFA=1
-
-
-#Options for MakeBitweightsParallel
-#True/False(1/0) as to whether to split bitweight calculation
-#among nodes by MPI between realizations
-splitByReal=0
-#Split the calculation of bitweights into splitByChunk
-#chunks of healpixels.
-splitByChunk=100
-
-#Set to true if you want to clobber already existing bitweight files
-overwrite2=1
-#Actual running of scripts
-
-srun --nodes=$NNodes -C haswell -A desi --qos=interactive -t 04:00:00 --mem=120000 InitializeAltMTLsParallel_mock.py $seed $ndir $overwrite $obscon $survey $outputMTLDirBase $hpListFile $shuffleBrightPriorities $PromoteFracBGSFaint $exampleledgerbase $NNodes >& InitializeAltMTLsParallelOutput.out
-if [ $? -ne 0 ]; then
-    exit 1234
-fi
-
-done
diff --git a/bin/MockAltMTLScript_secondpart.sh b/bin/MockAltMTLScript_secondpart.sh
deleted file mode 100755
index 853441466..000000000
--- a/bin/MockAltMTLScript_secondpart.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-#All Boolean True/False parameters are 0 for False or 1 for True
-#So python interprets them correctly
-
-#Options for InitializeAltMTLs
-
-#Random seed. Change to any integer you want (or leave the same)
-seed=12345
-#Number of realizations to generate. Ideally a multiple of 64 for bitweights
-#However, you can choose smaller numbers for debugging
-ndir=256
-#Set to true(1) if you want to clobber already existing files for Alt MTL generation
-overwrite=0
-#Observing conditions to generate MTLs for (should be all caps "DARK" or "BRIGHT")
-obscon='DARK'
-#Survey to generate MTLs for (should be lowercase "sv3" or "main", sv2, sv1, and cmx are untested and will likely fail)
-survey='sv3'
-#mockrea=1 THIS IS THE MOCK REALIZATION
-
-#mockrea=0 #THIS IS FOR EACH MOCK REALIZATION
-
-for mockrea in 0 #{16..24}
-###for mockrea in {0..24}
-do
-#Where to generate MTLs. Automatically formats number of MTLs into directory name but you can change this
-printf -v outputMTLDirBase "$CSCRATCH/alt_mtls_masterScriptTest_%03ddirs_rea%03d/" $ndir $mockrea
-hpListFile='SV3HPList.txt'
-#hpListFile='SV3HPList_mock.txt'
-#These two options only are considered if the obscon is bright
-#First option indicates whether to shuffle the top level priorities
-#of BGS_FAINT/BGS_FAINT_HIP. Second option indicates what fraction/percent
-#of BGS_FAINT to promote. Default is 20%
-shuffleBrightPriorities=0
-PromoteFracBGSFaint=0.2
-#location of original MTLs to shuffle
-printf -v exampleledgerbase "$CSCRATCH/mtl_test/init_mock%03d/" $mockrea
-#Options for DateLoopAltMTL and runAltMTLParallel
-
-#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files).
-#Default = 0/False. Set equal to 1 if you want to restart from the first observations
-qR=0
-#Number of observation dates to loop through
-#Defaults to 40 dates for SV3
-NObsDates=40
-#Number of nodes to run on. This will launch up to 64*N jobs
-#if that number of alternate universes have already been generated
-#Defaults to 1 for 64 directories
-NNodes=4
-
-
-#Include secondary targets?
-secondary=0
-
-numobs_from_ledger=1
-#Force redo fiber assignment if it has already been done.
-redoFA=1
-
-
-#Options for MakeBitweightsParallel
-#True/False(1/0) as to whether to split bitweight calculation
-#among nodes by MPI between realizations
-splitByReal=0
-#Split the calculation of bitweights into splitByChunk
-#chunks of healpixels.
-splitByChunk=100
-
-#Set to true if you want to clobber already existing bitweight files
-overwrite2=1
-#Actual running of scripts
-
-
-
-bash dateLoopAltMTL_mock.sh $qR $NObsDates $NNodes $outputMTLDirBase $secondary $obscon $survey $numobs_from_ledger $redoFA >& dateLoopAltMTLOutput.out
-if [ $? -ne 0 ]; then
-    exit 12345
-fi
-
-
-if [ $splitByReal -ne 0 ]; then
-    srun --nodes=$NNodes -C haswell -A desi --qos=interactive -t 04:00:00 --mem=120000 MakeBitweights_mock.py $survey $obscon $ndir $splitByReal $splitByChunk $hpListFile $outputMTLDirBase $overwrite2 $exampleledgerbase >& MakeBitweightsOutput.out
-else
-    srun --nodes=1 -C haswell -A desi --qos=interactive -t 04:00:00 --mem=120000 MakeBitweights_mock.py $survey $obscon $ndir $splitByReal $splitByChunk $hpListFile $outputMTLDirBase $overwrite2 $exampleledgerbase >& MakeBitweightsOutput.out
-fi
-
-done
diff --git a/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh b/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh
index e611d285f..bff3720dd 100755
--- a/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh
+++ b/bin/Y1ALTMTLRealizationsBRIGHT_mock.sh
@@ -218,7 +218,7 @@ secondary=''
 #targfile='' #CHANGEME IF RUNNING ON MOCKS
 #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory
 #targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA{mock_number}.fits"
-targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/forFA{mock_number}.fits"
+targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/forFA{mock_number}.fits"
 #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits'
diff --git a/bin/dateLoopAltMTL_mock.sh b/bin/dateLoopAltMTL_mock.sh
deleted file mode 100755
index 1c02bbd55..000000000
--- a/bin/dateLoopAltMTL_mock.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-echo "All Arguments"
-echo $@
-
-
-#Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files).
-#Default = 0/False. Set equal to 1 if you want to restart from the first observations
-qR=$1
-
-#Number of observation dates to loop through
-NObsDates=$2
-#Number of nodes to run on. This will launch up to 64*N jobs
-#if that number of alternate universes have already been generated
-NNodes=$3
-
-#Base directory for the alternate MTLs created in the InitializeAltMTLs script
-altmtlbasedir=$4
-
-#Include secondary targets?
-secondary=$5
-
-#Observing conditions to process the observations
-obscon=$6
-
-#Survey whose observations you are processing
-survey=$7
-
-numobs_from_ledger=$8
-
-#Force redo fiber assignment if it has already been done.
-redoFA=$9
-
-for i in $(seq 0 1 $NObsDates)
-do
-    echo " NextDate"
-    echo ""
-    echo ""
-    echo ""
-    echo $i
-    echo ""
-    echo ""
-    echo ""
-    #srun runAltMTLParallel.py $i
-    srun --nodes=$NNodes -C haswell -A desi --qos=interactive -t 02:00:00 runAltMTLParallel_mock.py $NNodes $qR $altmtlbasedir $secondary $obscon $survey $numobs_from_ledger $redoFA
-    qR=0 #DO NOT CHANGE. This prevents further restarts after the first if qR is set to 1 at top.
-    if [ $? -ne 0 ]; then
-        exit 1234
-    fi
-done
diff --git a/bin/runAltMTLParallel_mock.py b/bin/runAltMTLParallel_mock.py
deleted file mode 100755
index 59b9f701f..000000000
--- a/bin/runAltMTLParallel_mock.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/global/common/software/desi/cori/desiconda/20211217-2.0.0/conda/bin/python -u
-from multiprocessing import Pool
-from LSS.SV3 import mockaltmtltools as amt
-#import altmtltools as amt
-import dill
-from sys import argv
-import os
-from astropy.table import Table
-import numpy as np
-import astropy
-import multiprocessing as mp
-import logging
-import atexit
-import glob
-
-print('argv')
-print(argv)
-
-#Base directory for the alternate MTLs created in the InitializeAltMTLs script
-altmtlbasedir=argv[3]
-#Include secondary targets?
-secondary = bool(int(argv[4]))
-#Observing conditions to process the observations
-obscon = argv[5].lower()
-#Survey whose observations you are processing
-survey = argv[6]
-numobs_from_ledger = bool(int(argv[7]))
-#Force redo fiber assignment if it has already been done.
-redoFA = bool(int(argv[8]))
-
-#Get information about environment for multiprocessing
-NodeID = int(os.getenv('SLURM_NODEID'))
-SlurmNProcs = int(os.getenv('SLURM_NPROCS'))
-
-NNodes = int(argv[1])
-NProc = int(NNodes*64)
-print('NProc')
-print(NProc)
-print('NNodes')
-print(NNodes)
-
-try:
-    try:
-        print('argv[2]')
-        print(argv[2])
-        QRArg = int(argv[2])
-    except:
-        print('argv[2]v2')
-        print(argv[2])
-        QRArg = str(argv[2])
-    print('QR Arg')
-    print(QRArg)
-    quickRestart = bool(QRArg)
-except:
-
-    print('No Quick Restart Argument given (or incorrect argument). Defaulting to false.')
-    quickRestart = False
-
-print('quick Restart')
-print(quickRestart)
-mtldir = '/global/cscratch1/sd/acarnero/mtl_test/init_mock000'
-### AUREmtldir = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/'
-zcatdir = '/global/cfs/cdirs/desi/spectro/redux/daily/'
-ndirs = None
-getosubp = False
-multiproc = True
-singleDate = True
-
-
-def procFunc(nproc):
-    print('starting fxn call')
-    amt.loop_alt_ledger(obscon, survey = survey, mtldir = mtldir, zcatdir = zcatdir, altmtlbasedir = altmtlbasedir, ndirs = ndirs, numobs_from_ledger = numobs_from_ledger,secondary = secondary, getosubp = getosubp, quickRestart = quickRestart, multiproc = multiproc, nproc = nproc, singleDate = singleDate, redoFA = redoFA)
-    print('ending function call')
-    return 42
-#amt.quickRestartFxn(ndirs = 1, altmtlbasedir = altmtlbasedir, survey = 'sv3', obscon = 'dark')
-
-#amt.loop_alt_ledger('dark', survey = survey, mtldir = mtldir, zcatdir = zcatdir, altmtlbasedir = altmtlbasedir, ndirs = 1, numobs_from_ledger = True,secondary = False, getosubp = False, quickRestart = True, multiproc = True, nproc = 0, redoFA = True, singleDate = '20210406')
-#for d in zdates:
-#    print('zdate')#
-#    print(d)
-#    amt.loop_alt_ledger('dark', survey = survey, mtldir = mtldir, zcatdir = zcatdir, altmtlbasedir = altmtlbasedir, ndirs = 1, numobs_from_ledger = True,secondary = False, getosubp = False, quickRestart = False, multiproc = True, nproc = 0, redoFA = True, singleDate = d)
-
-#procFunc(0)
-
-inds = []
-start = int(NodeID*NProc/SlurmNProcs)
-end = int((NodeID + 1)*NProc/SlurmNProcs)
-print("NodeID")
-print(NodeID)
-print('start')
-print(start)
-print('end')
-print(end)
-
-for i in range(start, end):
-    print('i')
-    print(i)
-    files = glob.glob(altmtlbasedir + "Univ{0:03d}/*".format(i))
-    if len(files):
-        pass
-    else:
-        print('no files in dir number {0}'.format(i))
-        continue
-    inds.append(i)
-
-assert(len(inds))
-
-print('b')
-print(inds)
-p = Pool(NProc)
-atexit.register(p.close)
-result = p.map(procFunc,inds)
-print('c')
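Note: one wrinkle in the deleted driver above: once int() fails, QRArg becomes a string, and `bool(QRArg)` is True for any non-empty string, so a spelling like 'False' would silently enable the quick restart. A hedged sketch of stricter flag parsing:

def parse_flag(value):
    # bool('False') is True in Python; map common spellings explicitly instead.
    text = str(value).strip().lower()
    if text in ('1', 'true', 't', 'yes', 'y'):
        return True
    if text in ('0', 'false', 'f', 'no', 'n', ''):
        return False
    raise ValueError('cannot interpret {0!r} as a boolean flag'.format(value))

quickRestart = parse_flag('False')  # False, unlike bool('False')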
From d3558c7a2eafd63c2043ba92c5ceb86c6dbc0bf4 Mon Sep 17 00:00:00 2001
From: Aurelio Carnero Rosell
Date: Mon, 19 Feb 2024 03:39:27 -0800
Subject: [PATCH 173/297] changes

---
 Sandbox/LSSpipe_Y1mock_2ndgen_BGS.txt           |  2 +
 bin/InitializeAltMTLs.py                        |  1 +
 bin/InitializeAltMTLsParallel.py                | 19 ++++++-
 bin/Y1ALTMTLRealizationsDARK_mock.sh            | 16 +++---
 bin/Y1ALTMTLRealizationsDARK_mock_init.sh       | 18 +++----
 bin/Y1Bitweights128RealizationsDARK_mock.sh     | 37 +++++++------
 bin/dateLoopAltMTLBugFix.sh                     |  2 +-
 bin/dateLoopAltMTLBugFix_mock_batch.sh          |  2 +-
 py/LSS/SV3/altmtltools.py                       | 12 ++++-
 py/LSS/common_tools.py                          |  4 ++
 py/LSS/main/mockaltmtltools.py                  | 20 ++++----
 py/LSS/mocktools.py                             |  6 ++-
 scripts/mock_tools/abBGSamtl_cat_sbatch.sh      |  2 +-
 scripts/mock_tools/abamtl_cat_sbatch.sh         |  4 +-
 scripts/mock_tools/abamtl_combd_cat_sbatch.sh   |  2 +-
 scripts/mock_tools/add_extra_realizations.py    |  6 +--
 scripts/mock_tools/add_extra_tilesTracker.py    |  6 +--
 scripts/mock_tools/getpota_Y1_script.sh         | 50 +++++++++----------
 scripts/mock_tools/mkCat_SecondGen_amtl.py      |  6 ++-
 scripts/mock_tools/prepare_script_bright.sh     | 14 +++---
 scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh     | 10 ++--
 scripts/mock_tools/run1_AMTLmock_LSS_v31full.sh | 12 ++---
 scripts/mock_tools/run1_AMTLmock_LSS_v4.sh      | 12 +++++
 scripts/mock_tools/run1_AMTLmock_combd_LSS.sh   |  2 +-
 .../run_Y1SecondGen_initialledger_batch.sh      |  8 +--
 scripts/mock_tools/script_lrgmask_Y1.sh         | 50 +++++++++----------
 26 files changed, 186 insertions(+), 137 deletions(-)
 create mode 100755 scripts/mock_tools/run1_AMTLmock_LSS_v4.sh

diff --git a/Sandbox/LSSpipe_Y1mock_2ndgen_BGS.txt b/Sandbox/LSSpipe_Y1mock_2ndgen_BGS.txt
index f706f68d2..215802e8f 100644
--- a/Sandbox/LSSpipe_Y1mock_2ndgen_BGS.txt
+++ b/Sandbox/LSSpipe_Y1mock_2ndgen_BGS.txt
@@ -42,7 +42,9 @@ This process runs on regular queue that takes 12 hours but to complete the opera
 
 10) RUN A SECOND TIME
 >> nohup ./Y1ALTMTLRealizationsBRIGHT_mock.sh &
 
+11) Combd calling run1_AMTLmock_combd_LSS.sh from abamtl_combd_cat_sbatch.sh (Change path accordingly)
+12) run1_AMTLmock_LSS_BGS.sh with abBGSamtl_cat_sbatch.sh
diff --git a/bin/InitializeAltMTLs.py b/bin/InitializeAltMTLs.py
index b785db0e6..34cf846c6 100755
--- a/bin/InitializeAltMTLs.py
+++ b/bin/InitializeAltMTLs.py
@@ -3,6 +3,7 @@ freeze_iers()
 
 from astropy.table import Table
+import desitarget
 import desitarget.io as io
 import glob
 from LSS.SV3.altmtltools import initializeAlternateMTLs
diff --git a/bin/InitializeAltMTLsParallel.py b/bin/InitializeAltMTLsParallel.py
index 2c704b88b..cd8420b5f 100755
--- a/bin/InitializeAltMTLsParallel.py
+++ b/bin/InitializeAltMTLsParallel.py
@@ -6,7 +6,22 @@
 from multiprocessing import Pool
 import logging
 import atexit
-import desitarget.io as io
+
+#TEMP
+MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py'
+MODULE_NAME = 'desitarget'
+import importlib
+import sys
+spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH)
+module = importlib.util.module_from_spec(spec)
+sys.modules[spec.name] = module
+spec.loader.exec_module(module)
+#
+
+import desitarget
+#from desitarget import io
+
+#import desitarget.io as io
 import glob
 from LSS.SV3.altmtltools import initializeAlternateMTLs
 import numpy as np
@@ -212,4 +227,4 @@ def procFunc(nproc):
     ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
     ps.print_stats()
     ps.dump_stats(args.outputMTLDirBase + '/InitializeAltMTLParallel.prof')
-    print(s.getvalue())
\ No newline at end of file
+    print(s.getvalue())
diff --git a/bin/Y1ALTMTLRealizationsDARK_mock.sh b/bin/Y1ALTMTLRealizationsDARK_mock.sh
index 622bb0f55..0fcaf72db 100755
--- a/bin/Y1ALTMTLRealizationsDARK_mock.sh
+++ b/bin/Y1ALTMTLRealizationsDARK_mock.sh
@@ -45,8 +45,8 @@ mock='--mock'
 #Uncomment the following line to set your own/nonscratch directory
 #ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/
-ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/
-#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/
+#ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/
+ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/
 
 if [[ "${NERSC_HOST}" == "cori" ]]; then
     CVal='haswell'
@@ -86,8 +86,8 @@ seed=3593589
 #Number of realizations to generate. Ideally a multiple of 64 for bitweights
 #However, you can choose smaller numbers for debugging
 #Mock realization
-mockinit=10
-mockend=11
+mockinit=0
+mockend=25
 let ndir=$mockend-$mockinit
 
@@ -183,8 +183,8 @@ NObsDates=99999
 multiDate='--multiDate'
 echo 'setting QVal here for debug. Fix later.'
 #QVal='debug'
-#QVal='regular'
-QVal='interactive'
+QVal='regular'
+#QVal='interactive'
 #
@@ -217,8 +217,8 @@ secondary=''
 #Otherwise this is optional
 #targfile='' #CHANGEME IF RUNNING ON MOCKS
 #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory
-targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA{mock_number}.fits"
-##targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA{mock_number}.fits"
+#targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA{mock_number}.fits"
+targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/forFA{mock_number}.fits"
 #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits'
diff --git a/bin/Y1ALTMTLRealizationsDARK_mock_init.sh b/bin/Y1ALTMTLRealizationsDARK_mock_init.sh
index 309ee9fa9..8da8c1b06 100755
--- a/bin/Y1ALTMTLRealizationsDARK_mock_init.sh
+++ b/bin/Y1ALTMTLRealizationsDARK_mock_init.sh
@@ -3,7 +3,7 @@ start=`date +%s.%N`
 #simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written
 #simName=JL_DebugReprocReprod2
-simName="altmtl10"
+simName="altmtl0"
 
 #Location where you have cloned the LSS Repo
 path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin
@@ -42,8 +42,8 @@ mock='--mock'
 #Uncomment the following line to set your own/nonscratch directory
 #ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/
-#ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/
-ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/
+ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/
+#ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/
 
 if [[ "${NERSC_HOST}" == "cori" ]]; then
     CVal='haswell'
@@ -125,8 +125,8 @@ printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $s
 #List of healpixels to create Alt MTLs for
 #hpListFile="$path2LSS/MainSurveyHPList_mock.txt"
-#hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl4/initled/hpxlist_dark.txt"
-hpListFile="/pscratch/sd/a/acarnero/test_main/altmtl10/initled/hpxlist_dark.txt"
+hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl0/initled/hpxlist_dark.txt"
+#hpListFile="/pscratch/sd/a/acarnero/test_main/altmtl10/initled/hpxlist_dark.txt"
 #hpListFile="$path2LSS/DebugMainHPList.txt"
 #hpListFile="$path2LSS/SV3HPList.txt"
@@ -158,8 +158,8 @@ PromoteFracELG=0.0
 #exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/
 #exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/
 #Options for DateLoopAltMTL and runAltMTLParallel
-#exampleLedgerBase=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl4/initled/
-exampleLedgerBase=/pscratch/sd/a/acarnero/test_main/altmtl10/initled/
+exampleLedgerBase=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl0/initled/
+#exampleLedgerBase=/pscratch/sd/a/acarnero/test_main/altmtl10/initled/
 
 #Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files).
 #Default = Empty String/False. Uncomment second option if you want to restart from the first observations
@@ -208,8 +208,8 @@ secondary=''
 #Otherwise this is optional
 #targfile='' #CHANGEME IF RUNNING ON MOCKS
 #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory
-targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA10.fits"
-## THIS IS THE GOODtargfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA4.fits"
+#targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA10.fits"
+targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/forFA0.fits"
 #targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits'
 #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits'
diff --git a/bin/Y1Bitweights128RealizationsDARK_mock.sh b/bin/Y1Bitweights128RealizationsDARK_mock.sh
index fe51c8306..c5247d4cd 100755
--- a/bin/Y1Bitweights128RealizationsDARK_mock.sh
+++ b/bin/Y1Bitweights128RealizationsDARK_mock.sh
@@ -3,7 +3,7 @@ start=`date +%s.%N`
 #simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written
 #simName=JL_DebugReprocReprod2
-simName=altmtl1_R64
+simName=altmtl1_R128
 
 #Location where you have cloned the LSS Repo
 path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin/
@@ -82,7 +82,7 @@ fi
 seed=3593589
 #Number of realizations to generate. Ideally a multiple of 64 for bitweights
 #However, you can choose smaller numbers for debugging
-ndir=64
+ndir=128
 
 #Uncomment second option if you want to clobber already existing files for Alt MTL generation
 overwrite=''
@@ -273,7 +273,6 @@ else
     mkdir -p $outputMTLFinalDestination
     cp $0 $outputMTLFinalDestination
 fi
-
 if [ -z $getosubp ]
 then
     touch $outputMTLFinalDestination/GetOSubpTrue
@@ -281,22 +280,22 @@ fi
 printf -v OFIM "%s/Initialize%sAltMTLsParallelOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $date
 
-#echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM"
-#srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM
-
-#cp -r $outputMTLFinalDestination/ "$ALTMTLHOME/BACKUPInitial_$simName/"
-#exit 1234
-#if [ $? -ne 0 ]; then
-# exit 1234
-# endInit=`date +%s.%N`
-# runtimeInit=$( echo "$endInit - $start" | bc -l )
-# echo "runtime for initialization"
-# echo $runtimeInit
-#fi
-#endInit=`date +%s.%N`
-#runtimeInit=$( echo "$endInit - $start" | bc -l )
-#echo "runtime for initialization"
-#echo $runtimeInit
+echo "srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM"
+srun --nodes=$NNodes -C $CVal -q $QVal -A desi -t 04:00:00 --mem=0 --exclusive $path2LSS/InitializeAltMTLsParallel.py --seed=$seed --ndir=$ndir --obscon=$obscon --survey=$survey --outputMTLDirBase=$outputMTLDirBase --PromoteFracBGSFaint=$PromoteFracBGSFaint --PromoteFracELG=$PromoteFracELG --HPListFile=$hpListFile --exampleLedgerBase=$exampleLedgerBase --ProcPerNode=$ProcPerNode --finalDir="$outputMTLFinalDestination/Univ{0:03d}" $overwrite $shuffleBrightPriorities $shuffleELGPriorities $usetmp $dontShuffleSubpriorities $reproducing $debug $verbose --startDate=$startDate --endDate=$endDate >& $OFIM
+
+cp -r $outputMTLFinalDestination/ "$ALTMTLHOME/BACKUPInitial_$simName/"
+exit 1234
+if [ $? -ne 0 ]; then
+    exit 1234
+    endInit=`date +%s.%N`
+    runtimeInit=$( echo "$endInit - $start" | bc -l )
+    echo "runtime for initialization"
+    echo $runtimeInit
+fi
+endInit=`date +%s.%N`
+runtimeInit=$( echo "$endInit - $start" | bc -l )
+echo "runtime for initialization"
+echo $runtimeInit
 
 printf -v OFDL "%s/dateLoop%sAltMTLOutput_%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring
diff --git a/bin/dateLoopAltMTLBugFix.sh b/bin/dateLoopAltMTLBugFix.sh
index 16988235e..3502ff8fb 100755
--- a/bin/dateLoopAltMTLBugFix.sh
+++ b/bin/dateLoopAltMTLBugFix.sh
@@ -37,7 +37,7 @@ fi
 if [ $QVal = 'regular' ];
 then
-    srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:20481531 $path2LSS/runAltMTLParallel.py $argstring
+    srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:21570852 $path2LSS/runAltMTLParallel.py $argstring
 fi
 
 if [ $QVal = 'debug' ];
 then
diff --git a/bin/dateLoopAltMTLBugFix_mock_batch.sh b/bin/dateLoopAltMTLBugFix_mock_batch.sh
index 05b7b6dc9..ff6090d19 100755
--- a/bin/dateLoopAltMTLBugFix_mock_batch.sh
+++ b/bin/dateLoopAltMTLBugFix_mock_batch.sh
@@ -46,7 +46,7 @@ fi
 if [ $QVal = 'regular' ];
 then
     echo "srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:19598458 $path2LSS/runAltMTLRealizations.py $argstring"
-    srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:20272831 $path2LSS/runAltMTLRealizations.py $argstring
+    srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:21611298 $path2LSS/runAltMTLRealizations.py $argstring
     #srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:17881308 $path2LSS/runAltMTLParallel.py $argstring
 fi
diff --git a/py/LSS/SV3/altmtltools.py b/py/LSS/SV3/altmtltools.py
index a0d35ef47..1b1eefe94 100644
--- a/py/LSS/SV3/altmtltools.py
+++ b/py/LSS/SV3/altmtltools.py
@@ -1,5 +1,15 @@
 from desiutil.iers import freeze_iers
 freeze_iers()
+#TEMP
+MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py'
+MODULE_NAME = 'desitarget'
+import importlib
+import sys
+spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH)
+module = importlib.util.module_from_spec(spec)
+sys.modules[spec.name] = module
+spec.loader.exec_module(module)
+#
 
 import collections.abc
 from time import time
@@ -12,7 +22,7 @@ from memory_profiler import profile
 
 import desitarget
-#from desitarget import io, mtl
+from desitarget import io, mtl
 from desitarget.cuts import random_fraction_of_trues
 from desitarget.mtl import get_mtl_dir, get_mtl_tile_file_name,get_mtl_ledger_format
 from desitarget.mtl import get_zcat_dir, get_ztile_file_name, tiles_to_be_processed
diff --git a/py/LSS/common_tools.py b/py/LSS/common_tools.py
index 017f087c5..b72ac04b4 100644
--- a/py/LSS/common_tools.py
+++ b/py/LSS/common_tools.py
@@ -1505,3 +1505,7 @@ def return_altmtl_fba_fadate(tileid):
     fhtOrig = fitsio.read_header(FAOrigName)
     fadate = fhtOrig['RUNDATE']
     return ''.join(fadate.split('T')[0].split('-'))
+
+def return_hp_givenradec(nside, ra, dec):
+    theta, phi = np.radians(90-dec), np.radians(ra)
+    return hp.ang2pix(nside, theta, phi, nest=True)
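Note: the new helper converts RA/Dec in degrees to nested-order HEALPix pixel indices (theta is the co-latitude, hence 90 - dec). A standalone sketch of the same conversion, assuming healpy and numpy are available under their usual aliases:

import numpy as np
import healpy as hp

def return_hp_givenradec(nside, ra, dec):
    # Co-latitude runs from the north celestial pole, so theta = 90 - dec.
    theta, phi = np.radians(90 - dec), np.radians(ra)
    return hp.ang2pix(nside, theta, phi, nest=True)

# One point at RA=150 deg, Dec=2.2 deg on an nside=8 nested map.
print(return_hp_givenradec(8, np.array([150.0]), np.array([2.2])))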
diff --git a/py/LSS/main/mockaltmtltools.py b/py/LSS/main/mockaltmtltools.py
index 682f6fceb..98fb047be 100644
--- a/py/LSS/main/mockaltmtltools.py
+++ b/py/LSS/main/mockaltmtltools.py
@@ -12,16 +12,16 @@
 from memory_profiler import profile
 
-##TEMP
-#MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py'
-#MODULE_NAME = 'desitarget'
-#import importlib
-#import sys
-#spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH)
-#module = importlib.util.module_from_spec(spec)
-#sys.modules[spec.name] = module
-#spec.loader.exec_module(module)
-##
+#TEMP
+MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py'
+MODULE_NAME = 'desitarget'
+import importlib
+import sys
+spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH)
+module = importlib.util.module_from_spec(spec)
+sys.modules[spec.name] = module
+spec.loader.exec_module(module)
+#
 
 import desitarget
 from desitarget import io, mtl
diff --git a/py/LSS/mocktools.py b/py/LSS/mocktools.py
index e84d78e6c..6a3044d06 100644
--- a/py/LSS/mocktools.py
+++ b/py/LSS/mocktools.py
@@ -446,6 +446,8 @@ def createrancomb_wdupspec(outdir, ranfile, alltileloc, mockassign, fdataspec):
 
 
-
+#def add_bitweights(bitweight_filename, input_file):
+#    data_ = Table(fitsio.read(input_file))
+#    hpix = list(common.
-
\ No newline at end of file
+
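Note: the stubbed-out add_bitweights above stops mid-line, so its intended matching scheme is not recoverable from this patch. One plausible completion — joining a bitweights table onto a catalog by TARGETID — is sketched below; the column names and output argument are hypothetical, not the repository's API:

import fitsio
from astropy.table import Table, join

def add_bitweights(bitweight_filename, input_file, output_file):
    # Hypothetical sketch: both tables are assumed to carry TARGETID, and
    # the bitweights file a BITWEIGHTS column; names are illustrative only.
    data = Table(fitsio.read(input_file))
    bw = Table(fitsio.read(bitweight_filename, columns=['TARGETID', 'BITWEIGHTS']))
    merged = join(data, bw, keys='TARGETID', join_type='left')
    merged.write(output_file, format='fits', overwrite=True)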
diff --git a/scripts/mock_tools/abBGSamtl_cat_sbatch.sh b/scripts/mock_tools/abBGSamtl_cat_sbatch.sh
index 8451b66ac..61a207a50 100755
--- a/scripts/mock_tools/abBGSamtl_cat_sbatch.sh
+++ b/scripts/mock_tools/abBGSamtl_cat_sbatch.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-#SBATCH --time=02:30:00
+#SBATCH --time=05:00:00
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
diff --git a/scripts/mock_tools/abamtl_cat_sbatch.sh b/scripts/mock_tools/abamtl_cat_sbatch.sh
index 7c5d7d46e..5d167b816 100755
--- a/scripts/mock_tools/abamtl_cat_sbatch.sh
+++ b/scripts/mock_tools/abamtl_cat_sbatch.sh
@@ -3,10 +3,10 @@
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
-#SBATCH --array=0
+#SBATCH --array=0-24
 #SBATCH --account=desi
 
 source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main
 PYTHONPATH=$PYTHONPATH:$HOME/LSS/py
 
-srun /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run1_AMTLmock_LSS.sh $SLURM_ARRAY_TASK_ID
+srun /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run1_AMTLmock_LSS_v4.sh $SLURM_ARRAY_TASK_ID
diff --git a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh
index 46e17db41..4d147da9d 100755
--- a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh
+++ b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh
@@ -3,7 +3,7 @@
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
-#SBATCH --array=0-3,5-24
+#SBATCH --array=0-24
 #SBATCH --account=desi
 
 source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main
diff --git a/scripts/mock_tools/add_extra_realizations.py b/scripts/mock_tools/add_extra_realizations.py
index 0cdab4acd..740cbfa07 100644
--- a/scripts/mock_tools/add_extra_realizations.py
+++ b/scripts/mock_tools/add_extra_realizations.py
@@ -5,12 +5,12 @@
 program = 'dark'
 rmin = 0
-rmax = 64
+rmax = 128
 #path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000'
-path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/Univ{MOCKNUM}'
+path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R128/Univ{MOCKNUM}'
 
-extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv')
+extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv')
 
 tileref = extratiles['TILEID'][-1]
 print(tileref)
diff --git a/scripts/mock_tools/add_extra_tilesTracker.py b/scripts/mock_tools/add_extra_tilesTracker.py
index 556b0674f..c224a0a5c 100644
--- a/scripts/mock_tools/add_extra_tilesTracker.py
+++ b/scripts/mock_tools/add_extra_tilesTracker.py
@@ -2,15 +2,15 @@
 from astropy.table import Table,vstack
 import os
 
-program = 'bright'
+program = 'dark'
 rmin = 0
 rmax = 25
 #path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000'
-path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/altmtl{MOCKNUM}/Univ000'
+path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM}/Univ000'
 
-extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv')
+extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv')
 
 tileref = extratiles['TILEID'][-1]
 print(tileref)
diff --git a/scripts/mock_tools/getpota_Y1_script.sh b/scripts/mock_tools/getpota_Y1_script.sh
index fbe84236b..6748b7d6d 100755
--- a/scripts/mock_tools/getpota_Y1_script.sh
+++ b/scripts/mock_tools/getpota_Y1_script.sh
@@ -1,25 +1,25 @@
-#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 0 --secgen_ver AbacusSummit_v3_1
-#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 1 --secgen_ver AbacusSummit_v3_1
-#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 2 --secgen_ver AbacusSummit_v3_1
-#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 3 --secgen_ver AbacusSummit_v3_1
-#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --secgen_ver AbacusSummit_v3_1
-#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --secgen_ver AbacusSummit_v3_1
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --secgen_ver AbacusSummit_v3_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 0 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 1 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 2 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 3 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --mock_version _v4
diff --git a/scripts/mock_tools/mkCat_SecondGen_amtl.py b/scripts/mock_tools/mkCat_SecondGen_amtl.py
index 1bcc49329..430011a68 100644
--- a/scripts/mock_tools/mkCat_SecondGen_amtl.py
+++ b/scripts/mock_tools/mkCat_SecondGen_amtl.py
@@ -88,6 +88,7 @@ def test_dir(value):
 parser.add_argument("--use_map_veto", help="Tag for extraveto added in name, for example, _HPmapcut", default = '')
 parser.add_argument("--resamp",help="resample radial info for different selection function regions",default='n')
 parser.add_argument("--getFKP", help="calculate n(z) and FKP weights on final clustering catalogs", default='n')
+parser.add_argument("--add_bitweights", help="Add bitweights to files before creating the final clustering catalogs.", default=None)
 
 #--use_map_veto _HPmapcut
 
@@ -216,6 +217,8 @@ def test_dir(value):
         if pdir == 'bright':
             cols.append('BGS_TARGET')
             cols.append('R_MAG_ABS')
+            cols.append('G_R_OBS')
+            cols.append('G_R_REST')
         pa = common.combtiles_wdup_altmtl('FAVAIL', tiles, fbadir, os.path.join(outdir, 'datcomb_' + pdir + 'wdup.fits'), tarf, addcols=cols)
 
     fcoll = os.path.join(lssdir, 'collision_'+pdir+'_mock%d.fits' % mocknum)
@@ -464,7 +467,8 @@ def _parfun2(rann):
             common.write_LSS(nm, os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits'))
             #nm.write(ffile, overwrite=True)
 
-
+#        if args.add_bitweights is not None:
+#            mocktools.add_bitweights(args.add_bitweights, os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits'))
     ct.mkclusdat(os.path.join(readdir, args.tracer + notqso), tp = args.tracer, dchi2 = None, tsnrcut = 0, zmin = zmin, zmax = zmax, use_map_veto = args.use_map_veto,subfrac=subfrac,zsplit=zsplit, ismock=True, ccut=args.ccut)#,ntilecut=ntile,ccut=ccut)
     #ct.mkclusdat(os.path.join(dirout, args.tracer + notqso), tp = args.tracer, dchi2 = None, splitNS='y', tsnrcut = 0, zmin = zmin, zmax = zmax, use_map_veto = args.use_map_veto)#,ntilecut=ntile,ccut=ccut)
     print('*** END WITH MKCLUSDAT ***')
diff --git a/scripts/mock_tools/prepare_script_bright.sh b/scripts/mock_tools/prepare_script_bright.sh
index 250f243b7..32caf0147 100755
--- a/scripts/mock_tools/prepare_script_bright.sh
+++ b/scripts/mock_tools/prepare_script_bright.sh
@@ -1,7 +1,7 @@
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 0 --realmax 4 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 4 --realmax 7 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 11 --realmax 14 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 7 --realmax 11 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 14 --realmax 17 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-#srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 17 --realmax 21 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
-srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 22 --realmax 25 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 0 --realmax 4 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 4 --realmax 7 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 7 --realmax 11 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 11 --realmax 14 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 14 --realmax 17 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5
+srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 17 --realmax 21 --prog bright --apply_mask y
--downsampling n --isProduction y --rbandcut 19.5 +srun -N 1 -C cpu -t 03:50:00 -q interactive --account desi python prepare_mocks_Y1_bright.py --mockver ab_secondgen_cosmosim --realmin 21 --realmax 25 --prog bright --apply_mask y --downsampling n --isProduction y --rbandcut 19.5 diff --git a/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh b/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh index ed71bed77..9488ea0ea 100755 --- a/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh +++ b/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh @@ -1,7 +1,7 @@ #!/bin/bash -OUTBASE='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM}' +OUTBASE='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/altmtl{MOCKNUM}' -#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer bright --combd y --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS --joindspec y -#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_ANY --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS --fulld y --fullr y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --apply_veto y --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' -#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_BRIGHT --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS --fulld y --fullr y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --apply_veto y --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_BRIGHT --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' --ccut -21.55 +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer bright --combd y --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2 --joindspec y +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_ANY --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2 --fulld y --fullr y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --apply_veto y --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_BRIGHT --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2 --fulld y --fullr y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --apply_veto y --mkclusran y --nz y --mkclusdat y 
--splitGC y --outmd 'notscratch' +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_BRIGHT --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2 --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' --ccut -21.5 diff --git a/scripts/mock_tools/run1_AMTLmock_LSS_v31full.sh b/scripts/mock_tools/run1_AMTLmock_LSS_v31full.sh index d54ca26dc..5b2a0a5a1 100755 --- a/scripts/mock_tools/run1_AMTLmock_LSS_v31full.sh +++ b/scripts/mock_tools/run1_AMTLmock_LSS_v31full.sh @@ -1,12 +1,12 @@ #!/bin/bash -OUTBASE=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl{MOCKNUM} +OUTBASE=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} -python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1 +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 -python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1 +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 -python scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1 +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 -mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl$1/mock$1/LSScats/* 
/global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl$1/mock$1/LSScats/ -chmod 775 /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl$1/mock$1/LSScats/* \ No newline at end of file +mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl$1/mock$1/LSScats/ +chmod 775 /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl$1/mock$1/LSScats/* diff --git a/scripts/mock_tools/run1_AMTLmock_LSS_v4.sh b/scripts/mock_tools/run1_AMTLmock_LSS_v4.sh new file mode 100755 index 000000000..e742044c1 --- /dev/null +++ b/scripts/mock_tools/run1_AMTLmock_LSS_v4.sh @@ -0,0 +1,12 @@ +#!/bin/bash +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 --outmd 'notscratch' +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 --outmd 'notscratch' +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 --outmd 'notscratch' + +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_ANY --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_BRIGHT --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output 
/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 + +#mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/ +#chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/*clustering* diff --git a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh index 0d4554598..1065576e2 100755 --- a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh @@ -1,5 +1,5 @@ #!/bin/bash -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1 --combd y --joindspec y +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 --combd y --joindspec y #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/ #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/ diff --git a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh index 029a50c84..98f4a3de6 100755 --- a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh +++ b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh @@ -1,9 +1,9 @@ -SeconGenVer=AbacusSummitBGS_v2 #AbacusSummit -for j in {1..24} +SeconGenVer=AbacusSummit_v4 #AbacusSummit +for j in {0..24} do #j=0 echo $j #echo $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled BRIGHT -#python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled DARK +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled BRIGHT +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled DARK done diff --git a/scripts/mock_tools/script_lrgmask_Y1.sh b/scripts/mock_tools/script_lrgmask_Y1.sh index 92dca491f..fc3c7e4c3 100755 --- a/scripts/mock_tools/script_lrgmask_Y1.sh +++ b/scripts/mock_tools/script_lrgmask_Y1.sh @@ -1,25 +1,25 @@ -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 0 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 1 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 2 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 3 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 4 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 5 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 6 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 7 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 8 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 9 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 10 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 11 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 12 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 13 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 14 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 15 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 16 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 17 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 18 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 19 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 20 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 21 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 22 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 23 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 -srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 24 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v3_1 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 0 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 1 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 2 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 3 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 4 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 5 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 6 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 7 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 8 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 9 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 10 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 11 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 12 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 13 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 14 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 15 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 16 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 17 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 18 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 19 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 20 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 21 
--cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 22 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 23 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 +srun -N 1 -C cpu -t 00:20:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/readwrite_pixel_bitmask.py --tracer lrg -i 24 --cat_type Ab2ndgen --secgen_ver AbacusSummit_v4 From 3177fde6dfd07586218dac0e8c08a81b7a29f0fa Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Mon, 19 Feb 2024 04:33:15 -0800 Subject: [PATCH 174/297] add --- .../mock_tools/getpota_Y1_bright_script.sh | 40 ++++++++++--------- .../run_Y1SecondGen_initialledger_batch.sh | 2 +- 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/scripts/mock_tools/getpota_Y1_bright_script.sh b/scripts/mock_tools/getpota_Y1_bright_script.sh index aa4fe4c8e..44f303517 100755 --- a/scripts/mock_tools/getpota_Y1_bright_script.sh +++ b/scripts/mock_tools/getpota_Y1_bright_script.sh @@ -1,21 +1,25 @@ +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 0 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 1 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 2 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 3 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py 
--realization 12 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --prog BRIGHT --mock_version BGS_v2 +srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --prog BRIGHT --mock_version BGS_v2 srun -N 1 -C cpu -t 01:00:00 -q regular --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --prog BRIGHT --mock_version BGS_v2 srun -N 1 -C cpu -t 01:00:00 -q regular --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --prog BRIGHT --mock_version BGS_v2 srun -N 1 -C cpu -t 01:00:00 -q regular --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --prog BRIGHT --mock_version BGS_v2 -#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --prog BRIGHT --mock_version BGS_v2 diff --git a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh index 98f4a3de6..98e0ee57f 100755 --- a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh +++ b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh @@ -1,4 +1,4 @@ -SeconGenVer=AbacusSummit_v4 #AbacusSummit +SeconGenVer=AbacusSummit_v4 #AbacusSummitBGS_v2 for j in {0..24} do #j=0 From b4d59c891e5528eb4a626540cf19a2ee83164099 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Wed, 28 Feb 2024 08:41:48 -0800 Subject: [PATCH 175/297] Adding changes to mock_tools --- bin/Y1ALTMTLRealizationsDARK_mock.sh | 6 +-- bin/Y1ALTMTLRealizationsDARK_mock_init.sh | 8 +-- bin/dateLoopAltMTLBugFix_mock_batch.sh | 4 +- py/LSS/main/cattools.py | 13 ++++- py/LSS/mocktools.py | 6 --- scripts/mock_tools/abBGSamtl_cat_sbatch.sh | 4 +- scripts/mock_tools/abamtl_cat_sbatch.sh | 4 +- scripts/mock_tools/abamtl_combd_cat_sbatch.sh | 4 +- scripts/mock_tools/abamtl_prepmock_sbatch.sh | 12 +++++ scripts/mock_tools/add_extra_tilesTracker.py | 7 +-- scripts/mock_tools/concat_bitweights.py | 33 ++++++++++++ scripts/mock_tools/getpota_Y1_script.sh | 50 +++++++++---------- scripts/mock_tools/initAMTL_dark.py | 17 +++++++ scripts/mock_tools/mkCat_SecondGen_amtl.py | 21 +++----- scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh | 1 - 15 files changed, 125 insertions(+), 65 deletions(-) create mode 100755 scripts/mock_tools/abamtl_prepmock_sbatch.sh create mode 100644 scripts/mock_tools/concat_bitweights.py create mode 100644 scripts/mock_tools/initAMTL_dark.py diff --git 
a/bin/Y1ALTMTLRealizationsDARK_mock.sh b/bin/Y1ALTMTLRealizationsDARK_mock.sh index 0fcaf72db..81461c464 100755 --- a/bin/Y1ALTMTLRealizationsDARK_mock.sh +++ b/bin/Y1ALTMTLRealizationsDARK_mock.sh @@ -46,7 +46,7 @@ mock='--mock' #Uncomment the following line to set your own/nonscratch directory #ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ #ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/ -ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/ if [[ "${NERSC_HOST}" == "cori" ]]; then CVal='haswell' @@ -87,7 +87,7 @@ seed=3593589 #However, you can choose smaller numbers for debugging #Mock realization mockinit=0 -mockend=25 +mockend=1 let ndir=$mockend-$mockinit @@ -218,7 +218,7 @@ secondary='' #targfile='' #CHANGEME IF RUNNING ON MOCKS #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory #targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA{mock_number}.fits" -targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/forFA{mock_number}.fits" +targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/forFA{mock_number}.fits" #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' diff --git a/bin/Y1ALTMTLRealizationsDARK_mock_init.sh b/bin/Y1ALTMTLRealizationsDARK_mock_init.sh index 8da8c1b06..5e6bacc95 100755 --- a/bin/Y1ALTMTLRealizationsDARK_mock_init.sh +++ b/bin/Y1ALTMTLRealizationsDARK_mock_init.sh @@ -42,7 +42,7 @@ mock='--mock' #Uncomment the following line to set your own/nonscratch directory #ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ -ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/ #ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/ if [[ "${NERSC_HOST}" == "cori" ]]; then @@ -125,7 +125,7 @@ printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $s #List of healpixels to create Alt MTLs for #hpListFile="$path2LSS/MainSurveyHPList_mock.txt" -hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl0/initled/hpxlist_dark.txt" +hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl0/initled/hpxlist_dark.txt" #hpListFile="/pscratch/sd/a/acarnero/test_main/altmtl10/initled/hpxlist_dark.txt" #hpListFile="$path2LSS/DebugMainHPList.txt" #hpListFile="$path2LSS/SV3HPList.txt" @@ -158,7 +158,7 @@ PromoteFracELG=0.0 #exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ #exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ #Options for DateLoopAltMTL and runAltMTLParallel -exampleLedgerBase=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl0/initled/ +exampleLedgerBase=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl0/initled/ #exampleLedgerBase=/pscratch/sd/a/acarnero/test_main/altmtl10/initled/ #Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). 
@@ -209,7 +209,7 @@ secondary='' #targfile='' #CHANGEME IF RUNNING ON MOCKS #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory #targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA10.fits" -targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/forFA0.fits" +targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/forFA0.fits" #targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' diff --git a/bin/dateLoopAltMTLBugFix_mock_batch.sh b/bin/dateLoopAltMTLBugFix_mock_batch.sh index ff6090d19..40ee41f51 100755 --- a/bin/dateLoopAltMTLBugFix_mock_batch.sh +++ b/bin/dateLoopAltMTLBugFix_mock_batch.sh @@ -45,8 +45,8 @@ then fi if [ $QVal = 'regular' ]; then - echo "srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:19598458 $path2LSS/runAltMTLRealizations.py $argstring" - srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:21611298 $path2LSS/runAltMTLRealizations.py $argstring + echo "srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:22017975 $path2LSS/runAltMTLRealizations.py $argstring" + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:22017975 $path2LSS/runAltMTLRealizations.py $argstring #srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:17881308 $path2LSS/runAltMTLParallel.py $argstring fi diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index 7a60ce42f..d2fc6f01c 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -3609,6 +3609,9 @@ def mkclusdat(fl,weighttileloc=True,zmask=False,tp='',dchi2=9,tsnrcut=80,rcut=No kl.append('WEIGHT_RF') if 'WEIGHT_IMLIN' in cols: kl.append('WEIGHT_RF') + if 'BITWEIGHTS' in cols: + kl.append('BITWEIGHTS') + kl.append('PROB_OBS') if tp[:3] == 'BGS': #ff['flux_r_dered'] = ff['FLUX_R']/ff['MW_TRANSMISSION_R'] @@ -3639,7 +3642,15 @@ def mkclusdat(fl,weighttileloc=True,zmask=False,tp='',dchi2=9,tsnrcut=80,rcut=No print('comparison before/after abs mag cut') print(len(ff),len(ff[sel])) ff = ff[sel] - + if 'G_R_OBS' in cols: + kl.append('G_R_OBS') + if 'G_R_REST' in cols: + kl.append('G_R_REST') + if 'R_MAG_ABS' in cols: + kl.append('R_MAG_ABS') + if 'R_MAG_APP' in cols: + kl.append('R_MAG_APP') + wn = ff['PHOTSYS'] == 'N' kll = [] data_cols = list(ff.dtype.names) diff --git a/py/LSS/mocktools.py b/py/LSS/mocktools.py index 6a3044d06..415f46cf6 100644 --- a/py/LSS/mocktools.py +++ b/py/LSS/mocktools.py @@ -445,9 +445,3 @@ def createrancomb_wdupspec(outdir, ranfile, alltileloc, mockassign, fdataspec): return os.path.join(outdir, ranfile.split('/')[-1]), os.path.join(outdir, alltileloc.split('/')[-1]) - -#def add_bitweights(bitweight_filename, input_file): -# data_ = Table(fitsio.read(input_file)) -# hpix = list(common. 
-
-
diff --git a/scripts/mock_tools/abBGSamtl_cat_sbatch.sh b/scripts/mock_tools/abBGSamtl_cat_sbatch.sh
index 61a207a50..1800ff42c 100755
--- a/scripts/mock_tools/abBGSamtl_cat_sbatch.sh
+++ b/scripts/mock_tools/abBGSamtl_cat_sbatch.sh
@@ -1,9 +1,9 @@
 #!/bin/bash
-#SBATCH --time=05:00:00
+#SBATCH --time=02:00:00
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
-#SBATCH --array=0-24
+#SBATCH --array=12,13,15-24
 #SBATCH --account=desi
 
 source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main
diff --git a/scripts/mock_tools/abamtl_cat_sbatch.sh b/scripts/mock_tools/abamtl_cat_sbatch.sh
index 5d167b816..e819338df 100755
--- a/scripts/mock_tools/abamtl_cat_sbatch.sh
+++ b/scripts/mock_tools/abamtl_cat_sbatch.sh
@@ -1,9 +1,9 @@
 #!/bin/bash
-#SBATCH --time=02:00:00
+#SBATCH --time=04:00:00
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
-#SBATCH --array=0-24
+#SBATCH --array=20,21,24
 #SBATCH --account=desi
 
 source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main
diff --git a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh
index 4d147da9d..dd03d628a 100755
--- a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh
+++ b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh
@@ -1,9 +1,9 @@
 #!/bin/bash
-#SBATCH --time=03:00:00
+#SBATCH --time=04:30:00
 #SBATCH --qos=regular
 #SBATCH --nodes=1
 #SBATCH --constraint=cpu
-#SBATCH --array=0-24
+#SBATCH --array=20-24
 #SBATCH --account=desi
 
 source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main
diff --git a/scripts/mock_tools/abamtl_prepmock_sbatch.sh b/scripts/mock_tools/abamtl_prepmock_sbatch.sh
new file mode 100755
index 000000000..b91ff0ccb
--- /dev/null
+++ b/scripts/mock_tools/abamtl_prepmock_sbatch.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+#SBATCH --time=04:00:00
+#SBATCH --qos=regular
+#SBATCH --nodes=1
+#SBATCH --constraint=cpu
+#SBATCH --array=1-24
+#SBATCH --account=desi
+
+source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main
+PYTHONPATH=$PYTHONPATH:$HOME/LSS/py
+
+srun /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run1_prepmock_LSS.sh $SLURM_ARRAY_TASK_ID
diff --git a/scripts/mock_tools/add_extra_tilesTracker.py b/scripts/mock_tools/add_extra_tilesTracker.py
index c224a0a5c..f56b6c74c 100644
--- a/scripts/mock_tools/add_extra_tilesTracker.py
+++ b/scripts/mock_tools/add_extra_tilesTracker.py
@@ -3,12 +3,13 @@ import os
 
 program = 'dark'
+#program = 'bright'
 
 rmin = 0
-rmax = 25
+rmax = 1
 
-#path = '/pscratch/sd/a/acarnero/test_main/altmtl{MOCKNUM}/Univ000'
-path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM}/Univ000'
+path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl{MOCKNUM}/Univ000'
+#path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/altmtl{MOCKNUM}/Univ000'
 
 extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv')
 
 tileref = extratiles['TILEID'][-1]
diff --git a/scripts/mock_tools/concat_bitweights.py b/scripts/mock_tools/concat_bitweights.py
new file mode 100644
index 000000000..6a0d1c702
--- /dev/null
+++ b/scripts/mock_tools/concat_bitweights.py
@@ -0,0 +1,33 @@
+from astropy.table import Table,vstack
+import numpy as np
+
+def concatenateBWFiles(BWDir, hpList, survey = 'main', obscon = 'dark', OFName = '{0}bw-{1}-allTiles.fits', skipFailures = False, overwrite = False):
+    BWBaseFN = BWDir + '{0}/{1}/{0}bw-{1}-hp-{2:d}.fits'
+    assert(len(hpList) > 1)
+    AllBWFiles = Table.read(BWBaseFN.format(survey, obscon, hpList[0]), hdu = 1)
+    notCompleted = []
+    for hp in hpList[1:]:
+        print(BWBaseFN.format(survey, obscon, hp))
+        try:
+            BWTemp = Table.read(BWBaseFN.format(survey, obscon, hp), hdu = 1)
+        except Exception as e:
+            if skipFailures:
+                notCompleted.append(hp)
+                continue
+            else:
+                raise(e)
+
+        AllBWFiles = vstack([AllBWFiles, BWTemp])
+#    print(', '.join(map(str, notCompleted)))
+    AllBWFiles.write(BWDir + '{0}/{1}/'.format(survey, obscon) + OFName.format(survey, obscon), format = 'fits', overwrite = overwrite)
+
+
+
+
+hpL_file = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1/initled/hpxlist_dark.txt'
+HPList = np.array(open(hpL_file, 'r').readlines()[0].split(',')).astype(int)
+
+BW_dir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/BitweightFiles/'
+
+
+concatenateBWFiles(BW_dir, HPList, skipFailures=True, overwrite=True)
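[Editor's note: the concatenated table above is what downstream catalog code joins onto the full catalogs by TARGETID (see the mkCat_SecondGen_amtl.py change later in this series). A minimal sketch, not part of the patch, of how the packed weights can be consumed, assuming the usual convention that each 64-bit BITWEIGHTS integer packs 64 alternate-realization assignment flags; the file and column names are assumptions following the patterns above:]

import numpy as np
from astropy.table import Table

# Read the output of concatenateBWFiles (name assumed from OFName above).
bw = Table.read('mainbw-dark-allTiles.fits')
packed = np.asarray(bw['BITWEIGHTS'], dtype=np.int64)
if packed.ndim == 1:               # single packed word per target
    packed = packed.reshape(-1, 1)
# Pop-count the packed words: each set bit marks one alternate realization
# in which the target was assigned a fiber.
bits = np.unpackbits(packed.astype('>i8').view(np.uint8), axis=1)
prob_obs = bits.sum(axis=1) / (64 * packed.shape[1])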
diff --git a/scripts/mock_tools/getpota_Y1_script.sh b/scripts/mock_tools/getpota_Y1_script.sh
index 6748b7d6d..a3a092fd7 100755
--- a/scripts/mock_tools/getpota_Y1_script.sh
+++ b/scripts/mock_tools/getpota_Y1_script.sh
@@ -1,25 +1,25 @@
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 0 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 1 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 2 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 3 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --mock_version _v4
-srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --mock_version _v4
+#srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 0 --mock_version _v4
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 1 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 2 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 3 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 4 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 5 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 6 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 7 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 8 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 9 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 10 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 11 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 12 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 13 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 14 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 15 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 16 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 17 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 18 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 19 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 20 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 21 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 22 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 23 --mock_version _v4_1
+srun -N 1 -C cpu -t 01:00:00 -q interactive --account desi python /pscratch/sd/a/acarnero/codes/LSS/scripts/getpotaY1_mock.py --realization 24 --mock_version _v4_1
%(path, path, i)) diff --git a/scripts/mock_tools/mkCat_SecondGen_amtl.py b/scripts/mock_tools/mkCat_SecondGen_amtl.py index 430011a68..c5715ee03 100644 --- a/scripts/mock_tools/mkCat_SecondGen_amtl.py +++ b/scripts/mock_tools/mkCat_SecondGen_amtl.py @@ -276,15 +276,6 @@ def test_dir(value): dz = os.path.join(lssdir, 'datcomb_'+pdir+'_tarspecwdup_zdone.fits') tlf = os.path.join(lssdir, 'Alltiles_'+pdir+'_tilelocs.dat.fits') -# fcoll = os.path.join(lssdir, 'collision_'+pdir+'_mock%d.fits' % mocknum) - -# if not os.path.isfile(fcoll): -# fin = os.path.join(args.targDir, 'mock%d' %mocknum, 'pota-' + pr + '.fits') - #fin = os.path.join('/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit','mock%d' %mocknum, 'pota-' + pr + '.fits') -# fcoll = mocktools.create_collision_from_pota(fin, fcoll) -# else: -# print('collision file already exist', fcoll) - ct.mkfulldat_mock(dz, imbits, ftar, args.tracer, bit, os.path.join(dirout, args.tracer + notqso + '_full_noveto.dat.fits'), tlf, survey = args.survey, maxp = maxp, desitarg = desitarg, specver = args.specdata, notqso = notqso, gtl_all = None, mockz = mockz, mask_coll = fcoll, badfib = mainp.badfib, min_tsnr2 = mainp.tsnrcut, mocknum = mocknum, mockassigndir = os.path.join(args.base_output, 'fba%d' % mocknum).format(MOCKNUM=mocknum)) print('*** END WITH FULLD ***') @@ -455,8 +446,6 @@ def _parfun2(rann): if args.mkclusdat == 'y': print('--- START MKCLUSDAT ---') nztl.append('') - #fin = os.path.join(dirout, args.tracer + notqso + '_full' + args.use_map_veto + '.dat.fits') - #ct.mkclusdat(os.path.join(dirout,args.tracer+notqso),tp=args.tracer,dchi2=None,tsnrcut=0,zmin=zmin,zmax=zmax)#,ntilecut=ntile) if args.ccut is not None: ffile = Table.read(os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits').replace('global','dvs_ro')) @@ -467,9 +456,13 @@ def _parfun2(rann): common.write_LSS(nm, os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits')) #nm.write(ffile, overwrite=True) -# if args.add_bitweights is not None: -# mocktools.add_bitweights(args.add_bitweights, os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits')) - ct.mkclusdat(os.path.join(readdir, args.tracer + notqso), tp = args.tracer, dchi2 = None, tsnrcut = 0, zmin = zmin, zmax = zmax, use_map_veto = args.use_map_veto,subfrac=subfrac,zsplit=zsplit, ismock=True, ccut=args.ccut)#,ntilecut=ntile,ccut=ccut) + if args.add_bitweights is not None: + ffile = Table.read(os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits').replace('global','dvs_ro')) + bitweights_file = Table.read(args.add_bitweights) + nm = Table(join(ffile, bitweights_file, join_type='left', keys=['TARGETID'])) + common.write_LSS(nm, os.path.join(readdir, args.tracer + notqso + '_full'+args.use_map_veto + '.dat.fits')) + + ct.mkclusdat(os.path.join(readdir, args.tracer + notqso), tp = args.tracer, dchi2 = None, tsnrcut = 0, zmin = zmin, zmax = zmax, use_map_veto = args.use_map_veto, subfrac=subfrac,zsplit=zsplit, ismock=True, ccut=args.ccut)#,ntilecut=ntile,ccut=ccut) #ct.mkclusdat(os.path.join(dirout, args.tracer + notqso), tp = args.tracer, dchi2 = None, splitNS='y', tsnrcut = 0, zmin = zmin, zmax = zmax, use_map_veto = args.use_map_veto)#,ntilecut=ntile,ccut=ccut) print('*** END WITH MKCLUSDAT ***') diff --git a/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh b/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh index 9488ea0ea..a7036c2f3 100755 --- a/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh 
+++ b/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh @@ -1,7 +1,6 @@ #!/bin/bash OUTBASE='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/altmtl{MOCKNUM}' -#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer bright --combd y --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2 --joindspec y #python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_ANY --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2 --fulld y --fullr y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --apply_veto y --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' #python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_BRIGHT --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2 --fulld y --fullr y --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --apply_veto y --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output $OUTBASE --mockver ab_secondgen --mocknum $1 --tracer BGS_BRIGHT --survey Y1 --add_gtl y --specdata iron --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2 --minr 0 --maxr 18 --use_map_veto '_HPmapcut' --mkclusran y --nz y --mkclusdat y --splitGC y --outmd 'notscratch' --ccut -21.5 From 1e384cd805472a7147ee0a2c24ed10308101babe Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Thu, 7 Mar 2024 04:41:43 -0800 Subject: [PATCH 176/297] more --- bin/InitializeAltMTLsParallel.py | 16 +++--- bin/Y1ALTMTLRealizationsDARK_mock.sh | 52 +++++++++---------- bin/Y1ALTMTLRealizationsDARK_mock_init.sh | 8 +-- bin/Y1Bitweights128RealizationsDARK_mock.sh | 12 ++--- bin/dateLoopAltMTLBugFix_mock_batch.sh | 2 +- py/LSS/SV3/altmtltools.py | 16 +++--- py/LSS/main/mockaltmtltools.py | 26 ++++++---- scripts/mock_tools/abBGSamtl_cat_sbatch.sh | 6 +-- scripts/mock_tools/abamtl_cat_sbatch.sh | 2 +- scripts/mock_tools/abamtl_combd_cat_sbatch.sh | 2 +- scripts/mock_tools/add_extra_tilesTracker.py | 2 +- scripts/mock_tools/initAMTL_dark.py | 4 +- scripts/mock_tools/mkCat_SecondGen_amtl.py | 10 ++++ scripts/mock_tools/run1_AMTLmock_LSS_v4.sh | 10 ++-- scripts/mock_tools/run1_AMTLmock_combd_LSS.sh | 6 +-- .../run_Y1SecondGen_initialledger_batch.sh | 4 +- 16 files changed, 97 insertions(+), 81 deletions(-) diff --git a/bin/InitializeAltMTLsParallel.py b/bin/InitializeAltMTLsParallel.py index cd8420b5f..394476e66 100755 --- a/bin/InitializeAltMTLsParallel.py +++ b/bin/InitializeAltMTLsParallel.py @@ -8,14 +8,14 @@ import atexit #TEMP -MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py' -MODULE_NAME = 'desitarget' -import importlib -import sys -spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) -module = importlib.util.module_from_spec(spec) -sys.modules[spec.name] = module -spec.loader.exec_module(module) +#MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py' +#MODULE_NAME = 'desitarget' +#import 
importlib +#import sys +#spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) +#module = importlib.util.module_from_spec(spec) +#sys.modules[spec.name] = module +#spec.loader.exec_module(module) # import desitarget diff --git a/bin/Y1ALTMTLRealizationsDARK_mock.sh b/bin/Y1ALTMTLRealizationsDARK_mock.sh index 81461c464..7959a10b4 100755 --- a/bin/Y1ALTMTLRealizationsDARK_mock.sh +++ b/bin/Y1ALTMTLRealizationsDARK_mock.sh @@ -46,7 +46,7 @@ mock='--mock' #Uncomment the following line to set your own/nonscratch directory #ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ #ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/ -ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/ if [[ "${NERSC_HOST}" == "cori" ]]; then CVal='haswell' @@ -218,7 +218,7 @@ secondary='' #targfile='' #CHANGEME IF RUNNING ON MOCKS #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory #targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA{mock_number}.fits" -targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/forFA{mock_number}.fits" +targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/forFA{mock_number}.fits" #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' @@ -250,30 +250,30 @@ thisFileName=$outputMTLFinalDestination/$0 echo $thisFileName -#if [ -f "$thisFileName" ] -#then -# echo "File is found. Checking to see it is identical to the original." -# cmp $0 $thisFileName -# comp=$? -# if [[ $comp -eq 1 ]] -# then -# echo "Files are not identical." -# echo "If this is intended, please delete or edit the original copied script at $thisFileName" -# echo "If this is unintended, you can reuse the original copied script at that same location" -# echo "goodbye" -# exit 3141 -# elif [[ $comp -eq 0 ]] -# then -# echo "files are same, continuing" -# else -# echo "Something has gone very wrong. Exit code for cmp was $a" -# exit $a -# fi -#else -# echo "Copied script is not found. Copying now, making directories as needed." -# mkdir -p $outputMTLFinalDestination -# cp $SLURM_SUBMIT_DIR $0 $outputMTLFinalDestination/$0 -#fi +if [ -f "$thisFileName" ] +then + echo "File is found. Checking to see it is identical to the original." + cmp $0 $thisFileName + comp=$? + if [[ $comp -eq 1 ]] + then + echo "Files are not identical." + echo "If this is intended, please delete or edit the original copied script at $thisFileName" + echo "If this is unintended, you can reuse the original copied script at that same location" + echo "goodbye" + exit 3141 + elif [[ $comp -eq 0 ]] + then + echo "files are same, continuing" + else + echo "Something has gone very wrong. Exit code for cmp was $a" + exit $a + fi +else + echo "Copied script is not found. Copying now, making directories as needed." 
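An aside on the guard re-enabled in this hunk: it archives the submitted script next to its outputs and refuses to rerun if the archived copy has diverged, branching on cmp's exit code (0 identical, 1 different, greater than 1 on error). A minimal sketch of the same idea in Python, with illustrative paths; this is not the script's actual mechanism, which stays in bash:

import filecmp
import os
import shutil
import sys

# Illustrative stand-ins for $outputMTLFinalDestination and $0 in the script above.
dest_dir = '/path/to/outputMTLFinalDestination'
archived = os.path.join(dest_dir, os.path.basename(sys.argv[0]))

if os.path.isfile(archived):
    # Like cmp: proceed only if the archived copy is byte-identical.
    if not filecmp.cmp(sys.argv[0], archived, shallow=False):
        sys.exit('archived copy differs; delete or edit it before rerunning')
else:
    # First run: archive a copy of this script alongside its outputs.
    os.makedirs(dest_dir, exist_ok=True)
    shutil.copyfile(sys.argv[0], archived)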
+ mkdir -p $outputMTLFinalDestination + cp $SLURM_SUBMIT_DIR $0 $outputMTLFinalDestination/$0 +fi if [ -d "$outputMTLFinalDestination" ] then diff --git a/bin/Y1ALTMTLRealizationsDARK_mock_init.sh b/bin/Y1ALTMTLRealizationsDARK_mock_init.sh index 5e6bacc95..8da8c1b06 100755 --- a/bin/Y1ALTMTLRealizationsDARK_mock_init.sh +++ b/bin/Y1ALTMTLRealizationsDARK_mock_init.sh @@ -42,7 +42,7 @@ mock='--mock' #Uncomment the following line to set your own/nonscratch directory #ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ -ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/ #ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/ if [[ "${NERSC_HOST}" == "cori" ]]; then @@ -125,7 +125,7 @@ printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $s #List of healpixels to create Alt MTLs for #hpListFile="$path2LSS/MainSurveyHPList_mock.txt" -hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl0/initled/hpxlist_dark.txt" +hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl0/initled/hpxlist_dark.txt" #hpListFile="/pscratch/sd/a/acarnero/test_main/altmtl10/initled/hpxlist_dark.txt" #hpListFile="$path2LSS/DebugMainHPList.txt" #hpListFile="$path2LSS/SV3HPList.txt" @@ -158,7 +158,7 @@ PromoteFracELG=0.0 #exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ #exampleLedgerBase=$SCRATCH/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ #Options for DateLoopAltMTL and runAltMTLParallel -exampleLedgerBase=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl0/initled/ +exampleLedgerBase=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl0/initled/ #exampleLedgerBase=/pscratch/sd/a/acarnero/test_main/altmtl10/initled/ #Quick Restart (i.e. reset the MTLs by copying the saved original shuffled files). 
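The targfile values set in these scripts embed the realization index in the file name (forFA0.fits in this init variant, or a literal {mock_number} token in the per-mock variant above). A minimal sketch of how such a template can be expanded per realization, assuming the substitution is done with Python string formatting downstream:

# Hypothetical expansion of the {mock_number} placeholder used in targfile paths.
template = '/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/forFA{mock_number}.fits'
targfiles = [template.format(mock_number=m) for m in range(25)]
# targfiles[0] -> .../forFA0.fits, targfiles[1] -> .../forFA1.fits, ...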
@@ -209,7 +209,7 @@ secondary='' #targfile='' #CHANGEME IF RUNNING ON MOCKS #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory #targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA10.fits" -targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/forFA0.fits" +targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/forFA0.fits" #targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' diff --git a/bin/Y1Bitweights128RealizationsDARK_mock.sh b/bin/Y1Bitweights128RealizationsDARK_mock.sh index c5247d4cd..aaae0f827 100755 --- a/bin/Y1Bitweights128RealizationsDARK_mock.sh +++ b/bin/Y1Bitweights128RealizationsDARK_mock.sh @@ -3,7 +3,7 @@ start=`date +%s.%N` #simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written #simName=JL_DebugReprocReprod2 -simName=altmtl1_R128 +simName=altmtl10_R64 #Location where you have cloned the LSS Repo path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin/ @@ -43,7 +43,7 @@ mock='--mock' #Uncomment the following line to set your own/nonscratch directory #ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ -ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/ if [[ "${NERSC_HOST}" == "cori" ]]; then CVal='haswell' @@ -82,7 +82,7 @@ fi seed=3593589 #Number of realizations to generate. Ideally a multiple of 64 for bitweights #However, you can choose smaller numbers for debugging -ndir=128 +ndir=64 #Uncomment second option if you want to clobber already existing files for Alt MTL generation overwrite='' @@ -128,7 +128,7 @@ printf -v outputMTLFinalDestination "$ALTMTLHOME/$simName/" $datestring $ndir $s #hpListFile="$path2LSS/MainSurveyHPList.txt" #hpListFile="$path2LSS/DebugMainHPList.txt" #hpListFile="$path2LSS/SV3HPList.txt" -hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1/initled/hpxlist_dark.txt" +hpListFile="/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl10/initled/hpxlist_dark.txt" #These two options only are considered if the obscon is BRIGHT #First option indicates whether to shuffle the top level priorities @@ -153,7 +153,7 @@ PromoteFracELG=0.0 # You can only access that directory from compute nodes. 
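The reason ndir is ideally a multiple of 64 (see the comment above the ndir setting in this script) is that each target's assignment history across realizations is ultimately packed into 64-bit integer bitweights, with a per-target observation probability alongside. A toy illustration of that packing; the production code uses LSS.bitweights.pack_bitweights, not this sketch:

import numpy as np

rng = np.random.default_rng(3593589)  # seed value echoes the script above
ndir = 64                             # one 64-bit word per target
was_assigned = rng.random((5, ndir)) < 0.7   # toy histories for 5 targets

# Pack each row of 64 booleans into bytes, then view the 8 bytes as one
# uint64 (byte order ignored for this toy).
bitweights = np.packbits(was_assigned, axis=1).view(np.uint64).ravel()
prob_obs = was_assigned.mean(axis=1)  # analogous to a PROB_OBS column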
# Do NOT use the commented out directory (the normal mount of CFS) # unless the read only mount is broken -exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1/initled/ +exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl10/initled/ #exampleLedgerBase=/dvs_ro/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ #exampleLedgerBase=/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/ #exampleLedgerBase=/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/mtls/ @@ -209,7 +209,7 @@ secondary='' #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory #targfile='--targfile=/cscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA1.fits' #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' -targfile="--targfile=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA1.fits" +targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/forFA10.fits" #Default is use numobs from ledger. Uncomment second option to set numobs NOT from ledger numobs_from_ledger='' diff --git a/bin/dateLoopAltMTLBugFix_mock_batch.sh b/bin/dateLoopAltMTLBugFix_mock_batch.sh index 40ee41f51..1eab29def 100755 --- a/bin/dateLoopAltMTLBugFix_mock_batch.sh +++ b/bin/dateLoopAltMTLBugFix_mock_batch.sh @@ -46,7 +46,7 @@ fi if [ $QVal = 'regular' ]; then echo "srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:22017975 $path2LSS/runAltMTLRealizations.py $argstring" - srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:22017975 $path2LSS/runAltMTLRealizations.py $argstring + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:22562205 $path2LSS/runAltMTLRealizations.py $argstring #srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:17881308 $path2LSS/runAltMTLParallel.py $argstring fi diff --git a/py/LSS/SV3/altmtltools.py b/py/LSS/SV3/altmtltools.py index 1b1eefe94..1a0be7884 100644 --- a/py/LSS/SV3/altmtltools.py +++ b/py/LSS/SV3/altmtltools.py @@ -1,14 +1,14 @@ from desiutil.iers import freeze_iers freeze_iers() #TEMP -MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py' -MODULE_NAME = 'desitarget' -import importlib -import sys -spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) -module = importlib.util.module_from_spec(spec) -sys.modules[spec.name] = module -spec.loader.exec_module(module) +#MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py' +#MODULE_NAME = 'desitarget' +#import importlib +#import sys +#spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) +#module = importlib.util.module_from_spec(spec) +#sys.modules[spec.name] = module +#spec.loader.exec_module(module) # import collections.abc diff --git a/py/LSS/main/mockaltmtltools.py b/py/LSS/main/mockaltmtltools.py index 98fb047be..1d6cce9ce 100644 --- a/py/LSS/main/mockaltmtltools.py +++ b/py/LSS/main/mockaltmtltools.py @@ -1,5 +1,5 @@ -from desiutil.iers import freeze_iers -freeze_iers() +#from desiutil.iers import freeze_iers +#freeze_iers() import collections.abc from time import time @@ -13,14 +13,14 @@ #TEMP -MODULE_PATH = 
'/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py' -MODULE_NAME = 'desitarget' -import importlib -import sys -spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) -module = importlib.util.module_from_spec(spec) -sys.modules[spec.name] = module -spec.loader.exec_module(module) +#MODULE_PATH = '/global/homes/a/acarnero/.local/lib/python3.10/site-packages/desitarget/__init__.py' +#MODULE_NAME = 'desitarget' +#import importlib +#import sys +#spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH) +#module = importlib.util.module_from_spec(spec) +#sys.modules[spec.name] = module +#spec.loader.exec_module(module) # import desitarget @@ -1240,6 +1240,10 @@ def update_alt_ledger(altmtldir,althpdirname, altmtltilefn, actions, survey = ' # ADM ZTILEID, and other columns addes for the Main Survey. These # ADM columns may not be needed for non-ledger simulations. # ADM Note that the data model differs with survey type. + print('AURE') + print('zcat columns', zcat.columns) + print('altZCat columns', altZCat.columns) + zcatdm = survey_data_model(zcatdatamodel, survey=survey) if zcat.dtype.descr != zcatdm.dtype.descr: msg = "zcat data model must be {} not {}!".format( @@ -1265,7 +1269,7 @@ def update_alt_ledger(altmtldir,althpdirname, altmtltilefn, actions, survey = ' print('altZCat') print(altZCat) print('*************************') - + print('version of astropy at this moment', astropy.__version__) update_ledger(althpdirname, altZCat, obscon=obscon.upper(), numobs_from_ledger=numobs_from_ledger)#, targets = targets) print('AURE') diff --git a/scripts/mock_tools/abBGSamtl_cat_sbatch.sh b/scripts/mock_tools/abBGSamtl_cat_sbatch.sh index 1800ff42c..82b6617b2 100755 --- a/scripts/mock_tools/abBGSamtl_cat_sbatch.sh +++ b/scripts/mock_tools/abBGSamtl_cat_sbatch.sh @@ -1,12 +1,12 @@ #!/bin/bash -#SBATCH --time=02:00:00 +#SBATCH --time=03:00:00 #SBATCH --qos=regular #SBATCH --nodes=1 #SBATCH --constraint=cpu -#SBATCH --array=12,13,15-24 +#SBATCH --array=7,24 #SBATCH --account=desi source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main PYTHONPATH=$PYTHONPATH:$HOME/LSS/py -srun /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run1_AMTLmock_LSS_BGS.sh $SLURM_ARRAY_TASK_ID +srun /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run1_AMTLmock_LSS_BGS_clus.sh $SLURM_ARRAY_TASK_ID diff --git a/scripts/mock_tools/abamtl_cat_sbatch.sh b/scripts/mock_tools/abamtl_cat_sbatch.sh index e819338df..4aa09b4ec 100755 --- a/scripts/mock_tools/abamtl_cat_sbatch.sh +++ b/scripts/mock_tools/abamtl_cat_sbatch.sh @@ -3,7 +3,7 @@ #SBATCH --qos=regular #SBATCH --nodes=1 #SBATCH --constraint=cpu -#SBATCH --array=20,21,24 +#SBATCH --array=0-24 #SBATCH --account=desi source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main diff --git a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh index dd03d628a..f3ee9c291 100755 --- a/scripts/mock_tools/abamtl_combd_cat_sbatch.sh +++ b/scripts/mock_tools/abamtl_combd_cat_sbatch.sh @@ -3,7 +3,7 @@ #SBATCH --qos=regular #SBATCH --nodes=1 #SBATCH --constraint=cpu -#SBATCH --array=20-24 +#SBATCH --array=0-24 #SBATCH --account=desi source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main diff --git a/scripts/mock_tools/add_extra_tilesTracker.py b/scripts/mock_tools/add_extra_tilesTracker.py index f56b6c74c..4fe9a8c80 100644 --- a/scripts/mock_tools/add_extra_tilesTracker.py +++ 
b/scripts/mock_tools/add_extra_tilesTracker.py @@ -6,7 +6,7 @@ #program = 'bright' rmin = 0 -rmax = 1 +rmax = 25 path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl{MOCKNUM}/Univ000' #path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/altmtl{MOCKNUM}/Univ000' diff --git a/scripts/mock_tools/initAMTL_dark.py b/scripts/mock_tools/initAMTL_dark.py index c9f30eb1e..d07f46755 100644 --- a/scripts/mock_tools/initAMTL_dark.py +++ b/scripts/mock_tools/initAMTL_dark.py @@ -9,9 +9,9 @@ def test_dir(value): if e.errno != errno.EEXIST: raise -path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4' +path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1' for i in range(1,25): print(i) test_dir('%s/altmtl%d/Univ000'%(path,i)) - os.system('cp -R %s/altmtl%d/initled/main %s/altmtl%d/Univ000/.' %(path, i, path, i)) + os.system('cp -Rf %s/altmtl%d/initled/main %s/altmtl%d/Univ000/.' %(path, i, path, i)) os.system('cp %s/altmtl0/Univ000/*.ecsv %s/altmtl%d/Univ000/.' %(path, path, i)) diff --git a/scripts/mock_tools/mkCat_SecondGen_amtl.py b/scripts/mock_tools/mkCat_SecondGen_amtl.py index c5715ee03..969bb9937 100644 --- a/scripts/mock_tools/mkCat_SecondGen_amtl.py +++ b/scripts/mock_tools/mkCat_SecondGen_amtl.py @@ -482,6 +482,15 @@ def _parfun2(rann): tsnrcol = 'TSNR2_ELG' if args.tracer[:3] == 'BGS': + fl = os.path.join(dirout, finaltracer) + '_' + cols_clustering = Table.read(fl.replace('global','dvs_ro')+'clustering.dat.fits').columns + if 'G_R_OBS' in cols_clustering: + rcols.append('G_R_OBS') + if 'G_R_REST' in cols_clustering: + rcols.append('G_R_REST') + if 'R_MAG_ABS' in cols_clustering: + rcols.append('R_MAG_ABS') + tsnrcol = 'TSNR2_BGS' if args.ccut is not None: for rn in range(rannum[0], rannum[1]): @@ -495,6 +504,7 @@ def _parfun2(rann): global _parfun4 def _parfun4(rann): #ct.add_tlobs_ran(fl, rann, hpmapcut = args.use_map_veto) +# print(os.path.join(readdir, finaltracer) + '_', os.path.join(dirout, finaltracer) + '_', rann, rcols, -1, tsnrcol, args.use_map_veto, clus_arrays, 'y') ct.mkclusran(os.path.join(readdir, finaltracer) + '_', os.path.join(dirout, finaltracer) + '_', rann, rcols = rcols, tsnrcut = -1, tsnrcol = tsnrcol, use_map_veto = args.use_map_veto,clus_arrays=clus_arrays,add_tlobs='y')#,ntilecut=ntile,ccut=ccut) #ct.mkclusran(os.path.join(dirout, args.tracer + notqso + '_'), os.path.join(dirout, args.tracer + notqso + '_'), rann, rcols = rcols, nosplit='n', tsnrcut = 0, tsnrcol = tsnrcol, use_map_veto = args.use_map_veto)#,ntilecut=ntile,ccut=ccut) #for clustering, make rannum start from 0 diff --git a/scripts/mock_tools/run1_AMTLmock_LSS_v4.sh b/scripts/mock_tools/run1_AMTLmock_LSS_v4.sh index e742044c1..e464f9ee7 100755 --- a/scripts/mock_tools/run1_AMTLmock_LSS_v4.sh +++ b/scripts/mock_tools/run1_AMTLmock_LSS_v4.sh @@ -1,7 +1,9 @@ #!/bin/bash -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 --outmd 'notscratch' -python 
/pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 --outmd 'notscratch' -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 --outmd 'notscratch' +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer ELG_LOP --notqso y --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1 --outmd 'notscratch' +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1 --outmd 'notscratch' +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1 --outmd 'notscratch' +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 --outmd 'notscratch' +#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer QSO --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir 
/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 --outmd 'notscratch' #python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_ANY --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS #python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer BGS_BRIGHT --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS @@ -9,4 +11,4 @@ python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl #python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer LRG --notqso n --minr 0 --maxr 18 --fulld y --fullr y --apply_veto y --use_map_veto _HPmapcut --mkclusran y --nz y --mkclusdat y --splitGC y --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3 #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit/altmtl$1/mock$1/LSScats/* /global/cfs/cdirs/desi/survey/catalogs//Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/ -#chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/LSScats/*clustering* +chmod -R 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* diff --git a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh index 1065576e2..78b8d0615 100755 --- a/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh +++ b/scripts/mock_tools/run1_AMTLmock_combd_LSS.sh @@ -1,7 +1,7 @@ #!/bin/bash -python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4 --combd y --joindspec y +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mkCat_SecondGen_amtl.py --base_output /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl{MOCKNUM} --mockver ab_secondgen --mocknum $1 --survey Y1 --add_gtl y --specdata iron --tracer dark --targDir /dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1 --combd y --joindspec y #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/ #mv $SCRATCH/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/ -#chmod 775 
/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/mock$1/* -#chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3/altmtl$1/fba$1/* +chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl$1/mock$1/* +chmod 775 /global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl$1/fba$1/* diff --git a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh index 98e0ee57f..935b9ddc0 100755 --- a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh +++ b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh @@ -1,5 +1,5 @@ -SeconGenVer=AbacusSummit_v4 #AbacusSummitBGS_v2 -for j in {0..24} +SeconGenVer=AbacusSummit_v4_1 #AbacusSummitBGS_v2 +for j in {1..24} do #j=0 echo $j From 5a8a7411e7b161358667e3946d27dd60f4e3b940 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Thu, 14 Mar 2024 08:39:43 -0700 Subject: [PATCH 177/297] changes to mock creation --- bin/MakeBitweights_mock.py | 124 +++++++++++++++++------- scripts/mock_tools/concat_bitweights.py | 2 +- 2 files changed, 88 insertions(+), 38 deletions(-) diff --git a/bin/MakeBitweights_mock.py b/bin/MakeBitweights_mock.py index 0b62c8dbe..153864cd7 100755 --- a/bin/MakeBitweights_mock.py +++ b/bin/MakeBitweights_mock.py @@ -1,47 +1,97 @@ -#!/global/common/software/desi/cori/desiconda/20211217-2.0.0/conda/bin/python -u +#!/global/common/software/desi/perlmutter/desiconda/20230111-2.1.0/conda/bin/python -u +from desiutil.iers import freeze_iers +freeze_iers() +from LSS.SV3.altmtltools import writeBitweights from desiutil.log import get_logger -from LSS.SV3.mockaltmtltools import writeBitweights from LSS.bitweights import pack_bitweights from sys import argv import numpy as np +import argparse import os - +import multiprocessing as mp +from multiprocessing import Pool +import logging +import atexit log = get_logger() +parser = argparse.ArgumentParser( + prog = 'MakeBitweights', + description = 'Convert a set of {ndir} realizations of alternate MTLs into bitweights and (optionally) probobs') +parser.add_argument('-o', '--outdir', dest='outdir', required=True, type=str, help = 'base output directory.') +parser.add_argument('-obscon', '--obscon', dest='obscon', default='DARK', help = 'observation conditions, either BRIGHT or DARK.', required = False, type = str) +parser.add_argument('-s', '--survey', dest='survey', default='sv3', help = 'DESI survey to create Alt MTLs for. Either sv3 or main.', required = False, type = str) +parser.add_argument('-n', '--ndir', dest='ndir', default=128, help = 'Random seed to ensure reproducability.', required = False, type = int) +parser.add_argument('-ppn', '--ProcPerNode', dest='ProcPerNode', default=None, help = 'Number of processes to spawn per requested node. If not specified, determined automatically from NERSC_HOST.', required = False, type = int) +parser.add_argument('-hpfn', '--HPListFile', dest='HPListFile', default=None, help = 'Name of a text file consisting only of one line of comma separated healpixel numbers for which the code will generate alt MTLs. 
If not specified, it will be automatically determined from the survey name.', required = False, type = str) +parser.add_argument('-ow', '--overwrite', dest = 'overwrite', default=False, action='store_true', help = 'pass this flag to regenerate already existing bitweight files.') +parser.add_argument('-v', '--verbose', dest = 'verbose', default=False, action='store_true', help = 'set flag to enter verbose mode') +parser.add_argument('-d', '--debug', dest = 'debug', default=False, action='store_true', help = 'set flag to enter debug mode.') +parser.add_argument('-prof', '--profile', dest = 'profile', default=False, action='store_true', help = 'set flag to profile code time usage.') +''' survey = argv[1] obscon = argv[2] ndirs = int(argv[3]) -splitByReal = bool(int(argv[4])) -splitNChunks = int(argv[5]) -HPListFile = argv[6] -outdir = argv[7] -overwrite = argv[8] -exampleledgerbase = argv[9] -mtlBaseDir = outdir + '/Univ{0:03d}/' -HPList = np.array(open(HPListFile,'r').readlines()[0].split(',')).astype(int) - -HPList_true = [] - -'''AURE''' -for hpnum in HPList: - if 'sv' in survey.lower(): - mtlprestr = survey.lower() - else: - mtlprestr = '' - exampleledger = exampleledgerbase + '/{0}/{2}/{3}mtl-{2}-hp-{1}.ecsv'.format(survey.lower(),hpnum, obscon.lower(), mtlprestr) - if os.path.isfile(exampleledger): - HPList_true.append(hpnum) - else: - print(hpnum, 'not present') - -HPList_true = np.array(HPList_true) - -print(HPList_true) - -#mtlBaseDir = '/global/cscratch1/sd/jlasker/TestGeneralizedAltMTLScripts/alt_mtls_64dirs/Univ{0:03d}/' -#outdir = '/global/cscratch1/sd/jlasker/TestGeneralizedAltMTLScripts/alt_mtls_64dirs/' -#bw = makeBitweights(mtlBaseDir, ndirs = 64, hplist = hplist, debug = False) -#writeBitweights(mtlBaseDir, ndirs = 128, hplist = sv3dark, debug = False, outdir = outdir, survey = 'sv3', obscon = 'dark', allFiles = True) -#writeBitweights(mtlBaseDir, ndirs = 128, hplist = sv3dark, debug = False, outdir = outdir, survey = 'sv3', obscon = 'bright', allFiles = True) -#writeBitweights(mtlBaseDir, ndirs = None, hplist = None, debug = False, outdir = None, obscon = "dark", survey = 'sv3', overwrite = False, allFiles = False, splitByReal = False, splitNChunks = None) -writeBitweights(mtlBaseDir, ndirs = ndirs, hplist = HPList_true, debug = False, outdir = outdir, survey = survey, obscon = obscon.lower(), allFiles = True, overwrite = overwrite) +ProcPerNode = int(argv[4]) +HPListFile = argv[5] +outdir = argv[6] +overwrite = bool(int(argv[7])) +''' +args = parser.parse_args() + +mtlBaseDir = args.outdir + '/Univ{0:03d}/' +HPList = np.array(open(args.HPListFile,'r').readlines()[0].split(',')).astype(int) +print(len(HPList), HPList) + +def procFunc(nproc): + +# thisHPList = np.array_split(HPList, args.ProcPerNode)[nproc] + #print('suma todo ---', len(thisHPList), '---') + + for hp in nproc: + writeBitweights(mtlBaseDir, ndirs = args.ndir, hplist = [hp], debug = args.debug, verbose = args.verbose, outdir = args.outdir, survey = args.survey, obscon = args.obscon.lower(), allFiles = False, overwrite = args.overwrite) + +try: + NNodes = int(os.getenv('SLURM_JOB_NUM_NODES')) +except: + log.warning('no SLURM_JOB_NUM_NODES env set. 
You may not be on a compute node.')
+    NNodes = 1
+NodeID = int(os.getenv('SLURM_NODEID'))
+SlurmNProcs = int(os.getenv('SLURM_NPROCS'))
+NProc = int(NNodes*args.ProcPerNode)
+if args.verbose or args.debug:
+    log.debug('requested number of nodes: {0:d}'.format(NNodes))
+    log.debug('requested number of directories/realizations: {0:d}'.format(args.ndir))
+    log.debug('requested number of processes: {0:d}'.format(NProc))
+
+'''inds = []
+start = int(NodeID*NProc/SlurmNProcs)
+end = int((NodeID + 1)*NProc/SlurmNProcs)
+if args.verbose or args.debug:
+    log.debug('start')
+    log.debug(start)
+    log.debug('end')
+    log.debug(end)
+if args.ndir < start:
+    raise ValueError('ndir is too low for the number of nodes requested. Either request more realizations (ndir) or fewer nodes')
+for i in range(start, end):
+    if i >= args.ndir:
+        break
+    if args.debug or args.verbose or args.profile:
+        log.info('i')
+        log.info(i)
+    inds.append(i)
+'''
+
+batches_lims = np.array_split(HPList, NProc)
+
+#NProc = len(inds)
+#assert(len(inds))
+
+log.info('running on NProc = {0} processes'.format(NProc))
+p = Pool(NProc)
+atexit.register(p.close)
+#log.info('running procFunc now on inds:')
+#log.info(inds)
+#print(len(inds), inds)
+result = p.map(procFunc, batches_lims)
+
diff --git a/scripts/mock_tools/concat_bitweights.py b/scripts/mock_tools/concat_bitweights.py
index 6a0d1c702..e2125c12c 100644
--- a/scripts/mock_tools/concat_bitweights.py
+++ b/scripts/mock_tools/concat_bitweights.py
@@ -30,4 +30,4 @@ def concatenateBWFiles(BWDir, hpList, survey = 'main', obscon = 'dark', OFName =
 BW_dir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/BitweightFiles/'
 
 
-concatenateBWFiles(BW_dir, HPList, skipFailures=True, overwrite=True)
+concatenateBWFiles(BW_dir, HPList, skipFailures=False, overwrite=True)
From 4b686497e3345ad3c0b33d9c25d661d555f33f29 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Fri, 22 Mar 2024 16:01:01 -0400
Subject: [PATCH 178/297] Update combdata_main.py
---
 scripts/main/combdata_main.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/scripts/main/combdata_main.py b/scripts/main/combdata_main.py
index c5045329a..959be9c7f 100644
--- a/scripts/main/combdata_main.py
+++ b/scripts/main/combdata_main.py
@@ -52,6 +52,7 @@
 parser.add_argument("--dospec",help="whether or not to combine spec data",default='y')
 parser.add_argument("--dotarspec",help="whether or not to combine spec and tar data per type, for non-daily data",default='y')
 parser.add_argument("--redospec",help="whether or not to combine spec data from beginning",default='n')
+parser.add_argument("--redo_zmtl",help="whether or not to remake zmtl file",default='n')
 parser.add_argument("--counts_only",help="skip to just counting overlaps",default='n')
 parser.add_argument("--combpix",help="if n, just skip to next stage",default='y')
 parser.add_argument("--get_petalsky",help="if y, combine info across tiles to get dispersion in sky fibers",default='n')
@@ -568,7 +569,8 @@
 'TSNR2_ELG_R','TSNR2_LYA_R','TSNR2_BGS_R','TSNR2_QSO_R','TSNR2_LRG_R','TSNR2_ELG_Z','TSNR2_LYA_Z','TSNR2_BGS_Z',\
 'TSNR2_QSO_Z','TSNR2_LRG_Z','TSNR2_ELG','TSNR2_LYA','TSNR2_BGS','TSNR2_QSO','TSNR2_LRG','PRIORITY','DESI_TARGET','BGS_TARGET','TARGET_RA','TARGET_DEC','LASTNIGHT'])
 specfo = ldirspec+'datcomb_'+prog+'_zmtl_zdone.fits'
-    ct.combtile_spec(tiles4comb,specfo,md='zmtl',specver=specrel)
+    if args.redo_zmtl == 'y':
+        ct.combtile_spec(tiles4comb,specfo,md='zmtl',specver=specrel)
 fzmtl = fitsio.read(specfo)
 specf = join(specf,fzmtl,keys=['TARGETID','TILEID'])
 outfs = ldirspec+'datcomb_'+prog+'_spec_zdone.fits'
From 8fca58556a8b85f4201a7752b39d08f604c317c8 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Mon, 25 Mar 2024 22:36:53 -0400
Subject: [PATCH 179/297] use np.zeros_like to make targetid_data an int
---
 py/LSS/globals.py | 2 +-
 py/LSS/main/cattools.py | 4 ++--
 scripts/mock_tools/ffa2clus_fast.py | 2 +-
 scripts/mock_tools/pota2clus_fast.py | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/py/LSS/globals.py b/py/LSS/globals.py
index 9470f6a5c..1ffc25f10 100644
--- a/py/LSS/globals.py
+++ b/py/LSS/globals.py
@@ -59,7 +59,7 @@ def __init__(self,tp,weightmode='probobs',specver='fuji'):
 #'/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/altmtl/debug_jl/alt_mtls_run64_2/BitweightsRound2/BitweightFiles/sv3/dark/sv3bw-dark-AllTiles.fits'
 class main:
-    def __init__(self,tp,specver='guadalupe',survey='main'):
+    def __init__(self,tp,specver='iron',survey='main'):
         self.mdir = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/mtl/main/' #location of ledgers
         self.tdir = '/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/'#location of targets
         ss = Table.read('/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-specstatus.ecsv')
diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py
index d2fc6f01c..7f3a19820 100644
--- a/py/LSS/main/cattools.py
+++ b/py/LSS/main/cattools.py
@@ -3791,7 +3791,7 @@ def mkclusran(flin,fl,rann,rcols=['Z','WEIGHT'],zmask=False,tsnrcut=80,tsnrcol='
 #def _resamp(selregr,selregd,ffr,fcdn):
 def _resamp(rand_sel,dat_sel,ffr,fcdn):
     for col in rcols:
-        ffr[col] = np.zeros(len(ffr))
+        ffr[col] = np.zeros_like(fcdn[col],shape=len(ffr))
 #rand_sel = [selregr,~selregr]
 #dat_sel = [ selregd,~selregd]
 for dsel,rsel in zip(dat_sel,rand_sel):
@@ -3973,7 +3973,7 @@ def clusran_resamp(flin,rann,rcols=['Z','WEIGHT'],write_cat='y',compmd='ran'):
 len_o = len(ffr)
 def _resamp(selregr,selregd,ffr,fcdn):
     for col in rcols:
-        ffr[col] = np.zeros(len(ffr))
+        ffr[col] = np.zeros_like(fcdn[col],shape=len(ffr))
 rand_sel = [selregr,~selregr]
 dat_sel = [ selregd,~selregd]
 for dsel,rsel in zip(dat_sel,rand_sel):
diff --git a/scripts/mock_tools/ffa2clus_fast.py b/scripts/mock_tools/ffa2clus_fast.py
index 35fa81857..73c0dc20d 100644
--- a/scripts/mock_tools/ffa2clus_fast.py
+++ b/scripts/mock_tools/ffa2clus_fast.py
@@ -122,7 +122,7 @@ def ran_col_assign(randoms,data,sample_columns,tracer):
 data.rename_column('TARGETID', 'TARGETID_DATA')
 def _resamp(selregr,selregd):
     for col in sample_columns:
-        randoms[col] = np.zeros(len(randoms))
+        randoms[col] = np.zeros_like(data[col],shape=len(randoms))
 rand_sel = [selregr,~selregr]
 dat_sel = [ selregd,~selregd]
 for dsel,rsel in zip(dat_sel,rand_sel):
diff --git a/scripts/mock_tools/pota2clus_fast.py b/scripts/mock_tools/pota2clus_fast.py
index f00582bbf..6fb9b8073 100644
--- a/scripts/mock_tools/pota2clus_fast.py
+++ b/scripts/mock_tools/pota2clus_fast.py
@@ -130,7 +130,7 @@ def ran_col_assign(randoms,data,sample_columns,tracer):
 data.rename_column('TARGETID', 'TARGETID_DATA')
 def _resamp(selregr,selregd):
     for col in sample_columns:
-        randoms[col] = np.zeros(len(randoms))
+        randoms[col] = np.zeros_like(data[col],shape=len(randoms))
 rand_sel = [selregr,~selregr]
 dat_sel = [ selregd,~selregd]
 for dsel,rsel in zip(dat_sel,rand_sel):
From fc1352287a56c73647ff4fba75b168c223ce2ec8 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Mon, 25 Mar 2024 22:53:50 -0400
Subject: [PATCH 180/297] Create patch_rand_targetid_data.py
---
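In outline, the script added below restores the TARGETID_DATA column in each random clustering catalog by joining back to the corresponding data catalog on shared columns. A minimal sketch of the core operation, with illustrative file names (note that this first committed version still carries several typos, which the following patches correct):

from astropy.table import Table, join

data = Table.read('LRG_NGC_clustering.dat.fits')         # hypothetical local copies
ran = Table.read('LRG_NGC_0_clustering.ran.fits')
data.rename_column('TARGETID', 'TARGETID_DATA')
data.keep_columns(['Z', 'WEIGHT_COMP', 'TARGETID_DATA'])
ran.remove_column('TARGETID_DATA')                       # drop the stale assignment
patched = join(ran, data, keys=['Z', 'WEIGHT_COMP'], join_type='left')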
 scripts/patch_rand_targetid_data.py | 55 +++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100644 scripts/patch_rand_targetid_data.py

diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py
new file mode 100644
index 000000000..fac2be2ac
--- /dev/null
+++ b/scripts/patch_rand_targetid_data.py
@@ -0,0 +1,55 @@
+import LSS.common_tools as common
+from astropy.table import Table, join, vstack
+from astropy.coordinates import SkyCoord
+import astropy.units as u
+import argparse
+import numpy as np
+from numpy import ma
+import glob
+
+
+
+parser.add_argument("--basedir", help="base directory for catalogs",default='/dvs_ro/cfs/cdirs/desi/survey/catalogs/')
+parser.add_argument("--version", help="catalog version",default='test')
+parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1')
+parser.add_argument("--tracers", help="all runs all for given survey",default='all')
+parser.add_argument("--verspec",help="version for redshifts",default='iron')
+parser.add_argument("--data",help="LSS or mock directory",default='LSS')
+parser.add_argument("--blinded",help="blinded or unblinded catalogs",default='unblinded')
+parser.add_argument("--nran",help="number of randoms to process",default=1,type=int)
+args = parser.parse_args()
+
+
+
+indir = args.basedir+args.survey+'/'+args.data+'/'+args.verspec+'/LSScats/'+args.version+'/'+args.blinded+'/'
+regl = ['NGC','SGC']
+tracers = ['LRG','ELG_LOPnotqso','QSO','BGS_BRIGHT-21.5']
+if args.tracers != 'all'
+    tracers = [args.tracers]
+for tracer in tracers:
+    # load catalogs
+    for reg in regl:
+        data_cat_fn = indir tracer+'_'+reg+'_clustering.dat.fits'
+        data = Table(fitsio.read(data_cat_fn))
+        data_cat.rename_column('TARGETID', 'TARGETID_DATA')
+        data.keep_columns(['Z','WEIGHT_COMP','TARGETID_DATA'])
+        for rann in range(0,args.nran):
+            ran_cat_fn = indir tracer+'_'+reg+'_'+str(rann)+'_clustering.dat.fits'
+            ran = Table(fitsio.read(ran_cat_fn))
+            # remove current random TARGETID_DATA
+            ran.remove_column('TARGETID_DATA')
+            # join catalogs
+            join_cat = join(ran, data, keys = ['Z','WEIGHT_COMP'], join_type ='left')
+
+            print(tracer,reg,str(rann))
+            print(f"number of null TARGETID_DATA in joined catalog = {len(join_cat[join_cat['TARGETID_DATA'] == ma.masked])}")
+            print(f"length of original random catalog = {len(rand_cat)}")
+            print(f"length of new joined catalog ...
= {len(join_cat)}") + + + + + + + + \ No newline at end of file From cbc101dbac31d350d7fcddc761bed10e44428735 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 2024 22:54:57 -0400 Subject: [PATCH 181/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index fac2be2ac..ef06f9a68 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -24,7 +24,7 @@ indir = args.basedir+args.survey+'/'+args.data+'/'+args.verspec+'/LSScats/'+args.version+'/'+args.blinded+'/' regl = ['NGC','SGC'] tracers = ['LRG','ELG_LOPnotqso','QSO','BGS_BRIGHT-21.5'] -if args.tracers != 'all' +if args.tracers != 'all': tracers = [args.tracers] for tracer in tracers: # load catalogs @@ -36,15 +36,15 @@ for rann in range(0,args.nran): ran_cat_fn = indir tracer+'_'+reg+'_'+str(rann)+'_clustering.dat.fits' ran = Table(fitsio.read(ran_cat_fn)) - # remove current random TARGETID_DATA - ran.remove_column('TARGETID_DATA') - # join catalogs - join_cat = join(ran, data, keys = ['Z','WEIGHT_COMP'], join_type ='left') - - print(tracer,reg,str(rann)) - print(f"number of null TARGETID_DATA in joined catalog = {len(join_cat[join_cat['TARGETID_DATA'] == ma.masked])}") - print(f"length of original random catalog = {len(rand_cat)}") - print(f"length of new joined catalog ... = {len(join_cat)}") + # remove current random TARGETID_DATA + ran.remove_column('TARGETID_DATA') + # join catalogs + join_cat = join(ran, data, keys = ['Z','WEIGHT_COMP'], join_type ='left') + + print(tracer,reg,str(rann)) + print(f"number of null TARGETID_DATA in joined catalog = {len(join_cat[join_cat['TARGETID_DATA'] == ma.masked])}") + print(f"length of original random catalog = {len(rand_cat)}") + print(f"length of new joined catalog ... 
= {len(join_cat)}") From 9b009094daf102755b6ded2b3d1545767ec2224b Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 2024 22:55:17 -0400 Subject: [PATCH 182/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index ef06f9a68..6eb647083 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -29,12 +29,12 @@ for tracer in tracers: # load catalogs for reg in regl: - data_cat_fn = indir tracer+'_'+reg+'_clustering.dat.fits' + data_cat_fn = indir +tracer+'_'+reg+'_clustering.dat.fits' data = Table(fitsio.read(data_cat_fn)) data_cat.rename_column('TARGETID', 'TARGETID_DATA') data.keep_columns(['Z','WEIGHT_COMP','TARGETID_DATA']) for rann in range(0,args.nran): - ran_cat_fn = indir tracer+'_'+reg+'_'+str(rann)+'_clustering.dat.fits' + ran_cat_fn = indir +tracer+'_'+reg+'_'+str(rann)+'_clustering.dat.fits' ran = Table(fitsio.read(ran_cat_fn)) # remove current random TARGETID_DATA ran.remove_column('TARGETID_DATA') From fe93a13acaa73f7daea877d9ab8eebce16970a8d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 2024 22:57:15 -0400 Subject: [PATCH 183/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index 6eb647083..d4138eba4 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -8,7 +8,7 @@ import glob - +parser = argparse.ArgumentParser() parser.add_argument("--basedir", help="base directory for catalogs",default='/dvs_ro/cfs/cdirs/desi/survey/catalogs/') parser.add_argument("--version", help="catalog version",default='test') parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1') From be32657994c0d5b773ea55177fac2b8729f56c01 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 2024 22:57:45 -0400 Subject: [PATCH 184/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index d4138eba4..e42e00038 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -6,6 +6,7 @@ import numpy as np from numpy import ma import glob +import fitsio parser = argparse.ArgumentParser() From ebd1fc38d57ff6f34922b0fcb5ff68c11e23c43c Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 2024 22:58:25 -0400 Subject: [PATCH 185/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index e42e00038..528c7517c 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -32,7 +32,7 @@ for reg in regl: data_cat_fn = indir +tracer+'_'+reg+'_clustering.dat.fits' data = Table(fitsio.read(data_cat_fn)) - data_cat.rename_column('TARGETID', 'TARGETID_DATA') + data.rename_column('TARGETID', 'TARGETID_DATA') data.keep_columns(['Z','WEIGHT_COMP','TARGETID_DATA']) for rann in range(0,args.nran): ran_cat_fn = indir +tracer+'_'+reg+'_'+str(rann)+'_clustering.dat.fits' From da177ed399bab546ad04916bbc218a709d6b35ef Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 
2024 22:59:04 -0400 Subject: [PATCH 186/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index 528c7517c..71fc5911b 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -35,7 +35,7 @@ data.rename_column('TARGETID', 'TARGETID_DATA') data.keep_columns(['Z','WEIGHT_COMP','TARGETID_DATA']) for rann in range(0,args.nran): - ran_cat_fn = indir +tracer+'_'+reg+'_'+str(rann)+'_clustering.dat.fits' + ran_cat_fn = indir +tracer+'_'+reg+'_'+str(rann)+'_clustering.ran.fits' ran = Table(fitsio.read(ran_cat_fn)) # remove current random TARGETID_DATA ran.remove_column('TARGETID_DATA') From ca28afc9128cba7e17faf136fdd974f7266bbddd Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 2024 23:00:16 -0400 Subject: [PATCH 187/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index 71fc5911b..1e20b3577 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -44,7 +44,7 @@ print(tracer,reg,str(rann)) print(f"number of null TARGETID_DATA in joined catalog = {len(join_cat[join_cat['TARGETID_DATA'] == ma.masked])}") - print(f"length of original random catalog = {len(rand_cat)}") + print(f"length of original random catalog = {len(ran)}") print(f"length of new joined catalog ... = {len(join_cat)}") From 6f93fdc59370676b40baf1ec4843c029b03e208d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 2024 23:03:31 -0400 Subject: [PATCH 188/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index 1e20b3577..99a149a6f 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -31,12 +31,12 @@ # load catalogs for reg in regl: data_cat_fn = indir +tracer+'_'+reg+'_clustering.dat.fits' - data = Table(fitsio.read(data_cat_fn)) + data = Table.read(data_cat_fn,memmap=True) data.rename_column('TARGETID', 'TARGETID_DATA') data.keep_columns(['Z','WEIGHT_COMP','TARGETID_DATA']) for rann in range(0,args.nran): ran_cat_fn = indir +tracer+'_'+reg+'_'+str(rann)+'_clustering.ran.fits' - ran = Table(fitsio.read(ran_cat_fn)) + ran = Table.read(ran_cat_fn,memmap=True) # remove current random TARGETID_DATA ran.remove_column('TARGETID_DATA') # join catalogs From 928a2463031f26b0b230a13e1ee18e920fa661c5 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 2024 23:08:32 -0400 Subject: [PATCH 189/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index 99a149a6f..e80cfd3eb 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -29,11 +29,15 @@ tracers = [args.tracers] for tracer in tracers: # load catalogs + datal = [] for reg in regl: data_cat_fn = indir +tracer+'_'+reg+'_clustering.dat.fits' data = Table.read(data_cat_fn,memmap=True) data.rename_column('TARGETID', 'TARGETID_DATA') data.keep_columns(['Z','WEIGHT_COMP','TARGETID_DATA']) + datal.append(data) + data = vstack(datal) + for reg in regl: for rann in 
range(0,args.nran): ran_cat_fn = indir +tracer+'_'+reg+'_'+str(rann)+'_clustering.ran.fits' ran = Table.read(ran_cat_fn,memmap=True) From 1a2dcb8d4d6c1e95c064899d47727a3f98acbbf3 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 2024 23:12:32 -0400 Subject: [PATCH 190/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index e80cfd3eb..7e7744141 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -34,7 +34,7 @@ data_cat_fn = indir +tracer+'_'+reg+'_clustering.dat.fits' data = Table.read(data_cat_fn,memmap=True) data.rename_column('TARGETID', 'TARGETID_DATA') - data.keep_columns(['Z','WEIGHT_COMP','TARGETID_DATA']) + data.keep_columns(['Z','WEIGHT_COMP','TARGETID_DATA,WEIGHT_SYS']) datal.append(data) data = vstack(datal) for reg in regl: @@ -44,7 +44,7 @@ # remove current random TARGETID_DATA ran.remove_column('TARGETID_DATA') # join catalogs - join_cat = join(ran, data, keys = ['Z','WEIGHT_COMP'], join_type ='left') + join_cat = join(ran, data, keys = ['Z','WEIGHT_COMP','WEIGHT_SYS'], join_type ='left') print(tracer,reg,str(rann)) print(f"number of null TARGETID_DATA in joined catalog = {len(join_cat[join_cat['TARGETID_DATA'] == ma.masked])}") From 244a80c7b9197764af0567c660759794475d84ac Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 25 Mar 2024 23:14:01 -0400 Subject: [PATCH 191/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index 7e7744141..7640d47c3 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -34,7 +34,7 @@ data_cat_fn = indir +tracer+'_'+reg+'_clustering.dat.fits' data = Table.read(data_cat_fn,memmap=True) data.rename_column('TARGETID', 'TARGETID_DATA') - data.keep_columns(['Z','WEIGHT_COMP','TARGETID_DATA,WEIGHT_SYS']) + data.keep_columns(['Z','WEIGHT_COMP','TARGETID_DATA','WEIGHT_SYS']) datal.append(data) data = vstack(datal) for reg in regl: From 24c6f613cee15b6040ee592a74b3af5df9816b5f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 26 Mar 2024 16:03:59 -0400 Subject: [PATCH 192/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index 7640d47c3..d5c32389a 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -46,8 +46,11 @@ # join catalogs join_cat = join(ran, data, keys = ['Z','WEIGHT_COMP','WEIGHT_SYS'], join_type ='left') + len_mask = len(join_cat[join_cat['TARGETID_DATA'] == ma.masked]) + if len_mask == 0 and len(ran) == len(join_cat): + common.write_LSS(join_cat,ran_cat_fn) print(tracer,reg,str(rann)) - print(f"number of null TARGETID_DATA in joined catalog = {len(join_cat[join_cat['TARGETID_DATA'] == ma.masked])}") + print(f"number of null TARGETID_DATA in joined catalog = {len_mask}") print(f"length of original random catalog = {len(ran)}") print(f"length of new joined catalog ... 
= {len(join_cat)}") From f940b95e31d8e0cf9a50f5b5837495d27c4ce90a Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 26 Mar 2024 16:21:59 -0400 Subject: [PATCH 193/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index d5c32389a..6c0ae281c 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -49,10 +49,15 @@ len_mask = len(join_cat[join_cat['TARGETID_DATA'] == ma.masked]) if len_mask == 0 and len(ran) == len(join_cat): common.write_LSS(join_cat,ran_cat_fn) - print(tracer,reg,str(rann)) - print(f"number of null TARGETID_DATA in joined catalog = {len_mask}") - print(f"length of original random catalog = {len(ran)}") - print(f"length of new joined catalog ... = {len(join_cat)}") + + print(tracer,reg,str(rann)) + print('SUCCESS') + else: + print(tracer,reg,str(rann)) + print('FAILURE!') + print(f"number of null TARGETID_DATA in joined catalog = {len_mask}") + print(f"length of original random catalog = {len(ran)}") + print(f"length of new joined catalog ... = {len(join_cat)}") From e783fdaccfca9cacaabdf47670517e7a64ccb8b0 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 26 Mar 2024 16:29:41 -0400 Subject: [PATCH 194/297] Update patch_rand_targetid_data.py --- scripts/patch_rand_targetid_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/patch_rand_targetid_data.py b/scripts/patch_rand_targetid_data.py index 6c0ae281c..fa4a7b2de 100644 --- a/scripts/patch_rand_targetid_data.py +++ b/scripts/patch_rand_targetid_data.py @@ -48,7 +48,7 @@ len_mask = len(join_cat[join_cat['TARGETID_DATA'] == ma.masked]) if len_mask == 0 and len(ran) == len(join_cat): - common.write_LSS(join_cat,ran_cat_fn) + common.write_LSS(join_cat,ran_cat_fn.replace('dvs_ro','global')) print(tracer,reg,str(rann)) print('SUCCESS') From 7f37108c8194b429bae6d2eafe1f1baa3a6aedb4 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Wed, 27 Mar 2024 03:11:28 -0700 Subject: [PATCH 195/297] Changes to mock processing --- bin/MakeBitweights_mock.py | 35 ++++++++++-- bin/Y1ALTMTLRealizationsDARK_mock.sh | 14 ++--- bin/runAltMTLRealizations.py | 5 +- py/LSS/SV3/altmtltools.py | 71 +++++++++++++++++++++---- scripts/mock_tools/run1_prepmock_LSS.sh | 6 +++ 5 files changed, 107 insertions(+), 24 deletions(-) create mode 100755 scripts/mock_tools/run1_prepmock_LSS.sh diff --git a/bin/MakeBitweights_mock.py b/bin/MakeBitweights_mock.py index 153864cd7..cce25002b 100755 --- a/bin/MakeBitweights_mock.py +++ b/bin/MakeBitweights_mock.py @@ -12,6 +12,10 @@ from multiprocessing import Pool import logging import atexit +import fitsio +import LSS.common_tools as common +from astropy.table import Table + log = get_logger() parser = argparse.ArgumentParser( prog = 'MakeBitweights', @@ -42,13 +46,36 @@ HPList = np.array(open(args.HPListFile,'r').readlines()[0].split(',')).astype(int) print(len(HPList), HPList) +'''path_to_compare = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/BitweightFiles/main/dark' +HPListtemp = [] +for hp in HPList: + if not os.path.isfile(os.path.join(path_to_compare, 'mainbw-dark-hp-%d.fits' % hp)): + HPListtemp.append(hp) + +HPList = np.array(HPListtemp) +print(len(HPList), HPList) +''' + + + +from LSS.globals import main +mainp = main('All', 'iron', survey='Y1') +specf = 
'/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/datcomb_dark_spec_zdone.fits' +with fitsio.FITS(specf.replace('global', 'dvs_ro')) as hdulist: + fs = hdulist[1].read() #specf.replace('global', 'dvs_ro')) +fs = common.cut_specdat(fs, mainp.badfib) +fs = Table(fs) +fs['TILELOCID'] = 10000*fs['TILEID'] +fs['LOCATION'] +gtl = np.unique(fs['TILELOCID']) + def procFunc(nproc): +# print(gtl) # thisHPList = np.array_split(HPList, args.ProcPerNode)[nproc] #print('suma todo ---', len(thisHPList), '---') for hp in nproc: - writeBitweights(mtlBaseDir, ndirs = args.ndir, hplist = [hp], debug = args.debug, verbose = args.verbose, outdir = args.outdir, survey = args.survey, obscon = args.obscon.lower(), allFiles = False, overwrite = args.overwrite) + writeBitweights(mtlBaseDir, ndirs = args.ndir, hplist = [hp], debug = args.debug, verbose = args.verbose, outdir = args.outdir, survey = args.survey, obscon = args.obscon.lower(), allFiles = False, overwrite = args.overwrite, gtl=gtl) try: NNodes = int(os.getenv('SLURM_JOB_NUM_NODES')) @@ -62,7 +89,6 @@ def procFunc(nproc): log.debug('requested number of nodes: {0:d}'.format(NNodes)) log.debug('requested number of directories/realizations: {0:d}'.format(args.ndir)) log.debug('requested number of processes: {0:d}'.format(NProc)) - '''inds = [] start = int(NodeID*NProc/SlurmNProcs) end = int((NodeID + 1)*NProc/SlurmNProcs) @@ -81,12 +107,13 @@ def procFunc(nproc): log.info(i) inds.append(i) ''' - +##NProc = 1 batches_lims = np.array_split(HPList, NProc) - +print(batches_lims) #NProc = len(inds) #assert(len(inds)) + log.info('running on NProc = {0} processes'.format(NProc)) p = Pool(NProc) atexit.register(p.close) diff --git a/bin/Y1ALTMTLRealizationsDARK_mock.sh b/bin/Y1ALTMTLRealizationsDARK_mock.sh index 7959a10b4..6796e3303 100755 --- a/bin/Y1ALTMTLRealizationsDARK_mock.sh +++ b/bin/Y1ALTMTLRealizationsDARK_mock.sh @@ -5,16 +5,16 @@ start=`date +%s.%N` #simName is the subdirectory within ALTMTLHOME where this specific set of alt MTLs will be written #simName=JL_DebugReprocReprod2 -simName="altmtl{mock_number}" +simName="altmtl{mock_number}_R128" #Location where you have cloned the LSS Repo path2LSS=/pscratch/sd/a/acarnero/codes/LSS/bin/ # Flags for debug/verbose mode/profiling code time usage. # Uncomment second set of options to turn on the modes -#debug='' +debug='' #verbose='' profile='' -debug='--debug' +#debug='--debug' verbose='--verbose' #profile='--profile' @@ -46,7 +46,7 @@ mock='--mock' #Uncomment the following line to set your own/nonscratch directory #ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/ #ALTMTLHOME=/pscratch/sd/a/acarnero/test_main/ -ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/ +ALTMTLHOME=/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/ if [[ "${NERSC_HOST}" == "cori" ]]; then CVal='haswell' @@ -86,8 +86,8 @@ seed=3593589 #Number of realizations to generate. 
Ideally a multiple of 64 for bitweights #However, you can choose smaller numbers for debugging #Mock realization -mockinit=0 -mockend=1 +mockinit=1 +mockend=2 let ndir=$mockend-$mockinit @@ -218,7 +218,7 @@ secondary='' #targfile='' #CHANGEME IF RUNNING ON MOCKS #targfile='--targfile=/global/cfs/cdirs/desi/target/catalogs/dr9/1.1.1/targets/main/resolve/' #Main survey target directory #targfile="--targfile=/pscratch/sd/a/acarnero/test_main/forFA{mock_number}.fits" -targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/forFA{mock_number}.fits" +targfile="--targfile=/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA{mock_number}.fits" #targfile='--targfile=CHANGEME IF RUNNING ON MOCKS' #/pscratch/sd/j/jlasker/MockAMTLY1/FirstGenMocks/AbacusSummit/forFA2.fits' diff --git a/bin/runAltMTLRealizations.py b/bin/runAltMTLRealizations.py index fe03c4340..96378e015 100755 --- a/bin/runAltMTLRealizations.py +++ b/bin/runAltMTLRealizations.py @@ -72,6 +72,7 @@ Path(args.altMTLBaseDir + '/GETOSUBPTRUE').touch() #Get information about environment for multiprocessing +##TEMP NodeID = int(os.getenv('SLURM_NODEID')) SlurmNProcs = int(os.getenv('SLURM_NPROCS')) try: @@ -94,7 +95,6 @@ - # These should be constant/default. # If this changes, add these to argparse @@ -154,7 +154,8 @@ def procFunc(nproc): continue inds.append(i) -assert(len(inds)) +###assert(len(inds)) +##p = Pool(1) p = Pool(NProc) atexit.register(p.close) result = p.map(procFunc,inds) diff --git a/py/LSS/SV3/altmtltools.py b/py/LSS/SV3/altmtltools.py index 1a0be7884..9c52d940b 100644 --- a/py/LSS/SV3/altmtltools.py +++ b/py/LSS/SV3/altmtltools.py @@ -16,7 +16,7 @@ import astropy import astropy.io import astropy.io.fits as pf -from astropy.table import Table,join +from astropy.table import Table,join,vstack import memory_profiler from memory_profiler import profile @@ -35,6 +35,7 @@ from desiutil.log import get_logger import fitsio +import LSS.common_tools as common import healpy as hp @@ -1570,8 +1571,14 @@ def plotMTLProb(mtlBaseDir, ndirs = 10, hplist = None, obscon = 'dark', survey = if not jupyter: plt.close() +def return_path_fba(zz, path): + import glob + curr = glob.glob(os.path.join(path,'*','fba-%s.fits' % zz))[0] + return curr + + #@profile -def makeBitweights(mtlBaseDir, ndirs = 64, hplist = None, obscon = 'dark', survey = 'sv3', debug = False, obsprob = False, splitByReal = False, verbose = False): +def makeBitweights(mtlBaseDir, ndirs = 64, hplist = None, obscon = 'dark', survey = 'sv3', debug = False, obsprob = False, splitByReal = False, verbose = False, gtl = None): """Takes a set of {ndirs} realizations of DESI/SV3 and converts their MTLs into bitweights and an optional PROBOBS, the probability that the target was observed over the realizations @@ -1612,7 +1619,7 @@ def makeBitweights(mtlBaseDir, ndirs = 64, hplist = None, obscon = 'dark', surve Array of probabilities a target gets observed over {ndirs} realizations """ - + TIDs = None if splitByReal: @@ -1632,6 +1639,10 @@ def makeBitweights(mtlBaseDir, ndirs = 64, hplist = None, obscon = 'dark', surve realizations = np.arange(ndirs, dtype=np.int32) my_realizations = np.array_split(realizations, mpi_procs)[mpi_rank] MyObsFlagList = np.empty((my_realizations.shape[0], ntar), dtype = bool) + + + + #MTL = np.sort(desitarget.io.read_mtl_in_hp(mtldir, 32, hplist, unique=True, isodate=None, returnfn=False, initial=False, leq=False), order = 'TARGETID') for i, r in enumerate(my_realizations): mtldir = 
mtlBaseDir.format(i) + '/' + survey + '/' + obscon @@ -1640,7 +1651,6 @@ def makeBitweights(mtlBaseDir, ndirs = 64, hplist = None, obscon = 'dark', surve TIDs = MTL['TARGETID'] else: assert(np.array_equal(TIDs, MTL['TARGETID'])) - MyObsFlagList[i][:] = MTL['NUMOBS'] > 0.5 ObsFlagList = None @@ -1675,16 +1685,52 @@ def makeBitweights(mtlBaseDir, ndirs = 64, hplist = None, obscon = 'dark', surve for i in range(ndirs): mtldir = mtlBaseDir.format(i) + '/' + survey + '/' + obscon MTL = np.sort(desitarget.io.read_mtl_in_hp(mtldir, 32, hplist, unique=True, isodate=None, returnfn=False, initial=False, leq=False), order = 'TARGETID') + + if gtl is not None: + ztile = MTL['ZTILEID'] + unique_tiles = np.unique(ztile) + unique_tiles = unique_tiles[unique_tiles != -1] + + if len(unique_tiles) == 0: + new_column_data = np.full(len(MTL), -1, dtype=int) + new_MTL = np.empty(MTL.shape, dtype = MTL.dtype.descr + [('TILELOCID', int)]) + for field in MTL.dtype.names: + new_MTL[field] = MTL[field] # Copy existing columns + new_MTL['TILELOCID'] = new_column_data # Add the new column filled with -1 + MTL = new_MTL + + else: + cat = Table() + for zz in unique_tiles: + fba_file = return_path_fba(str(zz).zfill(6), os.path.join(mtlBaseDir.format(i), 'fa', 'MAIN')) + with fitsio.FITS(fba_file.replace('global', 'dvs_ro')) as hdulist: + ff_temp = hdulist['FASSIGN'].read() + ff_temp = Table(ff_temp) + # print(ff_temp.columns) + ff_temp['TILELOCID'] = 10000*zz +ff_temp['LOCATION'] + ff_temp['ZTILEID'] = [zz]*len(ff_temp) + cat = vstack([cat, ff_temp]) + MTL = join(MTL, cat, join_type='left', keys=['TARGETID', 'ZTILEID']) + del cat + if TIDs is None: TIDs = MTL['TARGETID'] else: assert(np.array_equal(TIDs, MTL['TARGETID'])) try: - ObsFlagList = np.column_stack((ObsFlagList,MTL['NUMOBS'] > 0.5)) + if gtl is not None: + print('Create ObsFlagList') + ObsFlagList = np.column_stack((ObsFlagList, (MTL['NUMOBS'] > 0.5)&(np.isin(MTL['TILELOCID'], gtl)))) + else: + ObsFlagList = np.column_stack((ObsFlagList, (MTL['NUMOBS'] > 0.5))) + except: log.info('hplist[0] = {0:d}'.format(hplist[0])) log.info('This message should only appear once for the first realization.') - ObsFlagList = MTL['NUMOBS'] > 0.5 + if gtl is not None: + ObsFlagList = (MTL['NUMOBS'] > 0.5)&(np.isin(MTL['TILELOCID'], gtl)) + else: + ObsFlagList = (MTL['NUMOBS'] > 0.5) if debug or verbose: log.info(ObsFlagList.shape) ObsArr = np.sum(ObsFlagList, axis = 1) @@ -1706,7 +1752,7 @@ def makeBitweights(mtlBaseDir, ndirs = 64, hplist = None, obscon = 'dark', surve -def writeBitweights(mtlBaseDir, ndirs = None, hplist = None, debug = False, outdir = None, obscon = "dark", survey = 'sv3', overwrite = False, allFiles = False, splitByReal = False, splitNChunks = None, verbose = False): +def writeBitweights(mtlBaseDir, ndirs = None, hplist = None, debug = False, outdir = None, obscon = "dark", survey = 'sv3', overwrite = False, allFiles = False, splitByReal = False, splitNChunks = None, verbose = False, gtl=None): """Takes a set of {ndirs} realizations of DESI/SV3 and converts their MTLs into bitweights and an optional PROBOBS, the probability that the target was observed over the realizations. 
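[Note on the gtl-aware bitweight step in this patch: each realization contributes one boolean column, a target counts as observed only when NUMOBS > 0.5 and its TILELOCID sits in the good-hardware list, and PROBOBS is the row mean of those columns. A minimal self-contained sketch of that reduction, with random flags standing in for the per-Univ MTL reads; the bit-packing convention shown is illustrative, not necessarily the one the bitweight files actually use:

import numpy as np

rng = np.random.default_rng(0)
ndirs, ntarg = 64, 1000

# one boolean column per realization; in makeBitweights this is
# (MTL['NUMOBS'] > 0.5) & np.isin(MTL['TILELOCID'], gtl)
obs_flag_list = rng.random((ntarg, ndirs)) < 0.3

obs_arr = np.sum(obs_flag_list, axis=1)  # assignment counts per target
obsprob = obs_arr / ndirs                # PROBOBS

# pack one bit per realization: 64 booleans -> two 32-bit ints per target
packed = np.packbits(obs_flag_list, axis=1, bitorder='little')
bitweights = packed.view(np.int32)
assert bitweights.shape == (ntarg, ndirs // 32)
]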
Then writes them to (a) file(s) @@ -1772,7 +1818,10 @@ def writeBitweights(mtlBaseDir, ndirs = None, hplist = None, debug = False, outd if not os.path.exists(outdir + '/BitweightFiles/' + survey + '/' + obscon): os.makedirs(outdir + '/BitweightFiles/' + survey + '/' + obscon) elif not os.path.exists(outdir + '/BitweightFiles/' + survey + '/' + obscon): - os.makedirs(outdir + '/BitweightFiles/' + survey + '/' + obscon) + try: + os.makedirs(outdir + '/BitweightFiles/' + survey + '/' + obscon) + except: + log.info('makedir exist already for %s/BitweightFiles/%s/%s' %(outdir, survey, obscon)) if type(hplist) == int: hplist = [hplist] if allFiles: @@ -1803,9 +1852,9 @@ def writeBitweights(mtlBaseDir, ndirs = None, hplist = None, debug = False, outd log.info('split {0}'.format(i)) log.info(split) if i == 0: - TIDs, bitweights, obsprobs = makeBitweights(mtlBaseDir, ndirs = ndirs, hplist = split, debug = False, obsprob = True, obscon = obscon, survey = survey, splitByReal = splitByReal) + TIDs, bitweights, obsprobs = makeBitweights(mtlBaseDir, ndirs = ndirs, hplist = split, debug = False, obsprob = True, obscon = obscon, survey = survey, splitByReal = splitByReal, gtl=gtl) else: - TIDsTemp, bitweightsTemp, obsprobsTemp = makeBitweights(mtlBaseDir, ndirs = ndirs, hplist = split, debug = False, obsprob = True, obscon = obscon, survey = survey, splitByReal = splitByReal) + TIDsTemp, bitweightsTemp, obsprobsTemp = makeBitweights(mtlBaseDir, ndirs = ndirs, hplist = split, debug = False, obsprob = True, obscon = obscon, survey = survey, splitByReal = splitByReal, gtl=gtl) if mpi_rank == 0: if debug or verbose: @@ -1825,7 +1874,7 @@ def writeBitweights(mtlBaseDir, ndirs = None, hplist = None, debug = False, outd else: if debug or verbose: log.info('makeBitweights2') - TIDs, bitweights, obsprobs = makeBitweights(mtlBaseDir, ndirs = ndirs, hplist = hplist, debug = False, obsprob = True, obscon = obscon, survey = survey, splitByReal = splitByReal) + TIDs, bitweights, obsprobs = makeBitweights(mtlBaseDir, ndirs = ndirs, hplist = hplist, debug = False, obsprob = True, obscon = obscon, survey = survey, splitByReal = splitByReal, gtl=gtl) if splitByReal: if debug or verbose: log.info('----') diff --git a/scripts/mock_tools/run1_prepmock_LSS.sh b/scripts/mock_tools/run1_prepmock_LSS.sh new file mode 100755 index 000000000..bac1bf39a --- /dev/null +++ b/scripts/mock_tools/run1_prepmock_LSS.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +num=$(($1 + 1)) + +python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/prepare_mocks_Y1_dark.py --mockver ab_secondgen --mockpath /global/cfs/cdirs/desi/cosmosim/SecondGenMocks/AbacusSummit/CutSky_v4_1 --realmin $1 --realmax $num --isProduction y --split_snapshot y --new_version AbacusSummit_v4_1 + From a7632a6decc400f75692cfa036ca678fa2d685a1 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 11:40:59 -0400 Subject: [PATCH 196/297] apply tsnr2 cut to good hardware list --- py/LSS/common_tools.py | 6 +++++- py/LSS/main/cattools.py | 35 +++++++++++++++++----------------- scripts/main/mkCat_main_ran.py | 2 +- 3 files changed, 24 insertions(+), 19 deletions(-) diff --git a/py/LSS/common_tools.py b/py/LSS/common_tools.py index b72ac04b4..f44d46f99 100644 --- a/py/LSS/common_tools.py +++ b/py/LSS/common_tools.py @@ -27,7 +27,7 @@ def thphi2radec(theta,phi): #functions that shouldn't have any dependence on survey go here -def cut_specdat(dz,badfib=None): +def cut_specdat(dz,badfib=None,tsnr_min=0,tsnr_col='TSNR2_ELG'): from desitarget.targetmask import zwarn_mask selz = 
dz['ZWARN'] != 999999 selz &= dz['ZWARN']*0 == 0 #just in case of nans @@ -47,6 +47,10 @@ def cut_specdat(dz,badfib=None): bad = np.isin(fs['FIBER'],badfib) print('number at bad fibers '+str(sum(bad))) wfqa &= ~bad + if tsnr_min > 0: + low_tsnr = dz[tsnr_col] < tsnr_min + wfqa &= ~low_tsnr + print('number at low tsnr2 '+str(sum(low_tsnr))) return fs[wfqa] def goodz_infull(tp,dz,zcol='Z_not4clus'): diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index 7f3a19820..0d34e5caf 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2188,16 +2188,16 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz['GOODPRI'] = np.zeros(len(dz)).astype('bool') sel = dz['PRIORITY'] <= maxp dz['GOODPRI'][sel] = 1 - t0 = dz[tscol]*0 != 0 - t0 |= dz[tscol] == 999999 - t0 |= dz[tscol] == 1.e20 - dz[tscol][t0] = 0 + #t0 = dz[tscol]*0 != 0 + #t0 |= dz[tscol] == 999999 + #t0 |= dz[tscol] == 1.e20 + #dz[tscol][t0] = 0 - dz['GOODTSNR'] = np.zeros(len(dz)).astype('bool') - sel = dz[tscol] > min_tsnr2 - dz['GOODTSNR'][sel] = 1 - dz['sort'] = dz['GOODPRI']*dz['GOODHARDLOC']*dz['ZPOSSLOC']*dz['GOODTSNR']*1+dz['GOODPRI']*dz['GOODHARDLOC']*dz['GOODTSNR']*1#-0.5*dz['LOCFULL']#*(1+dz[tsnr]) - + #dz['GOODTSNR'] = np.zeros(len(dz)).astype('bool') + #sel = dz[tscol] > min_tsnr2 + #dz['GOODTSNR'][sel] = 1 + #dz['sort'] = dz['GOODPRI']*dz['GOODHARDLOC']*dz['ZPOSSLOC']*dz['GOODTSNR']*1+dz['GOODPRI']*dz['GOODHARDLOC']*dz['GOODTSNR']*1#-0.5*dz['LOCFULL']#*(1+dz[tsnr]) + dz['sort'] = dz['GOODPRI']*dz['GOODHARDLOC']*dz['ZPOSSLOC']*1+dz['GOODPRI']*dz['GOODHARDLOC']*dz['GOODTSNR']*1 #dz['sort'] = dz['GOODPRI']*dz['GOODHARDLOC']*dz['ZPOSSLOC']#*(1+dz[tsnr]) logger.info(dz.dtype.names) logger.info(str(rann)+' about to do sort') @@ -2781,7 +2781,7 @@ def mkfulldat(zf,imbits,ftar,tp,bit,outf,ftiles,maxp=3400,azf='',azfm='cumul',de specf = specdir+'datcomb_'+prog+'_spec_zdone.fits' print(specf) fs = fitsio.read(specf) - fs = common.cut_specdat(fs,badfib) + fs = common.cut_specdat(fs,badfib,tsnr_min=min_tsnr2,tsnr_col=tscol) fs = Table(fs) fs['TILELOCID'] = 10000*fs['TILEID'] +fs['LOCATION'] gtl = np.unique(fs['TILELOCID']) @@ -2830,10 +2830,10 @@ def mkfulldat(zf,imbits,ftar,tp,bit,outf,ftiles,maxp=3400,azf='',azfm='cumul',de wnts |= dz[tscol] == 999999 dz[tscol][wnts] = 0 print(np.max(dz[tscol])) - dz['GOODTSNR'] = np.ones(len(dz)).astype('bool') - if min_tsnr2 > 0: - sel = dz[tscol] > min_tsnr2 - dz['GOODTSNR'][sel] = 1 + #dz['GOODTSNR'] = np.ones(len(dz)).astype('bool') + #if min_tsnr2 > 0: + # sel = dz[tscol] > min_tsnr2 + # dz['GOODTSNR'][sel] = 1 if ftiles is None: dtl = count_tiles_input(dz[wg]) @@ -2845,10 +2845,11 @@ def mkfulldat(zf,imbits,ftar,tp,bit,outf,ftiles,maxp=3400,azf='',azfm='cumul',de selnp = dz['LOCATION_ASSIGNED'] == 0 pv = dz['PRIORITY'] #we will multiply by priority in order to keep priority 3400 over lya follow-up pv[selnp] = 0 - dz['sort'] = dz['LOCATION_ASSIGNED']*dz['GOODTSNR']*dz['GOODHARDLOC']*dz['GOODPRI']*pv+dz['TILELOCID_ASSIGNED']*dz['GOODHARDLOC']*dz['GOODPRI']*1 + dz['GOODHARDLOC']*1 + dz['GOODPRI']*1#*(1+np.clip(dz[tscol],0,200))*1+dz['TILELOCID_ASSIGNED']*dz['GOODHARDLOC']*1+dz['GOODHARDLOC']*1 + #dz['sort'] = dz['LOCATION_ASSIGNED']*dz['GOODTSNR']*dz['GOODHARDLOC']*dz['GOODPRI']*pv+dz['TILELOCID_ASSIGNED']*dz['GOODHARDLOC']*dz['GOODPRI']*1 + dz['GOODHARDLOC']*1 + dz['GOODPRI']*1#*(1+np.clip(dz[tscol],0,200))*1+dz['TILELOCID_ASSIGNED']*dz['GOODHARDLOC']*1+dz['GOODHARDLOC']*1 + dz['sort'] = 
dz['LOCATION_ASSIGNED']*dz['GOODHARDLOC']*dz['GOODPRI']*pv+dz['TILELOCID_ASSIGNED']*dz['GOODHARDLOC']*dz['GOODPRI']*1 + dz['GOODHARDLOC']*1 + dz['GOODPRI']*1# else: - dz['sort'] = dz['LOCATION_ASSIGNED']*dz['GOODTSNR']*dz['GOODHARDLOC']*dz['GOODPRI']*1+dz['TILELOCID_ASSIGNED']*dz['GOODHARDLOC']*dz['GOODPRI']*1 + dz['GOODHARDLOC']*1 + dz['GOODPRI']*1#*(1+np.clip(dz[tscol],0,200))*1+dz['TILELOCID_ASSIGNED']*dz['GOODHARDLOC']*1+dz['GOODHARDLOC']*1 - + #dz['sort'] = dz['LOCATION_ASSIGNED']*dz['GOODTSNR']*dz['GOODHARDLOC']*dz['GOODPRI']*1+dz['TILELOCID_ASSIGNED']*dz['GOODHARDLOC']*dz['GOODPRI']*1 + dz['GOODHARDLOC']*1 + dz['GOODPRI']*1#*(1+np.clip(dz[tscol],0,200))*1+dz['TILELOCID_ASSIGNED']*dz['GOODHARDLOC']*1+dz['GOODHARDLOC']*1 + dz['sort'] = dz['LOCATION_ASSIGNED']*dz['GOODHARDLOC']*dz['GOODPRI']*1+dz['TILELOCID_ASSIGNED']*dz['GOODHARDLOC']*dz['GOODPRI']*1 + dz['GOODHARDLOC']*1 + dz['GOODPRI']*1 #else: # selnp = dz['LOCATION_ASSIGNED'] == 0 # pv = dz['PRIORITY'] diff --git a/scripts/main/mkCat_main_ran.py b/scripts/main/mkCat_main_ran.py index 0e81762f1..80c51c4ee 100644 --- a/scripts/main/mkCat_main_ran.py +++ b/scripts/main/mkCat_main_ran.py @@ -230,7 +230,7 @@ specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION'] logger.info('loaded specf file '+specfo) -specfc = common.cut_specdat(specf,badfib=mainp.badfib) +specfc = common.cut_specdat(specf,badfib=mainp.badfib,tsnr_min=tsnrcut,tsnr_col=tnsrcol) gtl = np.unique(specfc['TILELOCID']) del specfc From a7689f736cee4b10707b771ad075620a8ced65cc Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 12:00:16 -0400 Subject: [PATCH 197/297] Update cattools.py --- py/LSS/main/cattools.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index 0d34e5caf..0493c01ab 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2144,8 +2144,7 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz = Table.read(zf) logger.info(dz.dtype.names) - zfpd = indir.replace('global','dvs_ro')+'/rancomb_'+str(rann)+pd+'_Alltilelocinfo.fits' - dzpd = Table.read(zfpd) + #dz = join(dz,dzpd,keys=['TARGETID']) #print('length including duplicates '+str(len(dz))) @@ -2177,6 +2176,16 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz['GOODHARDLOC'] = np.zeros(len(dz)).astype('bool') dz['GOODHARDLOC'][wg] = 1 + if ftiles is None: + dzpd = count_tiles_input(np.array(dz[wg].keep_columns(['TARGETID','TILEID','TILELOCID'])) + else: + dzpd = Table.read(ftiles) + + + #zfpd = indir.replace('global','dvs_ro')+'/rancomb_'+str(rann)+pd+'_Alltilelocinfo.fits' + #dzpd = Table.read(zfpd) + + #dzpd = count_tiles_input(np.array(dz[wg].keep_columns(['TARGETID','TILEID','TILELOCID'])) #dz['LOCFULL'] = np.zeros(len(dz)).astype('bool') @@ -2821,15 +2830,15 @@ def mkfulldat(zf,imbits,ftar,tp,bit,outf,ftiles,maxp=3400,azf='',azfm='cumul',de print(len(np.unique(dz[wtl]['TARGETID']))) cols = list(dz.dtype.names) - if tscol not in cols: - dz[tscol] = np.ones(len(dz)) - print('added '+tscol+' and set all to 1') + #if tscol not in cols: + # dz[tscol] = np.ones(len(dz)) + # print('added '+tscol+' and set all to 1') - wnts = dz[tscol]*0 != 0 - wnts |= dz[tscol] == 999999 - dz[tscol][wnts] = 0 - print(np.max(dz[tscol])) + #wnts = dz[tscol]*0 != 0 + #wnts |= dz[tscol] == 999999 + #dz[tscol][wnts] = 0 + #print(np.max(dz[tscol])) #dz['GOODTSNR'] = np.ones(len(dz)).astype('bool') #if min_tsnr2 > 0: # sel = dz[tscol] 
> min_tsnr2 From 69195fe5cf89cbbb560976584e578be3d27e83ae Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 12:01:46 -0400 Subject: [PATCH 198/297] Update cattools.py --- py/LSS/main/cattools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index 0493c01ab..e233b0f1c 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2177,7 +2177,7 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz['GOODHARDLOC'][wg] = 1 if ftiles is None: - dzpd = count_tiles_input(np.array(dz[wg].keep_columns(['TARGETID','TILEID','TILELOCID'])) + dzpd = count_tiles_input(np.array(dz[wg]).keep_columns(['TARGETID','TILEID','TILELOCID'])) else: dzpd = Table.read(ftiles) From 66dc856d38d3b2270415c7d30be14dd78f98088d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 14:14:12 -0400 Subject: [PATCH 199/297] Update cattools.py --- py/LSS/main/cattools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index e233b0f1c..6b5ee8408 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2126,7 +2126,7 @@ def combran(tiles,rann,randir,ddir,tp,tmask,tc='SV3_DESI_TARGET',imask=False): fu.write(randir+str(rann)+'/rancomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True) -def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr2=0,tlid_full=None,badfib=None): +def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr2=0,tlid_full=None,badfib=None,ftiles=None): import LSS.common_tools as common #import logging logger = logging.getLogger('LSSran') From fb9b85a66717b6c93e7779a0eaf57024070204f2 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 14:19:20 -0400 Subject: [PATCH 200/297] Update cattools.py --- py/LSS/main/cattools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index 6b5ee8408..b4bb5aae4 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2177,7 +2177,7 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz['GOODHARDLOC'][wg] = 1 if ftiles is None: - dzpd = count_tiles_input(np.array(dz[wg]).keep_columns(['TARGETID','TILEID','TILELOCID'])) + dzpd = count_tiles_input(np.array(dz[wg])).keep_columns(['TARGETID','TILEID','TILELOCID']) else: dzpd = Table.read(ftiles) From 9e52464e2b5d1b20287459a8aa10fed2ad5e1d93 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 14:31:27 -0400 Subject: [PATCH 201/297] Update cattools.py --- py/LSS/main/cattools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index b4bb5aae4..dcae42459 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2177,7 +2177,7 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz['GOODHARDLOC'][wg] = 1 if ftiles is None: - dzpd = count_tiles_input(np.array(dz[wg])).keep_columns(['TARGETID','TILEID','TILELOCID']) + dzpd = count_tiles_input(np.array(dz[wg].keep_columns(['TARGETID','TILEID','TILELOCID']))) else: dzpd = Table.read(ftiles) From 6039fc91fe83dbec9a274f8fa87ecd4fe88e2c4d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 14:50:40 -0400 Subject: [PATCH 202/297] Update cattools.py --- py/LSS/main/cattools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py 
b/py/LSS/main/cattools.py index dcae42459..7678f4ebc 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2177,7 +2177,7 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz['GOODHARDLOC'][wg] = 1 if ftiles is None: - dzpd = count_tiles_input(np.array(dz[wg].keep_columns(['TARGETID','TILEID','TILELOCID']))) + dzpd = count_tiles_input(dz[wg].keep_columns(['TARGETID','TILEID','TILELOCID'])) else: dzpd = Table.read(ftiles) From 85eac1013c64896740cc309d76e16f6754e61516 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 15:26:08 -0400 Subject: [PATCH 203/297] Update cattools.py --- py/LSS/main/cattools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index 7678f4ebc..1c737fdbb 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -1242,7 +1242,7 @@ def count_tiles_input(fjg): return table with unique TARGETID and the number of tiles it showed up on (NTILE), the TILES and the TILELOCIDS ''' - + print(fjg.dtype.names) fjg = fjg[np.argsort(fjg['TARGETID'])] tids = np.unique(fjg['TARGETID']) From ef1d15e9edaa3f8f06af82ae3671f64e81b2fa9b Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 15:33:11 -0400 Subject: [PATCH 204/297] Update cattools.py --- py/LSS/main/cattools.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index 1c737fdbb..c4dbb30c3 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2177,7 +2177,8 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz['GOODHARDLOC'][wg] = 1 if ftiles is None: - dzpd = count_tiles_input(dz[wg].keep_columns(['TARGETID','TILEID','TILELOCID'])) + logger.info('counting tiles from dz with columns '+str(dz.dtype.names)) + dzpd = count_tiles_input(dz[wg])#.keep_columns(['TARGETID','TILEID','TILELOCID'])) else: dzpd = Table.read(ftiles) From 2f53e7cb426b3b131a3997cfa8c6961e1c5d15cc Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 16:15:09 -0400 Subject: [PATCH 205/297] Update cattools.py --- py/LSS/main/cattools.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index c4dbb30c3..eb0c95b9b 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2175,10 +2175,11 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz['GOODHARDLOC'] = np.zeros(len(dz)).astype('bool') dz['GOODHARDLOC'][wg] = 1 - + dzasub = np.array(dz[wg].keep_columns(['TARGETID','TILEID','TILELOCID'])) + logger.info(str(dzasub.dtype.names) if ftiles is None: logger.info('counting tiles from dz with columns '+str(dz.dtype.names)) - dzpd = count_tiles_input(dz[wg])#.keep_columns(['TARGETID','TILEID','TILELOCID'])) + dzpd = count_tiles_input(dzasub)#dz[wg])#.keep_columns(['TARGETID','TILEID','TILELOCID'])) else: dzpd = Table.read(ftiles) From ecf12242880390b3bc4d8131381732af96c7a5f6 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 16:16:05 -0400 Subject: [PATCH 206/297] Update cattools.py --- py/LSS/main/cattools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index eb0c95b9b..f0c62f810 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2176,7 +2176,7 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz['GOODHARDLOC'] = 
np.zeros(len(dz)).astype('bool') dz['GOODHARDLOC'][wg] = 1 dzasub = np.array(dz[wg].keep_columns(['TARGETID','TILEID','TILELOCID'])) - logger.info(str(dzasub.dtype.names) + logger.info(str(dzasub.dtype.names)) if ftiles is None: logger.info('counting tiles from dz with columns '+str(dz.dtype.names)) dzpd = count_tiles_input(dzasub)#dz[wg])#.keep_columns(['TARGETID','TILEID','TILELOCID'])) From 9349db44cfe0d2fe4fe7d14b0802595659c58806 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 16:35:49 -0400 Subject: [PATCH 207/297] Update cattools.py --- py/LSS/main/cattools.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index f0c62f810..662a3f570 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -1241,7 +1241,8 @@ def count_tiles_input(fjg): take input array with require columns TARGETID TILEID TILELOCID return table with unique TARGETID and the number of tiles it showed up on (NTILE), the TILES and the TILELOCIDS ''' - + fjg.keep_columns(['TARGETID','TILEID','TILELOCID']) + fjg = np.arrau(fjg) print(fjg.dtype.names) fjg = fjg[np.argsort(fjg['TARGETID'])] @@ -2175,11 +2176,9 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr dz['GOODHARDLOC'] = np.zeros(len(dz)).astype('bool') dz['GOODHARDLOC'][wg] = 1 - dzasub = np.array(dz[wg].keep_columns(['TARGETID','TILEID','TILELOCID'])) - logger.info(str(dzasub.dtype.names)) if ftiles is None: logger.info('counting tiles from dz with columns '+str(dz.dtype.names)) - dzpd = count_tiles_input(dzasub)#dz[wg])#.keep_columns(['TARGETID','TILEID','TILELOCID'])) + dzpd = count_tiles_input(dz[wg])#.keep_columns(['TARGETID','TILEID','TILELOCID'])) else: dzpd = Table.read(ftiles) @@ -2195,7 +2194,7 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr # wf = np.isin(dz['TILELOCID'],tlid_full) # dz['LOCFULL'][wf] = 1 - + logger.info(str(dz.dtype.names)) dz['GOODPRI'] = np.zeros(len(dz)).astype('bool') sel = dz['PRIORITY'] <= maxp dz['GOODPRI'][sel] = 1 From 5f05b0801c8a7e0cf5547680ba7d19cbb16416c0 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 16:41:18 -0400 Subject: [PATCH 208/297] Update cattools.py --- py/LSS/main/cattools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index 662a3f570..ff694bedd 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -1242,8 +1242,8 @@ def count_tiles_input(fjg): return table with unique TARGETID and the number of tiles it showed up on (NTILE), the TILES and the TILELOCIDS ''' fjg.keep_columns(['TARGETID','TILEID','TILELOCID']) - fjg = np.arrau(fjg) - print(fjg.dtype.names) + fjg = np.array(fjg) + #print(fjg.dtype.names) fjg = fjg[np.argsort(fjg['TARGETID'])] tids = np.unique(fjg['TARGETID']) From d93c145c0d49ae49bd7fe192efbb3ce21a73621d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 27 Mar 2024 16:55:29 -0400 Subject: [PATCH 209/297] Update cattools.py --- py/LSS/main/cattools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index ff694bedd..ffcc759d3 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -2207,7 +2207,7 @@ def mkfullran(gtl,lznp,indir,rann,imbits,outf,tp,pd,notqso='',maxp=3400,min_tsnr #sel = dz[tscol] > min_tsnr2 #dz['GOODTSNR'][sel] = 1 #dz['sort'] = 
dz['GOODPRI']*dz['GOODHARDLOC']*dz['ZPOSSLOC']*dz['GOODTSNR']*1+dz['GOODPRI']*dz['GOODHARDLOC']*dz['GOODTSNR']*1#-0.5*dz['LOCFULL']#*(1+dz[tsnr]) - dz['sort'] = dz['GOODPRI']*dz['GOODHARDLOC']*dz['ZPOSSLOC']*1+dz['GOODPRI']*dz['GOODHARDLOC']*dz['GOODTSNR']*1 + dz['sort'] = dz['GOODPRI']*dz['GOODHARDLOC']*dz['ZPOSSLOC']*1+dz['GOODPRI']*dz['GOODHARDLOC']*1 #dz['sort'] = dz['GOODPRI']*dz['GOODHARDLOC']*dz['ZPOSSLOC']#*(1+dz[tsnr]) logger.info(dz.dtype.names) logger.info(str(rann)+' about to do sort') From da39abf3bb253e88ab33356040d673503c3dda9c Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 28 Mar 2024 11:42:53 -0400 Subject: [PATCH 210/297] Update LSSpipe_Y1.txt --- Sandbox/LSSpipe_Y1.txt | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt index b25768c64..2d3945689 100644 --- a/Sandbox/LSSpipe_Y1.txt +++ b/Sandbox/LSSpipe_Y1.txt @@ -76,16 +76,15 @@ for tp,notqso in zip(tps,notqsos): for tp,notqso in zip(tps,notqsos): srun -N 1 -C cpu -t 04:00:00 -q interactive python scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type tp --combwspec n --fullr y --survey Y1 -#this adds vetos to both data and randoms (could put randoms in separate script and parallize) +#this adds vetos to both data (and randoms if --maxr is greater than 0) #only necessary for LRGs for now -python mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_veto y --verspec himalayas --survey Y1 -for tp,notqso in zip(tps,notqsos): - python mkCat_main.py --type tp --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_veto y --verspec himalayas --survey Y1 --notqso notqso -#can just run one random and then add --maxr 1 +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_veto y --verspec iron --survey Y1 --maxr 0 +#do randoms below #fill randoms and apply vetoes to them: +#include --add_veto y for at least LRG for tp,notqso in zip(tps,notqsos): - srun -N 1 -C cpu -t 04:00:00 -q interactive python scripts/main/mkCat_main_ran.py --type tp --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --fillran y --apply_veto y + srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type tp --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --fillran y --apply_veto y (--add_veto y) #make healpix maps based on randoms for tp,notqso in zip(tps,notqsos): From b8d4c1a118c6bacaa14043cb80cc5ae211ce1602 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 28 Mar 2024 16:19:52 -0400 Subject: [PATCH 211/297] Update cattools.py --- py/LSS/main/cattools.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/py/LSS/main/cattools.py b/py/LSS/main/cattools.py index ffcc759d3..7a4e1abb5 100644 --- a/py/LSS/main/cattools.py +++ b/py/LSS/main/cattools.py @@ -3246,7 +3246,8 @@ def add_zfail_weight2full(indir,tp='',tsnrcut=80,readpars=False,hpmapcut='_HPmap if tp == 'LRG': selobs &= ff['TSNR2_ELG'] > tsnrcut mintsnr=500/12.15 - maxtsnr =2000/12.15 + #maxtsnr =2000/12.15 + maxtsnr =1700/12.15 band = 'Z' if tp[:3] == 'BGS': From 8361bc7e6abe2b734baa4108f2c5c11090d9acf8 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 12:17:37 -0400 Subject: [PATCH 212/297] Create LRG_LSScat_pipe.sh --- scripts/LRG_LSScat_pipe.sh | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 
100644 scripts/LRG_LSScat_pipe.sh diff --git a/scripts/LRG_LSScat_pipe.sh b/scripts/LRG_LSScat_pipe.sh new file mode 100644 index 000000000..8e1527643 --- /dev/null +++ b/scripts/LRG_LSScat_pipe.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -e + +source /global/common/software/desi/desi_environment.sh main +export LSSCODE=$HOME +PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type LRG --combwspec n --fullr y --survey Y1 --maxr 18 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_veto y --verspec iron --survey Y1 --maxr 0 --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --fillran y --apply_veto y --add_veto y --maxr 18 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --survey Y1 --mkHPmaps y --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_veto y --verspec iron --survey Y1 --maxr 0 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --add_tl y --maxr 18 --par n --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_map_veto y --verspec iron --survey Y1 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --add_weight_zfail y --survey Y1 --use_map_veto _HPmapcut --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --swap20211212 y --verspec iron --survey Y1 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --imsys y --survey Y1 --verspec iron --imsys_zbin y --use_map_veto _HPmapcut --version $1 + From 5a88ed5756f9a616a0a93bf25504d6b7ca1c3487 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 12:18:22 -0400 Subject: [PATCH 213/297] Update LRG_LSScat_pipe.sh --- scripts/LRG_LSScat_pipe.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 scripts/LRG_LSScat_pipe.sh diff --git a/scripts/LRG_LSScat_pipe.sh b/scripts/LRG_LSScat_pipe.sh old mode 100644 new mode 100755 From 61e684aa2a084eacb397ba86c014dbb4d2b7bc1d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 12:20:37 -0400 Subject: [PATCH 214/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index fe81251de..9d2a69997 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -237,6 +237,12 @@ print('made '+ldirspec+'LSScats') dirout = ldirspec+'LSScats/'+version+'/' + +if not os.path.exists(dirout): + os.mkdir(dirout) + print('made 
'+dirout) + + logfn = dirout+'log.txt' if os.path.isfile(logfn): logf = open(logfn,'a') From ba196668546ec64b3a6e49b4126b4b655aa1683c Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 17:18:54 -0400 Subject: [PATCH 215/297] Create comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 102 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 scripts/comp_amtl_asgn.py diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py new file mode 100644 index 000000000..cc0e0b9eb --- /dev/null +++ b/scripts/comp_amtl_asgn.py @@ -0,0 +1,102 @@ +import numpy as np +import fitsio +import glob +from astropy.table import Table + +import LSS.common_tools as common +from LSS.globals import main + +#just start with the mock 1 v3_1 altmtl as an example + +alltids = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA1.fits',columns=['TARGETID']) +alltids = np.unique(alltids['TARGETID']) + +def removeLeadingZeros(num): + + # traverse the entire string + for i in range(len(num)): + + # check for the first non-zero character + if num[i] != '0': + # return the remaining string + res = num[i::]; + return res; + + # If the entire string is traversed + # that means it didn't have a single + # non-zero character, hence return "0" + return "0"; + +def get_all_asgn(indir): + fls = glob.glob(indir+'/*/fba*.fits') + assign_list = [] + for fl in fls: + asgn = Table(fitsio.read(fl,columns=['FIBER', 'TARGETID', 'LOCATION'])) + sp = fl.split('-') + tid = int(removeLeadingZeros(sp[-1].strip('.fits'))) + #print(tid) + asgn['TILEID'] = tid + sel = asgn['TARGETID'] > 0 + assign_list.append(asgn[sel]) + all_asgn = np.concatenate(assign_list) + return all_asgn + +#get the list of good tilelocid + +mainp = main('LRG','iron') +pdir = 'dark' +mt = mainp.mtld +tiles = mainp.tiles + + +tsnrcut = mainp.tsnrcut +dchi2 = mainp.dchi2 +tnsrcol = mainp.tsnrcol +badfib = mainp.badfib + + +wd = mt['SURVEY'] == 'main' +wd &= mt['ZDONE'] == 'true' +wd &= mt['FAPRGRM'] == pdir +wd &=mt['ZDATE'] < 20220900 #Y1 cutoff + +mtld = mt[wd] + +specfo = ldirspec+'datcomb_'+pdir+'_spec_zdone.fits' +logger.info('loading specf file '+specfo) +specf = Table(fitsio.read(specfo)) +sel = np.isin(specf['TILEID'],mtld['TILEID']) +specf = specf[sel] +specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION'] + +logger.info('loaded specf file '+specfo) +specfc = common.cut_specdat(specf,badfib=mainp.badfib,tsnr_min=tsnrcut,tsnr_col=tnsrcol) +gtl = np.unique(specfc['TILELOCID']) + +assign_real_dic = {} + +def get_good_real(real_num) + indir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/Univ'+str(real_num).zfill(3)+'/fa/MAIN' + all_asgn = get_all_asgn(indir) + asgn_tloc = 10000*all_asgn['TILEID'] +all_asgn['LOCATION'] + good_asgn = np.isin(asgn_tloc,gtl) + good_tids = all_asgn['TARGETID'][gtl] + asgn_real = np.isin(alltids,good_tids) + assign_real_dic[real_num] = asgn_real + +from multiprocessing import Pool +Nreal = 64 +inds = np.arange(0,Nreal) +pool = sharedmem.MapReduce(np=6) +with pool: + pool.map(get_good_real,inds) + +print('got all realizations') + +probl = np.zeros(len(alltids)) +for i in range(0,len(alltids)): + nt = 0 + for real in inds: + nt += assign_real_dic[real][i] + prob = nt/Nreal + probl[i] = prob From 657ccc803b3c7567f06018f4dc3d9fbd37aa5e5a Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 17:20:40 -0400 Subject: [PATCH 216/297] Update comp_amtl_asgn.py --- 
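Note: the script created in PATCH 215 boils the per-realization check down to TILELOCID membership — encode each assignment as 10000*TILEID + LOCATION and keep targets that land on a good location. A toy sketch of that test, with made-up IDs in place of the fba FASSIGN and spec inputs (also note that int() already strips the zero padding in fba filenames, so the removeLeadingZeros helper is not strictly needed):

import numpy as np

# stand-ins for the FASSIGN columns and the good-TILELOCID list
all_asgn = {'TILEID':   np.array([1234, 1234, 9999]),
            'LOCATION': np.array([55, 102, 7]),
            'TARGETID': np.array([11, 22, 33])}
gtl = np.array([12340055, 12340102])          # 10000*TILEID + LOCATION
alltids = np.array([11, 22, 33, 44])          # targets being tracked

asgn_tloc = 10000 * all_asgn['TILEID'] + all_asgn['LOCATION']
good_tids = all_asgn['TARGETID'][np.isin(asgn_tloc, gtl)]
asgn_real = np.isin(alltids, good_tids)       # [True, True, False, False]

assert int('001234') == 1234                  # zero-padded tile number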
scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index cc0e0b9eb..2a86db4ec 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -75,7 +75,7 @@ def get_all_asgn(indir): assign_real_dic = {} -def get_good_real(real_num) +def get_good_real(real_num): indir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/Univ'+str(real_num).zfill(3)+'/fa/MAIN' all_asgn = get_all_asgn(indir) asgn_tloc = 10000*all_asgn['TILEID'] +all_asgn['LOCATION'] From 87229967e9313a121b9b1adf292ef5c9c0f3f15c Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 18:57:23 -0400 Subject: [PATCH 217/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 2a86db4ec..c2876ff95 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -61,7 +61,7 @@ def get_all_asgn(indir): wd &=mt['ZDATE'] < 20220900 #Y1 cutoff mtld = mt[wd] - +ldirspec = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/' specfo = ldirspec+'datcomb_'+pdir+'_spec_zdone.fits' logger.info('loading specf file '+specfo) specf = Table(fitsio.read(specfo)) From d2b1fb504e5fb60e51587efdebfcd1e3588224e2 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 19:37:50 -0400 Subject: [PATCH 218/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index c2876ff95..1234f3084 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -63,13 +63,13 @@ def get_all_asgn(indir): mtld = mt[wd] ldirspec = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/' specfo = ldirspec+'datcomb_'+pdir+'_spec_zdone.fits' -logger.info('loading specf file '+specfo) +#logger.info('loading specf file '+specfo) specf = Table(fitsio.read(specfo)) sel = np.isin(specf['TILEID'],mtld['TILEID']) specf = specf[sel] specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION'] -logger.info('loaded specf file '+specfo) +#logger.info('loaded specf file '+specfo) specfc = common.cut_specdat(specf,badfib=mainp.badfib,tsnr_min=tsnrcut,tsnr_col=tnsrcol) gtl = np.unique(specfc['TILELOCID']) From 1c8d03514c9ac59fcdbaa670483cd6e3c5331b1e Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 21:41:21 -0400 Subject: [PATCH 219/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 1234f3084..0a4b2ece9 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -6,6 +6,27 @@ import LSS.common_tools as common from LSS.globals import main +import logging + +# create logger +logname = 'bitweights' +logger = logging.getLogger(logname) +logger.setLevel(logging.INFO) + +# create console handler and set level to debug +ch = logging.StreamHandler() +ch.setLevel(logging.INFO) + +# create formatter +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + +# add formatter to ch +ch.setFormatter(formatter) + +# add ch to logger +logger.addHandler(ch) + + #just start with the mock 1 v3_1 altmtl as an example alltids = 
fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA1.fits',columns=['TARGETID']) @@ -63,13 +84,13 @@ def get_all_asgn(indir): mtld = mt[wd] ldirspec = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/' specfo = ldirspec+'datcomb_'+pdir+'_spec_zdone.fits' -#logger.info('loading specf file '+specfo) +logger.info('loading specf file '+specfo) specf = Table(fitsio.read(specfo)) sel = np.isin(specf['TILEID'],mtld['TILEID']) specf = specf[sel] specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION'] -#logger.info('loaded specf file '+specfo) +logger.info('loaded specf file '+specfo) specfc = common.cut_specdat(specf,badfib=mainp.badfib,tsnr_min=tsnrcut,tsnr_col=tnsrcol) gtl = np.unique(specfc['TILELOCID']) @@ -83,15 +104,17 @@ def get_good_real(real_num): good_tids = all_asgn['TARGETID'][gtl] asgn_real = np.isin(alltids,good_tids) assign_real_dic[real_num] = asgn_real + del asgn_real from multiprocessing import Pool Nreal = 64 inds = np.arange(0,Nreal) -pool = sharedmem.MapReduce(np=6) +pool = sharedmem.MapReduce() +logger.info('about to get '+str(Nreal)+' realizations in parallel') with pool: pool.map(get_good_real,inds) -print('got all realizations') +logger.info('got all realizations') probl = np.zeros(len(alltids)) for i in range(0,len(alltids)): From c74c4ddefafe26b9ad78c1a3d61f1169a293345f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 21:42:50 -0400 Subject: [PATCH 220/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 0a4b2ece9..df706d539 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -31,6 +31,7 @@ alltids = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA1.fits',columns=['TARGETID']) alltids = np.unique(alltids['TARGETID']) +logger.info(str(len(alltids))+ ' TARGETID will get their number of assignments tracked') def removeLeadingZeros(num): From e2820df8249cb4678401a652a4f3fd9793c1f318 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 22:51:02 -0400 Subject: [PATCH 221/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index df706d539..4f9e454fd 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -26,7 +26,7 @@ # add ch to logger logger.addHandler(ch) - +logger.info('script is starting') #just start with the mock 1 v3_1 altmtl as an example alltids = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/forFA1.fits',columns=['TARGETID']) @@ -110,9 +110,9 @@ def get_good_real(real_num): from multiprocessing import Pool Nreal = 64 inds = np.arange(0,Nreal) -pool = sharedmem.MapReduce() +#pool = sharedmem.MapReduce() logger.info('about to get '+str(Nreal)+' realizations in parallel') -with pool: +with Pool() as pool: pool.map(get_good_real,inds) logger.info('got all realizations') From e79a711191a7338d2c433521122c96275e336c12 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 23:02:27 -0400 Subject: [PATCH 222/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 4f9e454fd..547eca5a0 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -102,7 
+102,7 @@ def get_good_real(real_num): all_asgn = get_all_asgn(indir) asgn_tloc = 10000*all_asgn['TILEID'] +all_asgn['LOCATION'] good_asgn = np.isin(asgn_tloc,gtl) - good_tids = all_asgn['TARGETID'][gtl] + good_tids = all_asgn['TARGETID'][good_asgn] asgn_real = np.isin(alltids,good_tids) assign_real_dic[real_num] = asgn_real del asgn_real From 188c2417cdc02b1d20d0a930a0f3592816555d53 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 23:12:50 -0400 Subject: [PATCH 223/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 547eca5a0..1df128e3a 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -116,7 +116,7 @@ def get_good_real(real_num): pool.map(get_good_real,inds) logger.info('got all realizations') - +logger.info('dictionary keys are '+str(assign_real_dic.keys())) probl = np.zeros(len(alltids)) for i in range(0,len(alltids)): nt = 0 From 0f06c71d33bc43e8f443555fae3662e7368fe4d4 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 23:22:25 -0400 Subject: [PATCH 224/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 1df128e3a..b2b0fd50d 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -96,6 +96,10 @@ def get_all_asgn(indir): gtl = np.unique(specfc['TILELOCID']) assign_real_dic = {} +testl = [] + +def test(real_num): + testl.append((real_num,np.random.random(10))) def get_good_real(real_num): indir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/Univ'+str(real_num).zfill(3)+'/fa/MAIN' @@ -113,9 +117,13 @@ def get_good_real(real_num): #pool = sharedmem.MapReduce() logger.info('about to get '+str(Nreal)+' realizations in parallel') with Pool() as pool: - pool.map(get_good_real,inds) + #pool.map(get_good_real,inds) + pool.map(test) logger.info('got all realizations') +logger.info(str(len(testl))) +import sys +sys.exit() logger.info('dictionary keys are '+str(assign_real_dic.keys())) probl = np.zeros(len(alltids)) for i in range(0,len(alltids)): From d9eb5905e1aff9b42126d0765eae616091e429af Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 23:30:15 -0400 Subject: [PATCH 225/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index b2b0fd50d..b9cce2cc4 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -118,7 +118,7 @@ def get_good_real(real_num): logger.info('about to get '+str(Nreal)+' realizations in parallel') with Pool() as pool: #pool.map(get_good_real,inds) - pool.map(test) + pool.map(test,inds) logger.info('got all realizations') logger.info(str(len(testl))) From 8da4a0086ce1d383494c16f368755247cb6f614a Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 23:47:10 -0400 Subject: [PATCH 226/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index b9cce2cc4..8a66ecbf3 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -98,8 +98,9 @@ def get_all_asgn(indir): assign_real_dic = {} testl = [] -def test(real_num): - 
testl.append((real_num,np.random.random(10))) +def test(d,real_num): + d[real_num] = np.random.random(10) + #testl.append((real_num,np.random.random(10))) def get_good_real(real_num): indir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/Univ'+str(real_num).zfill(3)+'/fa/MAIN' @@ -116,12 +117,19 @@ def get_good_real(real_num): inds = np.arange(0,Nreal) #pool = sharedmem.MapReduce() logger.info('about to get '+str(Nreal)+' realizations in parallel') -with Pool() as pool: - #pool.map(get_good_real,inds) - pool.map(test,inds) +#with Pool() as pool: +# #pool.map(get_good_real,inds) +# pool.map(test,inds) + +from multiprocess import Process, Manager +manager = Manager() +d = manager.dict() +job = [Process(target=test, args=(d, i)) for i in inds] +_ = [p.start() for p in job] +_ = [p.join() for p in job] logger.info('got all realizations') -logger.info(str(len(testl))) +logger.info('dictionary keys are '+str(d.keys())) import sys sys.exit() logger.info('dictionary keys are '+str(assign_real_dic.keys())) From 01bccc23a6b1df59e0b87b511c3ade5145f6f106 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 23:49:53 -0400 Subject: [PATCH 227/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 8a66ecbf3..37859277c 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -121,7 +121,7 @@ def get_good_real(real_num): # #pool.map(get_good_real,inds) # pool.map(test,inds) -from multiprocess import Process, Manager +from multiprocessing import Process, Manager manager = Manager() d = manager.dict() job = [Process(target=test, args=(d, i)) for i in inds] From 633ab8973eeb0b84f55b1fb1ceeae80d37c93715 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 29 Mar 2024 23:55:05 -0400 Subject: [PATCH 228/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 37859277c..d416d61a4 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -123,15 +123,15 @@ def get_good_real(real_num): from multiprocessing import Process, Manager manager = Manager() -d = manager.dict() -job = [Process(target=test, args=(d, i)) for i in inds] +assign_real_dic = manager.dict() +job = [Process(target=get_good_real, args=(assign_real_dic, i)) for i in inds] _ = [p.start() for p in job] _ = [p.join() for p in job] logger.info('got all realizations') logger.info('dictionary keys are '+str(d.keys())) import sys -sys.exit() +#sys.exit() logger.info('dictionary keys are '+str(assign_real_dic.keys())) probl = np.zeros(len(alltids)) for i in range(0,len(alltids)): @@ -140,3 +140,5 @@ def get_good_real(real_num): nt += assign_real_dic[real][i] prob = nt/Nreal probl[i] = prob + if i%1e5 == 0: + logger.info(str(i)) \ No newline at end of file From 00b5337cebf7b677c8f7075f9af591604e153fdd Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sat, 30 Mar 2024 00:00:05 -0400 Subject: [PATCH 229/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index d416d61a4..fbda548e3 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -129,7 +129,7 @@ def get_good_real(real_num): _ = [p.join() for p in job] logger.info('got all realizations') 
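Note on the parallelism change being worked through in these patches: multiprocessing.Pool runs its workers in child processes, so each worker was filling its own copy of the module-level assign_real_dic (and of the test list testl) while the parent's stayed empty; a Manager().dict() is a proxy object whose writes are routed back through a manager process and so are visible to the parent after join. A minimal self-contained sketch of the working pattern, with illustrative names only:

    from multiprocessing import Process, Manager

    def fill(d, i):
        # writes go through the manager proxy, so the parent process sees them
        d[i] = i * i

    if __name__ == '__main__':
        manager = Manager()
        d = manager.dict()
        jobs = [Process(target=fill, args=(d, i)) for i in range(4)]
        _ = [p.start() for p in jobs]
        _ = [p.join() for p in jobs]
        print(dict(d))  # {0: 0, 1: 1, 2: 4, 3: 9}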
-logger.info('dictionary keys are '+str(d.keys())) +#logger.info('dictionary keys are '+str(d.keys())) import sys #sys.exit() logger.info('dictionary keys are '+str(assign_real_dic.keys())) From f8fc4507b7c923f938f69c00f4203ea1279a7c29 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sat, 30 Mar 2024 00:00:30 -0400 Subject: [PATCH 230/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index fbda548e3..248d1eb73 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -102,7 +102,7 @@ def test(d,real_num): d[real_num] = np.random.random(10) #testl.append((real_num,np.random.random(10))) -def get_good_real(real_num): +def get_good_real(dic,real_num): indir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/Univ'+str(real_num).zfill(3)+'/fa/MAIN' all_asgn = get_all_asgn(indir) asgn_tloc = 10000*all_asgn['TILEID'] +all_asgn['LOCATION'] From 7c2d42b6bc5eae7ef7f5b8f8906a897b69ed393a Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sat, 30 Mar 2024 00:12:48 -0400 Subject: [PATCH 231/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 248d1eb73..f179c0094 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -141,4 +141,4 @@ def get_good_real(dic,real_num): prob = nt/Nreal probl[i] = prob if i%1e5 == 0: - logger.info(str(i)) \ No newline at end of file + logger.info(str(i)+' '+str(prob)) \ No newline at end of file From 7a3e2b9571bf147e688aaa9379460661e7275c8f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sat, 30 Mar 2024 00:19:07 -0400 Subject: [PATCH 232/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index f179c0094..3c0dffa85 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -134,11 +134,16 @@ def get_good_real(dic,real_num): #sys.exit() logger.info('dictionary keys are '+str(assign_real_dic.keys())) probl = np.zeros(len(alltids)) -for i in range(0,len(alltids)): - nt = 0 - for real in inds: - nt += assign_real_dic[real][i] - prob = nt/Nreal - probl[i] = prob - if i%1e5 == 0: - logger.info(str(i)+' '+str(prob)) \ No newline at end of file +for real in inds: + probl += assign_real_dic[real]*1. 
+probl = probl/64 +h = np.histogram(probl) +print(h) +#for i in range(0,len(alltids)): +# nt = 0 +# for real in inds: +# nt += assign_real_dic[real][i] +# prob = nt/Nreal +# probl[i] = prob +# if i%1e5 == 0: +# logger.info(str(i)+' '+str(prob)) \ No newline at end of file From 028e4cc69c435b776e96284b2f331d3aa8703448 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sun, 31 Mar 2024 17:55:43 -0400 Subject: [PATCH 233/297] Create ELG_LOPnotqso_LSScat_pipe.sh --- scripts/ELG_LOPnotqso_LSScat_pipe.sh | 34 ++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100755 scripts/ELG_LOPnotqso_LSScat_pipe.sh diff --git a/scripts/ELG_LOPnotqso_LSScat_pipe.sh b/scripts/ELG_LOPnotqso_LSScat_pipe.sh new file mode 100755 index 000000000..5519aec58 --- /dev/null +++ b/scripts/ELG_LOPnotqso_LSScat_pipe.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +set -e + +source /global/common/software/desi/desi_environment.sh main +export LSSCODE=$HOME +PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py + +TRACER='ELG_LOP' + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type $TRACER --notqso y--combwspec n --fullr y --survey Y1 --maxr 18 --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --fillran y --apply_veto y --maxr 18 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --survey Y1 --mkHPmaps y --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_veto y --verspec iron --survey Y1 --maxr 0 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --add_tl y --maxr 18 --par n --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_map_veto y --verspec iron --survey Y1 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --add_weight_zfail y --survey Y1 --use_map_veto _HPmapcut --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --swap20211212 y --verspec iron --survey Y1 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --imsys y --survey Y1 --verspec iron --imsys_zbin y --use_map_veto _HPmapcut --version $1 + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi scripts/Y1_sysnetELG_zbins_new.sh $1 + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type $TRACER --notqso y --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_SN --basedir /global/cfs/cdirs/desi/survey/catalogs/ + 
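Note on the probability calculation introduced above: each assign_real_dic[real] is a boolean array aligned with alltids, so summing the arrays over realizations and dividing by the number of realizations gives the same per-target assignment probability as the commented-out per-target loop, at a fraction of the cost. The hard-coded division by 64 assumes 64 realizations; a later patch in this series generalizes it to Nreal. A small sketch of the equivalence, with made-up inputs:

    import numpy as np

    Nreal, ntarg = 4, 5
    rng = np.random.default_rng(0)
    # one boolean "was this target assigned" array per realization
    assign_real_dic = {real: rng.random(ntarg) < 0.5 for real in range(Nreal)}

    # vectorized version, as in the patch
    probl = np.zeros(ntarg)
    for real in range(Nreal):
        probl += assign_real_dic[real] * 1.
    probl = probl / Nreal

    # per-target double loop it replaces
    probl_loop = np.array([sum(assign_real_dic[r][i] for r in range(Nreal)) / Nreal
                           for i in range(ntarg)])
    assert np.allclose(probl, probl_loop)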
From 88414f9c409a3081bff3e368a848404549d40912 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sun, 31 Mar 2024 18:53:14 -0400 Subject: [PATCH 234/297] Update ELG_LOPnotqso_LSScat_pipe.sh --- scripts/ELG_LOPnotqso_LSScat_pipe.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/ELG_LOPnotqso_LSScat_pipe.sh b/scripts/ELG_LOPnotqso_LSScat_pipe.sh index 5519aec58..fce0765e1 100755 --- a/scripts/ELG_LOPnotqso_LSScat_pipe.sh +++ b/scripts/ELG_LOPnotqso_LSScat_pipe.sh @@ -8,7 +8,7 @@ PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py TRACER='ELG_LOP' -python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type $TRACER --notqso y--combwspec n --fullr y --survey Y1 --maxr 18 --version $1 From dfcf8f0d61944fece3031794829c7f58ee8b38ed Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Sun, 31 Mar 2024 19:40:15 -0400 Subject: [PATCH 235/297] Update ELG_LOPnotqso_LSScat_pipe.sh --- scripts/ELG_LOPnotqso_LSScat_pipe.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/ELG_LOPnotqso_LSScat_pipe.sh b/scripts/ELG_LOPnotqso_LSScat_pipe.sh index fce0765e1..c1764c61c 100755 --- a/scripts/ELG_LOPnotqso_LSScat_pipe.sh +++ b/scripts/ELG_LOPnotqso_LSScat_pipe.sh @@ -8,9 +8,9 @@ PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py TRACER='ELG_LOP' -srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 +#srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 -srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type $TRACER --notqso y--combwspec n --fullr y --survey Y1 --maxr 18 --version $1 +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type $TRACER --notqso y --combwspec n --fullr y --survey Y1 --maxr 18 --version $1 srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --fillran y --apply_veto y --maxr 18 --version $1 From bcdd1043e9b7ca6a2dddad1828364172189ea1f0 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 11:27:17 -0400 Subject: [PATCH 236/297] Update LRG_LSScat_pipe.sh --- scripts/LRG_LSScat_pipe.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/LRG_LSScat_pipe.sh b/scripts/LRG_LSScat_pipe.sh index 8e1527643..415a3b4f5 100755 --- a/scripts/LRG_LSScat_pipe.sh +++ b/scripts/LRG_LSScat_pipe.sh @@ -28,3 +28,5 @@ python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir 
/global/cfs/cdirs/desi/survey/catalogs/ --fulld n --imsys y --survey Y1 --verspec iron --imsys_zbin y --use_map_veto _HPmapcut --version $1 +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type LRG --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_IMLIN --basedir /global/cfs/cdirs/desi/survey/catalogs/ + From 77d11730188a6ba2c4c186cfc3a807df197f522d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 12:43:01 -0400 Subject: [PATCH 237/297] tracer pipelines --- scripts/ELG_LOPnotqso_LSScat_pipe.sh | 2 +- scripts/LRG_LSScat_steps.txt | 19 ++++++++++++++ scripts/QSO_LSScat_pipe.sh | 38 ++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 scripts/LRG_LSScat_steps.txt create mode 100755 scripts/QSO_LSScat_pipe.sh diff --git a/scripts/ELG_LOPnotqso_LSScat_pipe.sh b/scripts/ELG_LOPnotqso_LSScat_pipe.sh index c1764c61c..e08635d0f 100755 --- a/scripts/ELG_LOPnotqso_LSScat_pipe.sh +++ b/scripts/ELG_LOPnotqso_LSScat_pipe.sh @@ -8,7 +8,7 @@ PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py TRACER='ELG_LOP' -#srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --notqso y --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type $TRACER --notqso y --combwspec n --fullr y --survey Y1 --maxr 18 --version $1 diff --git a/scripts/LRG_LSScat_steps.txt b/scripts/LRG_LSScat_steps.txt new file mode 100644 index 000000000..6e72adb40 --- /dev/null +++ b/scripts/LRG_LSScat_steps.txt @@ -0,0 +1,19 @@ +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type LRG --combwspec n --fullr y --survey Y1 --maxr 18 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_veto y --verspec iron --survey Y1 --maxr 0 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --fillran y --apply_veto y --add_veto y --maxr 18 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --survey Y1 --mkHPmaps y + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_veto y --verspec iron --survey Y1 --maxr 0 + +python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --add_tl y --maxr 18 --par n + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_map_veto y --verspec iron --survey Y1 + +python 
$LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --add_weight_zfail y --survey Y1 --use_map_veto _HPmapcut + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type LRG --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --swap20211212 y --verspec iron --survey Y1 \ No newline at end of file diff --git a/scripts/QSO_LSScat_pipe.sh b/scripts/QSO_LSScat_pipe.sh new file mode 100755 index 000000000..973131440 --- /dev/null +++ b/scripts/QSO_LSScat_pipe.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +set -e + +source /global/common/software/desi/desi_environment.sh main +export LSSCODE=$HOME +PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py + +TRACER='QSO' + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type $TRACER --combwspec n --fullr y --survey Y1 --maxr 18 --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --fillran y --apply_veto y --maxr 18 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --survey Y1 --mkHPmaps y --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_veto y --verspec iron --survey Y1 --maxr 0 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --add_tl y --maxr 18 --par n --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_map_veto y --verspec iron --survey Y1 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --add_weight_zfail y --survey Y1 --use_map_veto _HPmapcut --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --swap20211212 y --verspec iron --survey Y1 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --imsys y --survey Y1 --verspec iron --imsys_zbin y --use_map_veto _HPmapcut --version $1 + +source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main +export LSSCODE=$HOME +PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py + +python scripts/main/mkCat_main.py --type $TRACER --version $1 --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --regressis y --add_regressis y --survey Y1 --verspec iron --imsys_zbin y + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type $TRACER --notqso y --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_SN --basedir /global/cfs/cdirs/desi/survey/catalogs/ + From d099f916dbb8fa578f20955530886588a78e595a Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 
2024 13:29:34 -0400 Subject: [PATCH 238/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 3c0dffa85..941b63bbb 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -5,6 +5,7 @@ import LSS.common_tools as common from LSS.globals import main +from LSS.bitweights import pack_bitweights import logging @@ -133,12 +134,28 @@ def get_good_real(dic,real_num): import sys #sys.exit() logger.info('dictionary keys are '+str(assign_real_dic.keys())) +bool_list = [] +for key in assign_real_dic.keys(): + bool_list.append(assign_real_dic[key]) +bool_2d = np.vstack(bool_list) + +bitweights = pack_bitweights(bool_2d) + probl = np.zeros(len(alltids)) for real in inds: probl += assign_real_dic[real]*1. probl = probl/64 -h = np.histogram(probl) -print(h) + +outf = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v3_1/altmtl1_R64/bitweights.fits' +out_tab = Table() +out_tab['TARGETID'] = alltids +out_tab['BITWEIGHTS'] = bitweights +out_tab['PROB_OBS'] = probl + +commone.write_LSS(out_tab,outf) + +#h = np.histogram(probl) +#print(h) #for i in range(0,len(alltids)): # nt = 0 # for real in inds: From 71c55ff3148d8dde13825cd9137e867fe2e620da Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 13:41:49 -0400 Subject: [PATCH 239/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 941b63bbb..31cf155f6 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -137,8 +137,8 @@ def get_good_real(dic,real_num): bool_list = [] for key in assign_real_dic.keys(): bool_list.append(assign_real_dic[key]) -bool_2d = np.vstack(bool_list) - +bool_2d = np.vstack(bool_list).transpose() +logger.info('about to pack bitweights from array of shape '+str(np.shape(bool_2d))) bitweights = pack_bitweights(bool_2d) probl = np.zeros(len(alltids)) From 655c5acac00b5a47b0d807c695c370b1be20b8fa Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 13:48:44 -0400 Subject: [PATCH 240/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 31cf155f6..4d42ae7bc 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -135,8 +135,8 @@ def get_good_real(dic,real_num): #sys.exit() logger.info('dictionary keys are '+str(assign_real_dic.keys())) bool_list = [] -for key in assign_real_dic.keys(): - bool_list.append(assign_real_dic[key]) +for real in inds:: + bool_list.append(assign_real_dic[real]) bool_2d = np.vstack(bool_list).transpose() logger.info('about to pack bitweights from array of shape '+str(np.shape(bool_2d))) bitweights = pack_bitweights(bool_2d) @@ -152,7 +152,7 @@ def get_good_real(dic,real_num): out_tab['BITWEIGHTS'] = bitweights out_tab['PROB_OBS'] = probl -commone.write_LSS(out_tab,outf) +common.write_LSS(out_tab,outf) #h = np.histogram(probl) #print(h) From 650ea85858b98e9b6ce2366b1cf0b9563b574a86 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 13:51:50 -0400 Subject: [PATCH 241/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 4d42ae7bc..aa92ad347 100644 
--- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -135,7 +135,7 @@ def get_good_real(dic,real_num): #sys.exit() logger.info('dictionary keys are '+str(assign_real_dic.keys())) bool_list = [] -for real in inds:: +for real in inds: bool_list.append(assign_real_dic[real]) bool_2d = np.vstack(bool_list).transpose() logger.info('about to pack bitweights from array of shape '+str(np.shape(bool_2d))) From e99e6c46ec96f7dfa1cba829a6ce021cabafbbbf Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 14:11:04 -0400 Subject: [PATCH 242/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index aa92ad347..389902591 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -93,7 +93,7 @@ def get_all_asgn(indir): specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION'] logger.info('loaded specf file '+specfo) -specfc = common.cut_specdat(specf,badfib=mainp.badfib,tsnr_min=tsnrcut,tsnr_col=tnsrcol) +specfc = common.cut_specdat(specf,badfib=mainp.badfib)#,tsnr_min=tsnrcut,tsnr_col=tnsrcol) gtl = np.unique(specfc['TILELOCID']) assign_real_dic = {} From 8f5619d39ffaf0f71bc0d301628ac359804bdf5b Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 14:38:14 -0400 Subject: [PATCH 243/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 389902591..e60c16278 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -4,6 +4,7 @@ from astropy.table import Table import LSS.common_tools as common +import LSS.cattools as ct from LSS.globals import main from LSS.bitweights import pack_bitweights @@ -78,23 +79,30 @@ def get_all_asgn(indir): badfib = mainp.badfib -wd = mt['SURVEY'] == 'main' -wd &= mt['ZDONE'] == 'true' -wd &= mt['FAPRGRM'] == pdir -wd &=mt['ZDATE'] < 20220900 #Y1 cutoff +#wd = mt['SURVEY'] == 'main' +#wd &= mt['ZDONE'] == 'true' +#wd &= mt['FAPRGRM'] == pdir +#wd &=mt['ZDATE'] < 20220900 #Y1 cutoff -mtld = mt[wd] -ldirspec = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/' -specfo = ldirspec+'datcomb_'+pdir+'_spec_zdone.fits' -logger.info('loading specf file '+specfo) -specf = Table(fitsio.read(specfo)) -sel = np.isin(specf['TILEID'],mtld['TILEID']) -specf = specf[sel] -specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION'] +#mtld = mt[wd] +#ldirspec = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/' +#specfo = ldirspec+'datcomb_'+pdir+'_spec_zdone.fits' +#logger.info('loading specf file '+specfo) +#specf = Table(fitsio.read(specfo)) +#sel = np.isin(specf['TILEID'],mtld['TILEID']) +#specf = specf[sel] +#specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION'] -logger.info('loaded specf file '+specfo) -specfc = common.cut_specdat(specf,badfib=mainp.badfib)#,tsnr_min=tsnrcut,tsnr_col=tnsrcol) -gtl = np.unique(specfc['TILELOCID']) +#logger.info('loaded specf file '+specfo) +#specfc = common.cut_specdat(specf,badfib=mainp.badfib)#,tsnr_min=tsnrcut,tsnr_col=tnsrcol) +#gtl = np.unique(specfc['TILELOCID']) + +specdat = ct.get_specdat(ldirspec,pdir,'iron',badfib= main('LRG', 'iron', survey='Y1').badfib) +tlocid = 10000*specdat['TILEID'] +specdat['LOCATION'] +gtl = np.unique(tlocid)#np.unique(specdat['TILELOCID']) + + +logger.info('good goodhardware list') assign_real_dic = {} testl = [] From 
66827bd627a5f6fadef8ac334f6f3e56ce2a674d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 14:39:27 -0400 Subject: [PATCH 244/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index e60c16278..6edd3a5da 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -4,7 +4,7 @@ from astropy.table import Table import LSS.common_tools as common -import LSS.cattools as ct +import LSS.main.cattools as ct from LSS.globals import main from LSS.bitweights import pack_bitweights From 943a056cf53316036c191c9246c2d42527f8f39a Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 14:41:59 -0400 Subject: [PATCH 245/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 6edd3a5da..4749f7e5b 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -85,7 +85,7 @@ def get_all_asgn(indir): #wd &=mt['ZDATE'] < 20220900 #Y1 cutoff #mtld = mt[wd] -#ldirspec = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/' +ldirspec = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/' #specfo = ldirspec+'datcomb_'+pdir+'_spec_zdone.fits' #logger.info('loading specf file '+specfo) #specf = Table(fitsio.read(specfo)) From 6ca26a82a3029fa643cd667d4b85746e05616285 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 15:00:54 -0400 Subject: [PATCH 246/297] Update comp_amtl_asgn.py --- scripts/comp_amtl_asgn.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/comp_amtl_asgn.py b/scripts/comp_amtl_asgn.py index 4749f7e5b..921e2d58e 100644 --- a/scripts/comp_amtl_asgn.py +++ b/scripts/comp_amtl_asgn.py @@ -151,6 +151,7 @@ def get_good_real(dic,real_num): probl = np.zeros(len(alltids)) for real in inds: + logger.info('number of assignments in realization '+str(real)+' '+str(np.sum(assign_real_dic[real]))) probl += assign_real_dic[real]*1. 
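Background on the packing step wired in over the preceding patches: pack_bitweights takes the (n_target, n_realization) boolean matrix built from assign_real_dic and compresses each row into 64-bit integers, one bit per realization, which is what gets written to the BITWEIGHTS column; PROB_OBS is then the popcount of those bits divided by the number of realizations. An illustrative stand-in with the same shape convention follows; the actual LSS.bitweights.pack_bitweights may differ in chunking and bit order, so treat this as a sketch rather than the library implementation:

    import numpy as np

    def pack_bitweights_sketch(bool_2d):
        # bool_2d: shape (n_target, n_real); returns one 64-bit column per
        # 64 realizations, with bit b of column c holding realization 64*c + b
        n_target, n_real = bool_2d.shape
        n_chunk = (n_real + 63) // 64
        out = np.zeros((n_target, n_chunk), dtype=np.uint64)
        for real in range(n_real):
            chunk, bit = divmod(real, 64)
            out[:, chunk] |= bool_2d[:, real].astype(np.uint64) << np.uint64(bit)
        return out.view(np.int64)  # reinterpret as signed ints for the catalog column

This is also why the transpose added a few patches back matters: np.vstack(bool_list) stacks one row per realization, so the array must be transposed to (n_target, n_real) before packing.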
probl = probl/64 From d105f9eb0239dcfad4107f8e6e2148fdf6b7af5e Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 18:08:51 -0400 Subject: [PATCH 247/297] Update QSO_LSScat_pipe.sh --- scripts/QSO_LSScat_pipe.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/QSO_LSScat_pipe.sh b/scripts/QSO_LSScat_pipe.sh index 973131440..261b1c214 100755 --- a/scripts/QSO_LSScat_pipe.sh +++ b/scripts/QSO_LSScat_pipe.sh @@ -34,5 +34,5 @@ PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py python scripts/main/mkCat_main.py --type $TRACER --version $1 --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --regressis y --add_regressis y --survey Y1 --verspec iron --imsys_zbin y -srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type $TRACER --notqso y --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_SN --basedir /global/cfs/cdirs/desi/survey/catalogs/ +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type $TRACER --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_RF--basedir /global/cfs/cdirs/desi/survey/catalogs/ From 4be3a822361917702cee3d375db41493a7210a86 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 19:27:25 -0400 Subject: [PATCH 248/297] Create get_DR1_bitweights.py --- scripts/get_DR1_bitweights.py | 186 ++++++++++++++++++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 scripts/get_DR1_bitweights.py diff --git a/scripts/get_DR1_bitweights.py b/scripts/get_DR1_bitweights.py new file mode 100644 index 000000000..90c6bdadb --- /dev/null +++ b/scripts/get_DR1_bitweights.py @@ -0,0 +1,186 @@ +import numpy as np +import fitsio +import glob +from astropy.table import Table + +import LSS.common_tools as common +from LSS.globals import main +from LSS.bitweights import pack_bitweights + +import logging + +import argparse +parser = argparse.ArgumentParser() +parser.add_argument("--prog", choices=['DARK','BRIGHT']) +parser.add_argument("--amtl_version",default='JL_Y1Run2') +parser.add_argument("--cat_version",default='test') +parser.add_argument("--nreal",default=128,dtype=int) +args = parser.parse_args() + + +# create logger +logname = 'bitweights' +logger = logging.getLogger(logname) +logger.setLevel(logging.INFO) + +# create console handler and set level to debug +ch = logging.StreamHandler() +ch.setLevel(logging.INFO) + +# create formatter +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + +# add formatter to ch +ch.setFormatter(formatter) + +# add ch to logger +logger.addHandler(ch) + +logger.info('script is starting') +#just start with the mock 1 v3_1 altmtl as an example + +lssdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/'+args.cat_version+'/' + +if args.prog == 'BRIGHT': + alltids = fitsio.read(lssdir+tp+'BGS_ANY_full_noveto.dat.fits',columns=['TARGETID']) + alltids = np.unique(alltids['TARGETID']) + + +if args.prog == 'DARK': + tpl = ['LRG','QSO','ELG_LOPnotqso'] + tidl = [] + for tp in tpl: + f = fitsio.read(lssdir+tp+'_full_noveto.dat.fits',columns=['TARGETID']) + tidl.append(f) + alltids = np.concatenate(tidl) + alltids = np.unique(alltids['TARGETID']) + +logger.info(str(len(alltids))+ ' TARGETID will get their number of assignments tracked') + +def removeLeadingZeros(num): + + # traverse the entire string + for i in range(len(num)): + 
+ # check for the first non-zero character + if num[i] != '0': + # return the remaining string + res = num[i::]; + return res; + + # If the entire string is traversed + # that means it didn't have a single + # non-zero character, hence return "0" + return "0"; + +def get_all_asgn(indir): + fls = glob.glob(indir+'/*/fba*.fits') + assign_list = [] + for fl in fls: + asgn = Table(fitsio.read(fl,columns=['FIBER', 'TARGETID', 'LOCATION'])) + sp = fl.split('-') + tid = int(removeLeadingZeros(sp[-1].strip('.fits'))) + #print(tid) + asgn['TILEID'] = tid + sel = asgn['TARGETID'] > 0 + assign_list.append(asgn[sel]) + all_asgn = np.concatenate(assign_list) + return all_asgn + +#get the list of good tilelocid + +mainp = main('LRG','iron') +pdir = 'dark' +mt = mainp.mtld +tiles = mainp.tiles + + +tsnrcut = mainp.tsnrcut +dchi2 = mainp.dchi2 +tnsrcol = mainp.tsnrcol +badfib = mainp.badfib + + +wd = mt['SURVEY'] == 'main' +wd &= mt['ZDONE'] == 'true' +wd &= mt['FAPRGRM'] == pdir +wd &=mt['ZDATE'] < 20220900 #Y1 cutoff + +mtld = mt[wd] +ldirspec = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/' +specfo = ldirspec+'datcomb_'+pdir+'_spec_zdone.fits' +logger.info('loading specf file '+specfo) +specf = Table(fitsio.read(specfo)) +sel = np.isin(specf['TILEID'],mtld['TILEID']) +specf = specf[sel] +specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION'] + +logger.info('loaded specf file '+specfo) +specfc = common.cut_specdat(specf,badfib=mainp.badfib)#,tsnr_min=tsnrcut,tsnr_col=tnsrcol) +gtl = np.unique(specfc['TILELOCID']) + +assign_real_dic = {} + + + +def get_good_real(dic,real_num): + indir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/'+args.amtl_ver+args.prog+'/Univ'+str(real_num).zfill(3)+'/fa/MAIN' + all_asgn = get_all_asgn(indir) + asgn_tloc = 10000*all_asgn['TILEID'] +all_asgn['LOCATION'] + good_asgn = np.isin(asgn_tloc,gtl) + good_tids = all_asgn['TARGETID'][good_asgn] + asgn_real = np.isin(alltids,good_tids) + assign_real_dic[real_num] = asgn_real + del asgn_real + +from multiprocessing import Pool +Nreal = args.nreal +inds = np.arange(0,Nreal) +#pool = sharedmem.MapReduce() +logger.info('about to get '+str(Nreal)+' realizations in parallel') +#with Pool() as pool: +# #pool.map(get_good_real,inds) +# pool.map(test,inds) + +from multiprocessing import Process, Manager +manager = Manager() +assign_real_dic = manager.dict() +job = [Process(target=get_good_real, args=(assign_real_dic, i)) for i in inds] +_ = [p.start() for p in job] +_ = [p.join() for p in job] + +logger.info('got all realizations') +#logger.info('dictionary keys are '+str(d.keys())) +import sys +#sys.exit() +logger.info('dictionary keys are '+str(assign_real_dic.keys())) +bool_list = [] +for real in inds: + bool_list.append(assign_real_dic[real]) +bool_2d = np.vstack(bool_list).transpose() +logger.info('about to pack bitweights from array of shape '+str(np.shape(bool_2d))) +bitweights = pack_bitweights(bool_2d) + +probl = np.zeros(len(alltids)) +for real in inds: + probl += assign_real_dic[real]*1. 
+probl = probl/64 + +outf = lssdir+args.prog+'_bitweights.fits' +out_tab = Table() +out_tab['TARGETID'] = alltids +out_tab['BITWEIGHTS'] = bitweights +out_tab['PROB_OBS'] = probl + +common.write_LSS(out_tab,outf) + +#h = np.histogram(probl) +#print(h) +#for i in range(0,len(alltids)): +# nt = 0 +# for real in inds: +# nt += assign_real_dic[real][i] +# prob = nt/Nreal +# probl[i] = prob +# if i%1e5 == 0: +# logger.info(str(i)+' '+str(prob)) \ No newline at end of file From f1c2561995db5dbc8584e17190d09069e9ed5977 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 19:32:19 -0400 Subject: [PATCH 249/297] Update get_DR1_bitweights.py --- scripts/get_DR1_bitweights.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/get_DR1_bitweights.py b/scripts/get_DR1_bitweights.py index 90c6bdadb..d5b3c1876 100644 --- a/scripts/get_DR1_bitweights.py +++ b/scripts/get_DR1_bitweights.py @@ -14,7 +14,7 @@ parser.add_argument("--prog", choices=['DARK','BRIGHT']) parser.add_argument("--amtl_version",default='JL_Y1Run2') parser.add_argument("--cat_version",default='test') -parser.add_argument("--nreal",default=128,dtype=int) +parser.add_argument("--nreal",default=128,type=int) args = parser.parse_args() From d0124be75077221b6fc0224285dbbaf49d18a57f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 19:42:45 -0400 Subject: [PATCH 250/297] Update get_DR1_bitweights.py --- scripts/get_DR1_bitweights.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/get_DR1_bitweights.py b/scripts/get_DR1_bitweights.py index d5b3c1876..e817aadbb 100644 --- a/scripts/get_DR1_bitweights.py +++ b/scripts/get_DR1_bitweights.py @@ -124,7 +124,7 @@ def get_all_asgn(indir): def get_good_real(dic,real_num): - indir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/'+args.amtl_ver+args.prog+'/Univ'+str(real_num).zfill(3)+'/fa/MAIN' + indir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/altmtl/'+args.amtl_version+args.prog+'/Univ'+str(real_num).zfill(3)+'/fa/MAIN' all_asgn = get_all_asgn(indir) asgn_tloc = 10000*all_asgn['TILEID'] +all_asgn['LOCATION'] good_asgn = np.isin(asgn_tloc,gtl) From 092f98082c5df8928ce0400cf18529a4433996d5 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 19:50:16 -0400 Subject: [PATCH 251/297] Create BGS_BRIGHT_LSScat_pipe.sh --- scripts/BGS_BRIGHT_LSScat_pipe.sh | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100755 scripts/BGS_BRIGHT_LSScat_pipe.sh diff --git a/scripts/BGS_BRIGHT_LSScat_pipe.sh b/scripts/BGS_BRIGHT_LSScat_pipe.sh new file mode 100755 index 000000000..768e9f718 --- /dev/null +++ b/scripts/BGS_BRIGHT_LSScat_pipe.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e + +source /global/common/software/desi/desi_environment.sh main +export LSSCODE=$HOME +PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py + +TRACER='BGS_BRIGHT' + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type $TRACER --combwspec n --fullr y --survey Y1 --maxr 18 --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --fillran y --apply_veto y --maxr 18 
--version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --survey Y1 --mkHPmaps y --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_veto y --verspec iron --survey Y1 --maxr 0 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --add_tl y --maxr 18 --par n --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_map_veto y --verspec iron --survey Y1 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --add_weight_zfail y --survey Y1 --use_map_veto _HPmapcut --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --swap20211212 y --verspec iron --survey Y1 --version $1 + + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type $TRACER --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_RF--basedir /global/cfs/cdirs/desi/survey/catalogs/ + From 86e9c0a0659fe6d7a126e8a4717704eda08597d0 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 20:03:35 -0400 Subject: [PATCH 252/297] Update get_DR1_bitweights.py --- scripts/get_DR1_bitweights.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/get_DR1_bitweights.py b/scripts/get_DR1_bitweights.py index e817aadbb..dd8b12732 100644 --- a/scripts/get_DR1_bitweights.py +++ b/scripts/get_DR1_bitweights.py @@ -42,7 +42,7 @@ lssdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/'+args.cat_version+'/' if args.prog == 'BRIGHT': - alltids = fitsio.read(lssdir+tp+'BGS_ANY_full_noveto.dat.fits',columns=['TARGETID']) + alltids = fitsio.read(lssdir+'BGS_ANY_full_noveto.dat.fits',columns=['TARGETID']) alltids = np.unique(alltids['TARGETID']) From 8c5e5b427e062072c2c8633781d3556050992393 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 20:14:03 -0400 Subject: [PATCH 253/297] Create BGS_ANY_LSScat_pipe.sh --- scripts/BGS_ANY_LSScat_pipe.sh | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100755 scripts/BGS_ANY_LSScat_pipe.sh diff --git a/scripts/BGS_ANY_LSScat_pipe.sh b/scripts/BGS_ANY_LSScat_pipe.sh new file mode 100755 index 000000000..36c300a45 --- /dev/null +++ b/scripts/BGS_ANY_LSScat_pipe.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e + +source /global/common/software/desi/desi_environment.sh main +export LSSCODE=$HOME +PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py + +TRACER='BGS_ANY' + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld y --verspec iron --survey Y1 --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --type $TRACER --combwspec n --fullr y --survey Y1 --maxr 18 --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type $TRACER --basedir 
/global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --fillran y --apply_veto y --maxr 18 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --survey Y1 --mkHPmaps y --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_veto y --verspec iron --survey Y1 --maxr 0 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main_ran.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --survey Y1 --add_tl y --maxr 18 --par n --version $1 + +srun -N 1 -C cpu -t 04:00:00 -q interactive python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --apply_map_veto y --verspec iron --survey Y1 --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --verspec iron --add_weight_zfail y --survey Y1 --use_map_veto _HPmapcut --version $1 + +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --swap20211212 y --verspec iron --survey Y1 --version $1 + + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type $TRACER --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_RF--basedir /global/cfs/cdirs/desi/survey/catalogs/ + From dbed6fdf4ae37e5a9d8a12489aac6b854789c496 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Mon, 1 Apr 2024 20:21:15 -0400 Subject: [PATCH 254/297] Update get_DR1_bitweights.py --- scripts/get_DR1_bitweights.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/get_DR1_bitweights.py b/scripts/get_DR1_bitweights.py index dd8b12732..40f32ba30 100644 --- a/scripts/get_DR1_bitweights.py +++ b/scripts/get_DR1_bitweights.py @@ -116,7 +116,7 @@ def get_all_asgn(indir): specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION'] logger.info('loaded specf file '+specfo) -specfc = common.cut_specdat(specf,badfib=mainp.badfib)#,tsnr_min=tsnrcut,tsnr_col=tnsrcol) +specfc = common.cut_specdat(specf,badfib=mainp.badfib,tsnr_min=tsnrcut,tsnr_col=tnsrcol) gtl = np.unique(specfc['TILELOCID']) assign_real_dic = {} @@ -164,7 +164,7 @@ def get_good_real(dic,real_num): probl = np.zeros(len(alltids)) for real in inds: probl += assign_real_dic[real]*1. 
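The good-hardware filtering that both comp_amtl_asgn.py and get_DR1_bitweights.py rely on is built around the encoding TILELOCID = 10000*TILEID + LOCATION: because LOCATION is always below 10000, the tile/fiber-location pair packs losslessly into a single integer, gtl is the set of encoded pairs that survive cut_specdat, and an assignment only counts when its own encoded pair lands in that set via np.isin. A small worked example of the encode/filter/decode cycle, with hypothetical values:

    import numpy as np

    tileid = np.array([1000, 1000, 2353])
    location = np.array([0, 513, 9001])
    tilelocid = 10000 * tileid + location      # [10000000, 10000513, 23539001]

    gtl = np.array([10000513, 23539001])       # hypothetical good-hardware list
    good = np.isin(tilelocid, gtl)             # [False, True, True]

    # decoding recovers both pieces
    assert np.all(tilelocid // 10000 == tileid)
    assert np.all(tilelocid % 10000 == location)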
-probl = probl/64 +probl = probl/Nreal outf = lssdir+args.prog+'_bitweights.fits' out_tab = Table() From 0a4dd300cfecd5e69abe8198dbb14381002061a3 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 11:57:43 -0400 Subject: [PATCH 255/297] LSScat pipelines --- scripts/BGS_ANY_LSScat_pipe.sh | 2 +- scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh | 18 ++++++++++++++++++ scripts/BGS_BRIGHT_LSScat_pipe.sh | 2 +- 3 files changed, 20 insertions(+), 2 deletions(-) create mode 100755 scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh diff --git a/scripts/BGS_ANY_LSScat_pipe.sh b/scripts/BGS_ANY_LSScat_pipe.sh index 36c300a45..118aad306 100755 --- a/scripts/BGS_ANY_LSScat_pipe.sh +++ b/scripts/BGS_ANY_LSScat_pipe.sh @@ -27,5 +27,5 @@ python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --swap20211212 y --verspec iron --survey Y1 --version $1 -srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type $TRACER --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --imsys_colname WEIGHT_RF--basedir /global/cfs/cdirs/desi/survey/catalogs/ +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type $TRACER --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/ diff --git a/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh b/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh new file mode 100755 index 000000000..3b3d5dedb --- /dev/null +++ b/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e + +source /global/common/software/desi/desi_environment.sh main +export LSSCODE=$HOME +PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py + +python $LSSCODE/LSSscripts/main/mkCat_main.py --type BGS_BRIGHT --verspec iron --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_fs y --survey Y1 --version $1 + +python $LSSCODE/LSSscripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --verspec iron --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --survey Y1 --version $1 + +python $LSSCODE/LSSscripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --imsys y --survey Y1 --verspec iron --imsys_zbin y --version $1 --use_map_veto _HPmapcut + +python $LSSCODE/LSS/scripts/validation/validation_improp_full.py --tracers BGS_BRIGHT-21.5 --version $1 --weight_col WEIGHT_IMLIN + +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/ + diff --git a/scripts/BGS_BRIGHT_LSScat_pipe.sh b/scripts/BGS_BRIGHT_LSScat_pipe.sh index 768e9f718..01b0a4683 100755 --- a/scripts/BGS_BRIGHT_LSScat_pipe.sh +++ b/scripts/BGS_BRIGHT_LSScat_pipe.sh @@ -27,5 +27,5 @@ python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global python $LSSCODE/LSS/scripts/main/mkCat_main.py --type $TRACER --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --swap20211212 y --verspec iron --survey Y1 --version $1 -srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type $TRACER --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y 
--par y --imsys_colname WEIGHT_RF--basedir /global/cfs/cdirs/desi/survey/catalogs/ +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type $TRACER --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/ From 7c9f5144cdb00d58d6fe0a42e1945702e77edcfb Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 11:59:07 -0400 Subject: [PATCH 256/297] Update BGS_BRIGHT-21.5_LSScat_pipe.sh --- scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh b/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh index 3b3d5dedb..0f72620d5 100755 --- a/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh +++ b/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh @@ -6,13 +6,13 @@ source /global/common/software/desi/desi_environment.sh main export LSSCODE=$HOME PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py -python $LSSCODE/LSSscripts/main/mkCat_main.py --type BGS_BRIGHT --verspec iron --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_fs y --survey Y1 --version $1 +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type BGS_BRIGHT --verspec iron --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --add_fs y --survey Y1 --version $1 -python $LSSCODE/LSSscripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --verspec iron --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --survey Y1 --version $1 +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --verspec iron --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --survey Y1 --version $1 -python $LSSCODE/LSSscripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --imsys y --survey Y1 --verspec iron --imsys_zbin y --version $1 --use_map_veto _HPmapcut +python $LSSCODE/LSS/scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --basedir /global/cfs/cdirs/desi/survey/catalogs/ --fulld n --imsys y --survey Y1 --verspec iron --imsys_zbin y --version $1 --use_map_veto _HPmapcut python $LSSCODE/LSS/scripts/validation/validation_improp_full.py --tracers BGS_BRIGHT-21.5 --version $1 --weight_col WEIGHT_IMLIN -srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/ +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python $LSSCODE/LSS/cripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/ From c7b9b3330b6997a7cd703d41533784ad073fc860 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 12:03:31 -0400 Subject: [PATCH 257/297] Update LSSpipe_Y1.txt --- Sandbox/LSSpipe_Y1.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Sandbox/LSSpipe_Y1.txt b/Sandbox/LSSpipe_Y1.txt index 2d3945689..896aeac1c 100644 --- a/Sandbox/LSSpipe_Y1.txt +++ b/Sandbox/LSSpipe_Y1.txt @@ -23,6 +23,13 @@ python $LSSCODE/LSS/scripts/main/combdata_main.py --basedir /global/cfs/cdirs/de #to combine the emission line files: python $LSSCODE/LSS/scripts/main/combdata_main.py --basedir /global/cfs/cdirs/desi/survey/catalogs/ --verspec iron --prog dark --mkemlin y --dospec n --combpix n --survey Y1 +#!!!pipelines that run everything for 
v1, except for the addition of bitweights, starting from here, are +scripts/LRG_LSScat_pipe.sh +scripts/QSO_LSScat_pipe.sh +scripts/ELG_LSScat_pipe.sh +scripts/BGS_BRIGHT_LSScat_pipe.sh +scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh #needs to be after BGS_BRIGHT + #the below script is run for this list of target types + notqso combinations in order to generate the "full" LSS catalogs, pre veto masks #in this step, only unique targetid are kept, prioritizing those with an observation and then those with the greatest tsnr2 From 6fe53d94313fec34bcba89f0428f8ee971acec22 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 14:56:51 -0400 Subject: [PATCH 258/297] Update BGS_BRIGHT-21.5_LSScat_pipe.sh --- scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh b/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh index 0f72620d5..01a126600 100755 --- a/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh +++ b/scripts/BGS_BRIGHT-21.5_LSScat_pipe.sh @@ -2,7 +2,7 @@ set -e -source /global/common/software/desi/desi_environment.sh main +source /global/common/software/desi/users/adematti/cosmodesi_environment.sh main export LSSCODE=$HOME PYTHONPATH=$PYTHONPATH:$LSSCODE/LSS/py @@ -14,5 +14,5 @@ python $LSSCODE/LSS/scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --basedir python $LSSCODE/LSS/scripts/validation/validation_improp_full.py --tracers BGS_BRIGHT-21.5 --version $1 --weight_col WEIGHT_IMLIN -srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python $LSSCODE/LSS/cripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/ +srun -N 1 -C cpu -t 04:00:00 --qos interactive --account desi python $LSSCODE/LSS/scripts/main/mkCat_main.py --type BGS_BRIGHT-21.5 --fulld n --survey Y1 --verspec iron --version $1 --clusd y --clusran y --splitGC y --nz y --par y --basedir /global/cfs/cdirs/desi/survey/catalogs/ From 8b2a3a1269f95b6b1f6ad5d8e950c9b410dd0a14 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 14:56:56 -0400 Subject: [PATCH 259/297] Create add_DR1_bitweights_all.py --- scripts/add_DR1_bitweights_all.py | 45 +++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 scripts/add_DR1_bitweights_all.py diff --git a/scripts/add_DR1_bitweights_all.py b/scripts/add_DR1_bitweights_all.py new file mode 100644 index 000000000..bded044d5 --- /dev/null +++ b/scripts/add_DR1_bitweights_all.py @@ -0,0 +1,45 @@ +import numpy as np +import fitsio +import glob +from astropy.table import Table,join + +import LSS.common_tools as common +from LSS.globals import main +from LSS.bitweights import pack_bitweights + +import logging + +import argparse +parser = argparse.ArgumentParser() +parser.add_argument("--prog", choices=['DARK','BRIGHT'],default='DARK') +parser.add_argument("--cat_version",default='test') +args = parser.parse_args() + + + +lssdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/'+args.cat_version+'/' + +if args.prog == 'BRIGHT': + sys.exit('needs to be written') + #alltids = fitsio.read(lssdir+'BGS_ANY_full_noveto.dat.fits',columns=['TARGETID']) + #alltids = np.unique(alltids['TARGETID']) + + +if args.prog == 'DARK': + tpl = ['LRG','QSO','ELG_LOPnotqso'] + +bitf = fitsio.read(lssdir+args.prog+'_bitweights.fits') +fl = ['full_noveto','full','full_HPmapcut','clustering','NGC_clustering','SGC_clustering'] +for tp in tpl: + for ft in 
fl: + inflnm = lssdir+tp+'_'+ft+'.dat.fits' + infl = fitsio.read(lssdir+tp+'_'+ft+'.dat.fits') + li = len(infl) + infl = join(infl,bitf,keys=['TARGETID'],join_type='left') + lij = len(infl) + if li == lij: + common.write(infl,infnm) + else: + print('mismatch after join!') + print(tp,li,lij) + From f1b592a72c528d1ed9d527a80d079a664c2ecdfa Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 14:59:36 -0400 Subject: [PATCH 260/297] Update add_DR1_bitweights_all.py --- scripts/add_DR1_bitweights_all.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/add_DR1_bitweights_all.py b/scripts/add_DR1_bitweights_all.py index bded044d5..942825305 100644 --- a/scripts/add_DR1_bitweights_all.py +++ b/scripts/add_DR1_bitweights_all.py @@ -38,7 +38,7 @@ infl = join(infl,bitf,keys=['TARGETID'],join_type='left') lij = len(infl) if li == lij: - common.write(infl,infnm) + common.write_LSS(infl,infnm) else: print('mismatch after join!') print(tp,li,lij) From dedab7cf7180d90810605716950f03d7b0505e29 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 15:01:22 -0400 Subject: [PATCH 261/297] Update add_DR1_bitweights_all.py --- scripts/add_DR1_bitweights_all.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/add_DR1_bitweights_all.py b/scripts/add_DR1_bitweights_all.py index 942825305..9a9ab7df6 100644 --- a/scripts/add_DR1_bitweights_all.py +++ b/scripts/add_DR1_bitweights_all.py @@ -38,7 +38,7 @@ infl = join(infl,bitf,keys=['TARGETID'],join_type='left') lij = len(infl) if li == lij: - common.write_LSS(infl,infnm) + common.write_LSS(infl,inflnm) else: print('mismatch after join!') print(tp,li,lij) From 57754b10fd12c18f565f7eca37cb50f0a5427639 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 15:20:29 -0400 Subject: [PATCH 262/297] Update add_DR1_bitweights_all.py --- scripts/add_DR1_bitweights_all.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/add_DR1_bitweights_all.py b/scripts/add_DR1_bitweights_all.py index 9a9ab7df6..99055a99e 100644 --- a/scripts/add_DR1_bitweights_all.py +++ b/scripts/add_DR1_bitweights_all.py @@ -13,6 +13,7 @@ parser = argparse.ArgumentParser() parser.add_argument("--prog", choices=['DARK','BRIGHT'],default='DARK') parser.add_argument("--cat_version",default='test') +parser.add_argument("--tracers",default='all') args = parser.parse_args() @@ -26,7 +27,10 @@ if args.prog == 'DARK': - tpl = ['LRG','QSO','ELG_LOPnotqso'] + if args.tracers == 'all': + tpl = ['LRG','QSO','ELG_LOPnotqso'] + else: + tpl = [args.tracers] bitf = fitsio.read(lssdir+args.prog+'_bitweights.fits') fl = ['full_noveto','full','full_HPmapcut','clustering','NGC_clustering','SGC_clustering'] From b0f4e3fb89a83ddc2ec97545e7ab5441a08c20c7 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 17:57:12 -0400 Subject: [PATCH 263/297] Update patch_HPmapcut.py --- scripts/main/patch_HPmapcut.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/scripts/main/patch_HPmapcut.py b/scripts/main/patch_HPmapcut.py index 4c6726883..6a343617a 100644 --- a/scripts/main/patch_HPmapcut.py +++ b/scripts/main/patch_HPmapcut.py @@ -30,16 +30,16 @@ tps = [args.tracers] if args.tracers == 'all': - tps = ['ELG_LOPnotqso','QSO','LRG']#'BGS_BRIGHT' + tps = ['QSO','LRG','ELG_LOPnotqso']#'BGS_BRIGHT' for tp in tps: mainp = main(tp,args.verspec,survey=args.survey) df_cutdisk = Table(fitsio.read(indir+tp+'_full_HPmapcut.dat.fits')) df_cutdisk_cols = list(df_cutdisk.dtype.names) - 
if 'BITWEIGHTS' in df_cutdisk_cols: - df_cutdisk.remove_columns(['BITWEIGHTS','PROB_OBS']) - if 'BITWEIGHTS_1' in df_cutdisk_cols: - df_cutdisk.remove_columns(['BITWEIGHTS_1','PROB_OBS_1','BITWEIGHTS_2','PROB_OBS_2']) + #if 'BITWEIGHTS' in df_cutdisk_cols: + # df_cutdisk.remove_columns(['BITWEIGHTS','PROB_OBS']) + #if 'BITWEIGHTS_1' in df_cutdisk_cols: + # df_cutdisk.remove_columns(['BITWEIGHTS_1','PROB_OBS_1','BITWEIGHTS_2','PROB_OBS_2']) df_cutdisk_cols = list(df_cutdisk.dtype.names) df = Table(fitsio.read(indir+tp+'_full.dat.fits')) df_cols = list(df.dtype.names) @@ -65,11 +65,11 @@ print(df_cutdisk.dtype.names,df_cutnomatch.dtype.names) df_comb = vstack((df_cutdisk,df_cutnomatch)) print(tp,len(df_comb),len(np.unique(df_comb['TARGETID']))) - if tp[:3] != 'BGS': - bitf = fitsio.read(mainp.darkbitweightfile) - else: - bitf = fitsio.read(mainp.brightbitweightfile) - df_comb = join(df_comb,bitf,keys=['TARGETID'],join_type='left') + #if tp[:3] != 'BGS': + # bitf = fitsio.read(mainp.darkbitweightfile) + #else: + # bitf = fitsio.read(mainp.brightbitweightfile) + #df_comb = join(df_comb,bitf,keys=['TARGETID'],join_type='left') print(len(df_comb['TARGETID'])) print(df_comb.dtype.names) common.write_LSS(df_comb,indir+tp+'_full_HPmapcut.dat.fits') From 6b77b2a4089cec19a3b09d37a1cf55f4c691db2c Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 19:12:50 -0400 Subject: [PATCH 264/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index 99dcffda3..e40af6826 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -254,7 +254,8 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim if name == 'data' and 'bitwise' in weight_type: if 'default' in weight_type: - weights /= catalog['WEIGHT_COMP'][mask] + #weights /= catalog['WEIGHT_COMP'][mask] + weights = catalog['WEIGHT_SYS']*catalog['WEIGHT_ZFAIL'] print('dividing weights by WEIGHT_COMP') weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] @@ -409,6 +410,11 @@ def read_positions_weights(name): p, w = get_full_positions_weights(catalog, name=name, weight_type=weight_type, fibered=fibered, region=reg, weight_attrs=weight_attrs) positions.append(p) weights.append(w) + if fibered: + logger.info('loaded fibered full for '+name ) + else: + logger.info('loaded parent full for '+name) + logger.info(str(len(p))+' entries') return positions, weights if isinstance(name, (tuple, list)): From 0abaa87865674e973b3332bd4a1d8d93b7d03e8d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 19:15:16 -0400 Subject: [PATCH 265/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index e40af6826..ec59bcab9 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -259,12 +259,13 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim print('dividing weights by WEIGHT_COMP') weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] - if name == 'randoms': + #if name == 'randoms': #if 'default' in weight_type: # weights *= catalog['WEIGHT'][mask] - if 'bitwise' in weight_type and 'default' in weight_type: - weights /= catalog['FRAC_TLOBS_TILES'][mask] - print('dividing weights by FRAC_TLOBS_TILES') + #if 'bitwise' in weight_type 
and 'default' in weight_type: + + #weights /= catalog['FRAC_TLOBS_TILES'][mask] + #print('dividing weights by FRAC_TLOBS_TILES') # if 'RF' in weight_type: # weights *= catalog['WEIGHT_RF'][mask]*catalog['WEIGHT_COMP'][mask] # if 'zfail' in weight_type: From de22a1028fef252cfee553a32972f1d9eda58e50 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 19:18:18 -0400 Subject: [PATCH 266/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index ec59bcab9..ef1a65d2d 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -255,7 +255,7 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim if name == 'data' and 'bitwise' in weight_type: if 'default' in weight_type: #weights /= catalog['WEIGHT_COMP'][mask] - weights = catalog['WEIGHT_SYS']*catalog['WEIGHT_ZFAIL'] + weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask] print('dividing weights by WEIGHT_COMP') weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] From 58b6cd0669105df206a33b4b85c83328936f7d8e Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 19:29:02 -0400 Subject: [PATCH 267/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index ef1a65d2d..6703672e6 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -259,11 +259,11 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim print('dividing weights by WEIGHT_COMP') weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] - #if name == 'randoms': + if name == 'randoms': #if 'default' in weight_type: # weights *= catalog['WEIGHT'][mask] - #if 'bitwise' in weight_type and 'default' in weight_type: - + if 'bitwise' in weight_type and 'default' in weight_type: + weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask] #weights /= catalog['FRAC_TLOBS_TILES'][mask] #print('dividing weights by FRAC_TLOBS_TILES') # if 'RF' in weight_type: From d4c60e00b5cc9f17f6b5ca5d9fea60a7a22a0175 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 19:40:58 -0400 Subject: [PATCH 268/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index 6703672e6..d1c7ca681 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -256,14 +256,14 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim if 'default' in weight_type: #weights /= catalog['WEIGHT_COMP'][mask] weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask] - print('dividing weights by WEIGHT_COMP') + #print('dividing weights by WEIGHT_COMP') weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] if name == 'randoms': #if 'default' in weight_type: # weights *= catalog['WEIGHT'][mask] if 'bitwise' in weight_type and 'default' in weight_type: - weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask] + weights = np.ones_like(positions[0])#catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask] #weights /= catalog['FRAC_TLOBS_TILES'][mask] #print('dividing weights by FRAC_TLOBS_TILES') # if 'RF' in weight_type: From 
ea3203f0ee8ff40198c8ef3b6c9fc2257d63c2af Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 19:42:27 -0400 Subject: [PATCH 269/297] Update xirunpc.py --- scripts/xirunpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xirunpc.py b/scripts/xirunpc.py index 29828bbd3..c45845a83 100644 --- a/scripts/xirunpc.py +++ b/scripts/xirunpc.py @@ -540,7 +540,7 @@ def get_edges(corr_type='smu', bin_type='lin'): if bin_type == 'lin': edges = (sedges, np.linspace(-40., 40, 101)) #transverse and radial separations are coded to be the same here else: - edges = (sedges, np.linspace(0., 40., 41)) + edges = (sedges, np.linspace(-40., 40., 81)) elif corr_type == 'theta': edges = (np.linspace(0., 4., 101),) else: From a0f19b06a8b21db9a68419e7d1b712ad394ee851 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 19:47:11 -0400 Subject: [PATCH 270/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index d1c7ca681..e9644a9f2 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -258,12 +258,20 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask] #print('dividing weights by WEIGHT_COMP') weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] + if name == 'data' and 'IIP' in weight_type: + weights = 129/(1+128*catalog['PROB_OBS'][mask]) + if 'default' in weight_type: + #weights /= catalog['WEIGHT_COMP'][mask] + weights *= catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask] if name == 'randoms': #if 'default' in weight_type: # weights *= catalog['WEIGHT'][mask] if 'bitwise' in weight_type and 'default' in weight_type: weights = np.ones_like(positions[0])#catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask] + if 'IIP' in weight_type and 'default' in weight_type: + weights = np.ones_like(positions[0])#catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask] + logger.info('all random weights set to 1') #weights /= catalog['FRAC_TLOBS_TILES'][mask] #print('dividing weights by FRAC_TLOBS_TILES') # if 'RF' in weight_type: From 6870058b3eaba4e566dda3d0edadc9690822a014 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 20:27:01 -0400 Subject: [PATCH 271/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index e9644a9f2..6585750b6 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -255,7 +255,7 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim if name == 'data' and 'bitwise' in weight_type: if 'default' in weight_type: #weights /= catalog['WEIGHT_COMP'][mask] - weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask] + weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask]*129/(1+128*catalog['PROB_OBS'][mask]) #print('dividing weights by WEIGHT_COMP') weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] if name == 'data' and 'IIP' in weight_type: From 75d57504725998a22f1944fdb69ecd40f0650e49 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 20:32:51 -0400 Subject: [PATCH 272/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index 6585750b6..af8f409f3 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -255,7 +255,7 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim if name == 'data' and 'bitwise' in weight_type: if 'default' in weight_type: #weights /= catalog['WEIGHT_COMP'][mask] - weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask]*129/(1+128*catalog['PROB_OBS'][mask]) + weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask]/(129/(1+128*catalog['PROB_OBS'][mask])) #print('dividing weights by WEIGHT_COMP') weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] if name == 'data' and 'IIP' in weight_type: From 4e811bb697379b59b0373fba64e394f635ed9d37 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Tue, 2 Apr 2024 20:34:30 -0400 Subject: [PATCH 273/297] Update cosmodesi_io_tools.py --- py/LSS/cosmodesi_io_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/LSS/cosmodesi_io_tools.py b/py/LSS/cosmodesi_io_tools.py index af8f409f3..7d540b926 100755 --- a/py/LSS/cosmodesi_io_tools.py +++ b/py/LSS/cosmodesi_io_tools.py @@ -255,7 +255,7 @@ def get_clustering_positions_weights(catalog, distance, zlim=(0., np.inf),maglim if name == 'data' and 'bitwise' in weight_type: if 'default' in weight_type: #weights /= catalog['WEIGHT_COMP'][mask] - weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask]/(129/(1+128*catalog['PROB_OBS'][mask])) + weights = catalog['WEIGHT_SYS'][mask]*catalog['WEIGHT_ZFAIL'][mask]#/(129/(1+128*catalog['PROB_OBS'][mask])) #print('dividing weights by WEIGHT_COMP') weights = _format_bitweights(catalog['BITWEIGHTS'][mask]) + [weights] if name == 'data' and 'IIP' in weight_type: From 61dcab5de302cacc0387dcdf6dfa0c7fa706d6c7 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 3 Apr 2024 12:41:40 -0400 Subject: [PATCH 274/297] Update add_DR1_bitweights_all.py --- scripts/add_DR1_bitweights_all.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/scripts/add_DR1_bitweights_all.py b/scripts/add_DR1_bitweights_all.py index 99055a99e..847981890 100644 --- a/scripts/add_DR1_bitweights_all.py +++ b/scripts/add_DR1_bitweights_all.py @@ -32,10 +32,20 @@ else: tpl = [args.tracers] +if args.prog == 'BRIGHT': + if args.tracers == 'all': + tpl = ['BGS_BRIGHT-21.5','BGS_BRIGHT','BGS_ANY'] + else: + tpl = [args.tracers] + + bitf = fitsio.read(lssdir+args.prog+'_bitweights.fits') fl = ['full_noveto','full','full_HPmapcut','clustering','NGC_clustering','SGC_clustering'] for tp in tpl: - for ft in fl: + fll = fl + if tp == 'BGS_BRIGHT-21.5': + fll = ['clustering','NGC_clustering','SGC_clustering'] #full catalogs don't really make sense for abs mag cut because they require a redshift + for ft in fll: inflnm = lssdir+tp+'_'+ft+'.dat.fits' infl = fitsio.read(lssdir+tp+'_'+ft+'.dat.fits') li = len(infl) From 35121c53951e12679153a717e8be695ccfdb82d2 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 3 Apr 2024 12:43:10 -0400 Subject: [PATCH 275/297] Update add_DR1_bitweights_all.py --- scripts/add_DR1_bitweights_all.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/scripts/add_DR1_bitweights_all.py b/scripts/add_DR1_bitweights_all.py index 847981890..0c7568799 100644 --- a/scripts/add_DR1_bitweights_all.py +++ b/scripts/add_DR1_bitweights_all.py @@ -20,11 +20,6 @@ lssdir = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/LSScats/'+args.cat_version+'/' -if args.prog == 'BRIGHT': - 
sys.exit('needs to be written') - #alltids = fitsio.read(lssdir+'BGS_ANY_full_noveto.dat.fits',columns=['TARGETID']) - #alltids = np.unique(alltids['TARGETID']) - if args.prog == 'DARK': if args.tracers == 'all': From ab6b7e4099effbaf33c1ee6598cc5148783c59eb Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 3 Apr 2024 14:53:22 -0400 Subject: [PATCH 276/297] Update get_DR1_bitweights.py --- scripts/get_DR1_bitweights.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/scripts/get_DR1_bitweights.py b/scripts/get_DR1_bitweights.py index 40f32ba30..49a1e3148 100644 --- a/scripts/get_DR1_bitweights.py +++ b/scripts/get_DR1_bitweights.py @@ -89,8 +89,13 @@ def get_all_asgn(indir): #get the list of good tilelocid -mainp = main('LRG','iron') -pdir = 'dark' +if args.prog == 'DARK': + mainp = main('LRG','iron') + pdir = 'dark' +else: + mainp = main('BGS','iron') + pdir = 'bright' + mt = mainp.mtld tiles = mainp.tiles From 90ee526ff36a32a32ee721cd0ea0c1fbc93f699f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 3 Apr 2024 15:27:49 -0400 Subject: [PATCH 277/297] Update add_DR1_bitweights_all.py --- scripts/add_DR1_bitweights_all.py | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/scripts/add_DR1_bitweights_all.py b/scripts/add_DR1_bitweights_all.py index 0c7568799..5e6c14a5f 100644 --- a/scripts/add_DR1_bitweights_all.py +++ b/scripts/add_DR1_bitweights_all.py @@ -14,6 +14,7 @@ parser.add_argument("--prog", choices=['DARK','BRIGHT'],default='DARK') parser.add_argument("--cat_version",default='test') parser.add_argument("--tracers",default='all') +parser.add_argument("--replace",default='y') args = parser.parse_args() @@ -42,13 +43,23 @@ fll = ['clustering','NGC_clustering','SGC_clustering'] #full catalogs don't really make sense for abs mag cut because they require a redshift for ft in fll: inflnm = lssdir+tp+'_'+ft+'.dat.fits' - infl = fitsio.read(lssdir+tp+'_'+ft+'.dat.fits') - li = len(infl) - infl = join(infl,bitf,keys=['TARGETID'],join_type='left') - lij = len(infl) - if li == lij: - common.write_LSS(infl,inflnm) - else: - print('mismatch after join!') - print(tp,li,lij) + infl = Table(fitsio.read(lssdir+tp+'_'+ft+'.dat.fits')) + cols = list(infl.dtype.names) + dojoin = 'y' + if 'PROB_OBS' in cols + if args.replace == 'y': + print('removing columns before adding info back') + infl.remove_columns(['PROB_OBS','BITWEIGHTS']) + else: + dojoin = 'n' + print('PROB_OBS is in original and replace is set to n, so just moving to next file') + if dojoin == 'y': + li = len(infl) + infl = join(infl,bitf,keys=['TARGETID'],join_type='left') + lij = len(infl) + if li == lij: + common.write_LSS(infl,inflnm) + else: + print('mismatch after join!') + print(tp,li,lij) From 2dd5baa853aba4f875cd7b161b714fdec6a896ed Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 3 Apr 2024 15:28:25 -0400 Subject: [PATCH 278/297] Update add_DR1_bitweights_all.py --- scripts/add_DR1_bitweights_all.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/add_DR1_bitweights_all.py b/scripts/add_DR1_bitweights_all.py index 5e6c14a5f..8b3a3b37b 100644 --- a/scripts/add_DR1_bitweights_all.py +++ b/scripts/add_DR1_bitweights_all.py @@ -46,7 +46,7 @@ infl = Table(fitsio.read(lssdir+tp+'_'+ft+'.dat.fits')) cols = list(infl.dtype.names) dojoin = 'y' - if 'PROB_OBS' in cols + if 'PROB_OBS' in cols: if args.replace == 'y': print('removing columns before adding info back') infl.remove_columns(['PROB_OBS','BITWEIGHTS']) From 
f5475950502ffb275455b2b3fcf4c1d7dfee2e6d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Wed, 3 Apr 2024 15:28:52 -0400 Subject: [PATCH 279/297] Update add_DR1_bitweights_all.py --- scripts/add_DR1_bitweights_all.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/scripts/add_DR1_bitweights_all.py b/scripts/add_DR1_bitweights_all.py index 8b3a3b37b..24e5e1e4e 100644 --- a/scripts/add_DR1_bitweights_all.py +++ b/scripts/add_DR1_bitweights_all.py @@ -54,12 +54,12 @@ dojoin = 'n' print('PROB_OBS is in original and replace is set to n, so just moving to next file') if dojoin == 'y': - li = len(infl) - infl = join(infl,bitf,keys=['TARGETID'],join_type='left') - lij = len(infl) - if li == lij: - common.write_LSS(infl,inflnm) - else: - print('mismatch after join!') - print(tp,li,lij) + li = len(infl) + infl = join(infl,bitf,keys=['TARGETID'],join_type='left') + lij = len(infl) + if li == lij: + common.write_LSS(infl,inflnm) + else: + print('mismatch after join!') + print(tp,li,lij) From 4217b4b5ff4a13786699e2659bf1fec37f26b91f Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 4 Apr 2024 11:42:02 -0400 Subject: [PATCH 280/297] Update readwrite_pixel_bitmasknobs.py --- scripts/readwrite_pixel_bitmasknobs.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/readwrite_pixel_bitmasknobs.py b/scripts/readwrite_pixel_bitmasknobs.py index 3a2f23b2f..b280ecea2 100644 --- a/scripts/readwrite_pixel_bitmasknobs.py +++ b/scripts/readwrite_pixel_bitmasknobs.py @@ -28,7 +28,7 @@ parser.add_argument('--mock_number', default = 0, required = False) parser.add_argument('--outdir', default = '', required=False ) parser.add_argument('--overwrite', default = 'n', required=False ) -parser.add_argument('--n_processes', default = 32, required=False ,type=int) +parser.add_argument('--n_processes', default = 128, required=False ,type=int) args = parser.parse_args() @@ -183,7 +183,10 @@ def wrapper(bid_index): res.remove_column('idx') #if output_path.endswith('.fits'): -res.write(output_path,overwrite=True) +ow = False +if args.overwrite == 'y': + ow = True +res.write(output_path,overwrite=ow) #else: # np.write(output_path, np.array(res['masknobs'])) From 7680f47beb48a5a996ab89001d6aa28d51a38215 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 4 Apr 2024 11:46:38 -0400 Subject: [PATCH 281/297] Update readwrite_pixel_bitmasknobs.py --- scripts/readwrite_pixel_bitmasknobs.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/readwrite_pixel_bitmasknobs.py b/scripts/readwrite_pixel_bitmasknobs.py index b280ecea2..ca1a10366 100644 --- a/scripts/readwrite_pixel_bitmasknobs.py +++ b/scripts/readwrite_pixel_bitmasknobs.py @@ -28,6 +28,7 @@ parser.add_argument('--mock_number', default = 0, required = False) parser.add_argument('--outdir', default = '', required=False ) parser.add_argument('--overwrite', default = 'n', required=False ) +parser.add_argument('--test', default = 'n', required=False ) parser.add_argument('--n_processes', default = 128, required=False ,type=int) args = parser.parse_args() @@ -70,8 +71,10 @@ # output_path = '/global/cscratch1/sd/rongpu/temp/randoms-1-0-lrgmask_v1.fits' if os.path.isfile(output_path): - if args.overwrite == 'n': + if args.overwrite == 'n' and args.test == 'n': raise ValueError(output_path+' already exists!') + if args.overwrite == 'n' and args.test == 'y': + print('will run and get timing, but no output will be written (because path exists)') if args.overwrite == 'y': print('will overwrite 
'+output_path) From 8b3eb0f07d437bc1c53ec1ab53fe19dbd7396bd2 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 4 Apr 2024 12:12:18 -0400 Subject: [PATCH 282/297] Update mkCat_main.py --- scripts/main/mkCat_main.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/main/mkCat_main.py b/scripts/main/mkCat_main.py index 9d2a69997..2d983c8d8 100644 --- a/scripts/main/mkCat_main.py +++ b/scripts/main/mkCat_main.py @@ -64,7 +64,7 @@ parser.add_argument("--fillran", help="add imaging properties to randoms",default='n') parser.add_argument("--clusd", help="make the 'clustering' catalog intended for paircounts",default='n') parser.add_argument("--clusran", help="make the random clustering files; these are cut to a small subset of columns",default='n') -parser.add_argument("--minr", help="minimum number for random files",default=0) +parser.add_argument("--minr", help="minimum number for random files",default=0,type=int) parser.add_argument("--maxr", help="maximum for random files, 18 are available (use parallel script for all)",default=18,type=int) parser.add_argument("--nz", help="get n(z) for type and all subtypes",default='n') parser.add_argument("--nzfull", help="get n(z) from full files",default='n') @@ -1077,7 +1077,7 @@ def _wrapper(N): if mkclusdat: ct.mkclusdat(dirout+type+notqso,tp=type,dchi2=dchi2,tsnrcut=tsnrcut,zmin=zmin,zmax=zmax,wsyscol=args.imsys_colname,use_map_veto=args.use_map_veto)#,ntilecut=ntile,ccut=ccut) -inds = np.arange(args.minr,args.maxr) +inds = np.arange(rm,rx) if mkclusran: print('doing clustering randoms (possibly a 2nd time to get sys columns in)') # tsnrcol = 'TSNR2_ELG' @@ -1112,7 +1112,7 @@ def _parfun_cr(ii): if args.NStoGC == 'y': fb = dirout+tracer_clus+'_' - ct.clusNStoGC(fb, args.maxr - args.minr)#,par=args.par) + ct.clusNStoGC(fb, rx - rm)#,par=args.par) if type == 'QSO': @@ -1133,7 +1133,7 @@ def _parfun_cr(ii): if type[:3] == 'BGS': P0 = 7000 -nran = args.maxr-args.minr +nran = rx-rm regions = ['NGC', 'SGC'] def splitGC(flroot,datran='.dat',rann=0): From 77549ccbd530c955082e0d7996a3d64de56694b5 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 4 Apr 2024 12:26:53 -0400 Subject: [PATCH 283/297] Update readwrite_pixel_bitmasknobs.py --- scripts/readwrite_pixel_bitmasknobs.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/scripts/readwrite_pixel_bitmasknobs.py b/scripts/readwrite_pixel_bitmasknobs.py index ca1a10366..5d7690e83 100644 --- a/scripts/readwrite_pixel_bitmasknobs.py +++ b/scripts/readwrite_pixel_bitmasknobs.py @@ -69,8 +69,9 @@ # input_path = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits' # output_path = '/global/cscratch1/sd/rongpu/temp/randoms-1-0-lrgmask_v1.fits' - +fe = False if os.path.isfile(output_path): + fe = True if args.overwrite == 'n' and args.test == 'n': raise ValueError(output_path+' already exists!') if args.overwrite == 'n' and args.test == 'y': @@ -186,10 +187,10 @@ def wrapper(bid_index): res.remove_column('idx') #if output_path.endswith('.fits'): -ow = False -if args.overwrite == 'y': - ow = True -res.write(output_path,overwrite=ow) +#ow = False +if fe == False or args.overwrite == 'y': + #ow = True + res.write(output_path,overwrite=True) #else: # np.write(output_path, np.array(res['masknobs'])) From c22858a608cfef43ff074eb49325b4a9994f865b Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 4 Apr 2024 22:41:36 -0400 Subject: [PATCH 284/297] Update readwrite_pixel_bitmasknobs.py --- 
scripts/readwrite_pixel_bitmasknobs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/readwrite_pixel_bitmasknobs.py b/scripts/readwrite_pixel_bitmasknobs.py index 5d7690e83..4a94aab9e 100644 --- a/scripts/readwrite_pixel_bitmasknobs.py +++ b/scripts/readwrite_pixel_bitmasknobs.py @@ -43,8 +43,8 @@ if args.do_randoms == 'n': fa_num = args.abacus_fa_num str_fa_num = str(fa_num) - input_dir = "/global/cfs/cdirs/desi/survey/catalogs/main/mocks/FirstGenMocks/AbacusSummit/" - input_path = '/global/cfs/cdirs/desi/survey/catalogs/main/mocks/FirstGenMocks/AbacusSummit/forFA' + str_fa_num + '.fits' + input_dir = "/global/cfs/cdirs/desi/survey/catalogs/main/mocks/FirstGenMocks/AbacusSummit_v4_1/" + input_path = '/global/cfs/cdirs/desi/survey/catalogs/main/mocks/FirstGenMocks/AbacusSummit_v4_1/forFA' + str_fa_num + '.fits' if args.outdir != '': output_path = args.outdir + "/" + "forFA" + str_fa_num + "_matched_input_full_masknobs.fits" elif args.outdir == '': From 7d99acbd47e2b76820596c98453d387ca6262489 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 4 Apr 2024 22:46:50 -0400 Subject: [PATCH 285/297] Update readwrite_pixel_bitmasknobs.py --- scripts/readwrite_pixel_bitmasknobs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/readwrite_pixel_bitmasknobs.py b/scripts/readwrite_pixel_bitmasknobs.py index 4a94aab9e..a9f20a7fd 100644 --- a/scripts/readwrite_pixel_bitmasknobs.py +++ b/scripts/readwrite_pixel_bitmasknobs.py @@ -43,8 +43,8 @@ if args.do_randoms == 'n': fa_num = args.abacus_fa_num str_fa_num = str(fa_num) - input_dir = "/global/cfs/cdirs/desi/survey/catalogs/main/mocks/FirstGenMocks/AbacusSummit_v4_1/" - input_path = '/global/cfs/cdirs/desi/survey/catalogs/main/mocks/FirstGenMocks/AbacusSummit_v4_1/forFA' + str_fa_num + '.fits' + input_dir = "/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/" + input_path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/forFA' + str_fa_num + '.fits' if args.outdir != '': output_path = args.outdir + "/" + "forFA" + str_fa_num + "_matched_input_full_masknobs.fits" elif args.outdir == '': From da7c88e77a3d0a27645cc4c6b50119a791621f49 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 4 Apr 2024 23:27:58 -0400 Subject: [PATCH 286/297] Create readwrite_pixel_bitmasknobs_mpi.py --- scripts/readwrite_pixel_bitmasknobs_mpi.py | 154 +++++++++++++++++++++ 1 file changed, 154 insertions(+) create mode 100644 scripts/readwrite_pixel_bitmasknobs_mpi.py diff --git a/scripts/readwrite_pixel_bitmasknobs_mpi.py b/scripts/readwrite_pixel_bitmasknobs_mpi.py new file mode 100644 index 000000000..371016ec6 --- /dev/null +++ b/scripts/readwrite_pixel_bitmasknobs_mpi.py @@ -0,0 +1,154 @@ +# Get bitmask values from pixel-level per-brick masks for a catalog +# Examples: +# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmaskmasknobs.py +from __future__ import division, print_function +import sys, os, glob, time, warnings, gc +import numpy as np +import matplotlib.pyplot as plt +from astropy.table import Table, vstack, hstack, join +import fitsio + +from astropy.io import fits +from astropy import wcs + +from multiprocessing import Pool +import argparse + +from mockfactory.desi import get_brick_pixel_quantities + + +import os +import logging + +import fitsio +import numpy as np +from mpi4py import MPI + + +# create logger +logname = 'masknobs' +logger = logging.getLogger(logname) +logger.setLevel(logging.INFO) + +# 
create console handler and set level to debug +ch = logging.StreamHandler() +ch.setLevel(logging.INFO) + +# create formatter +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + +# add formatter to ch +ch.setFormatter(formatter) + +# add ch to logger +logger.addHandler(ch) + + + +#time_start = time.time() + + + +parser = argparse.ArgumentParser() +parser.add_argument( '--cat_type', default='obielg', choices=['obielg', 'abacus'],required=False) +parser.add_argument( '--reg', default='north', choices=['north','south'],required=False) +parser.add_argument('--abacus_fa_num', default = 0, required = False) +parser.add_argument('--do_randoms', default = 'n', choices = ['n','y'], required = False) +parser.add_argument('--random_tracer', default = 'LRG', required = False) +parser.add_argument('--mock_number', default = 0, required = False) +parser.add_argument('--outdir', default = '', required=False ) +parser.add_argument('--overwrite', default = 'n', required=False ) +parser.add_argument('--test', default = 'n', required=False ) + +args = parser.parse_args() + + +if args.cat_type == 'obielg': + input_path = '/global/cfs/cdirs/desi/survey/catalogs/image_simulations/ELG/dr9/Y1/'+args.reg+'/file0_rs0_skip0/merged/matched_input_full.fits' + output_path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/elg_obiwan_'+args.reg+'_matched_input_full_masknobs.fits' + +if args.cat_type == 'abacus': + if args.do_randoms == 'n': + fa_num = args.abacus_fa_num + str_fa_num = str(fa_num) + input_dir = "/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/" + input_path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/forFA' + str_fa_num + '.fits' + if args.outdir != '': + output_path = args.outdir + "/" + "forFA" + str_fa_num + "_matched_input_full_masknobs.fits" + elif args.outdir == '': + output_path = input_dir + "forFA" + str_fa_num + "_matched_input_full_masknobs.fits" + print(output_path) + + elif args.do_randoms == 'y': + ran_tr = args.random_tracer + mockno = args.mock_number + print("Running for Mock %s on Tracer %s"%(mockno, ran_tr)) + input_dir = "/global/cfs/cdirs/desi/survey/catalogs/main/mocks/FirstGenMocks/AbacusSummit/Y1/mock%s/LSScats/"%(mockno) + input_path = "/global/cfs/cdirs/desi/survey/catalogs/main/mocks/FirstGenMocks/AbacusSummit/Y1/mock%s/LSScats/%s_1_full_noveto.ran.fits"%(mockno, ran_tr) + if args.outdir != '': + output_path = args.outdir + "/" + ran_tr + "_1_full_matched_input_full_masknobs.ran.fits" + print("Output to " + output_path) + elif args.outdir == '': + output_path = input_dir + ran_tr + "_1_full_matched_input_full_masknobs.ran.fits" + print("Output to " + output_path) + + +bitmask_dir = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/' + +# input_path = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.49.0/randoms/resolve/randoms-1-0.fits' +# output_path = '/global/cscratch1/sd/rongpu/temp/randoms-1-0-lrgmask_v1.fits' +fe = False + +mpicomm = MPI.COMM_WORLD +mpiroot = 0 + + +if mpicomm.rank == mpiroot: + if os.path.isfile(output_path): + fe = True + if args.overwrite == 'n' and args.test == 'n': + raise ValueError(output_path+' already exists!') + if args.overwrite == 'n' and args.test == 'y': + logger.info('will run and get timing, but no output will be written (because path exists)') + if args.overwrite == 'y': + logger.info('will overwrite '+output_path) + + + + if args.cat_type == 'obielg': + cat = Table(fitsio.read(input_path,columns=['input_ra','input_dec'])) + 
cat.rename_columns(['input_ra', 'input_dec'], ['RA', 'DEC']) + else: + cat = Table(fitsio.read(input_path)) + + logger.info('loaded catalog length '+str(len(cat))) + + for col in cat.colnames: + cat.rename_column(col, col.upper()) + + if 'TARGETID' not in cat.colnames: + cat['TARGETID'] = np.arange(len(cat)) + + if 'TARGET_RA' in cat.colnames: + cat.rename_columns(['TARGET_RA', 'TARGET_DEC'], ['RA', 'DEC']) + + if 'BRICKID' not in cat.colnames: + from desiutil import brick + tmp = brick.Bricks(bricksize=0.25) + cat['BRICKID'] = tmp.brickid(cat['RA'], cat['DEC']) + + ra, dec = cat['RA'], cat['DEC'] + +columns['maskbits'] = {'fn': '/dvs_ro/cfs/cdirs/cosmo/data/legacysurvey/dr9/{region}/coadd/{brickname:.3s}/{brickname}/legacysurvey-{brickname}-maskbits.fits.fz', 'dtype': 'i2', 'default': 1} +bl = ['g','r','z'] +for band in bl: + columns['nobs_'+band] = {'fn': '/dvs_ro/cfs/cdirs/cosmo/data/legacysurvey/dr9/{region}/coadd/{brickname:.3s}/{brickname}/legacysurvey-{brickname}-nexp-'+band+'.fits.fz', 'dtype': 'i2', 'default': 0} +columns['brickname'] = None +columns['photsys'] = None +catalog = get_brick_pixel_quantities(ra, dec, columns, mpicomm=mpicomm) +if mpicomm.rank == 0: + logger.info('Output columns are {}.'.format(list(catalog.keys()))) + logger.info('Pixel-level quantities read in {:2.2f} s.'.format(MPI.Wtime() - start)) + + +logger.info('Done!') From 04efdad7c8c476762f47c321cd73165dff41eb9d Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Thu, 4 Apr 2024 23:30:15 -0400 Subject: [PATCH 287/297] Update readwrite_pixel_bitmasknobs_mpi.py --- scripts/readwrite_pixel_bitmasknobs_mpi.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/readwrite_pixel_bitmasknobs_mpi.py b/scripts/readwrite_pixel_bitmasknobs_mpi.py index 371016ec6..bd44d91ae 100644 --- a/scripts/readwrite_pixel_bitmasknobs_mpi.py +++ b/scripts/readwrite_pixel_bitmasknobs_mpi.py @@ -139,6 +139,8 @@ ra, dec = cat['RA'], cat['DEC'] +start = MPI.Wtime() +columns = {} columns['maskbits'] = {'fn': '/dvs_ro/cfs/cdirs/cosmo/data/legacysurvey/dr9/{region}/coadd/{brickname:.3s}/{brickname}/legacysurvey-{brickname}-maskbits.fits.fz', 'dtype': 'i2', 'default': 1} bl = ['g','r','z'] for band in bl: From 26ad701f7ae51965c49bb86a62cbca1c44ac4458 Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Thu, 4 Apr 2024 09:11:44 -0700 Subject: [PATCH 288/297] some lines to bitweights mock --- bin/MakeBitweights_mock.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/bin/MakeBitweights_mock.py b/bin/MakeBitweights_mock.py index cce25002b..02bbfed40 100755 --- a/bin/MakeBitweights_mock.py +++ b/bin/MakeBitweights_mock.py @@ -58,15 +58,17 @@ -from LSS.globals import main -mainp = main('All', 'iron', survey='Y1') -specf = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/datcomb_dark_spec_zdone.fits' -with fitsio.FITS(specf.replace('global', 'dvs_ro')) as hdulist: - fs = hdulist[1].read() #specf.replace('global', 'dvs_ro')) -fs = common.cut_specdat(fs, mainp.badfib) -fs = Table(fs) -fs['TILELOCID'] = 10000*fs['TILEID'] +fs['LOCATION'] -gtl = np.unique(fs['TILELOCID']) +#from LSS.globals import main +#mainp = main('All', 'iron', survey='Y1') +#specf = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/iron/datcomb_dark_spec_zdone.fits' +#with fitsio.FITS(specf.replace('global', 'dvs_ro')) as hdulist: +# fs = hdulist[1].read() #specf.replace('global', 'dvs_ro')) +#fs = common.cut_specdat(fs, mainp.badfib) +#fs = Table(fs) +#fs['TILELOCID'] = 10000*fs['TILEID'] +fs['LOCATION'] +#gtl = 
np.unique(fs['TILELOCID']) + +gtl = np.loadtxt('/pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/mynew_gtl.txt',unpack=True,dtype=int) def procFunc(nproc): # print(gtl) From 9053f0778442f41dfa65e1be9b3fffe999a8dc4a Mon Sep 17 00:00:00 2001 From: Aurelio Carnero Rosell Date: Fri, 5 Apr 2024 04:12:34 -0700 Subject: [PATCH 289/297] changes --- bin/Y1Bitweights128RealizationsDARK_mock.sh | 12 +- bin/dateLoopAltMTLBugFix.sh | 2 +- scripts/mock_tools/add_extra_tilesTracker.py | 8 +- scripts/mock_tools/initialize_amtl_mocks.py | 103 ++++++++++++++++++ .../run_Y1SecondGen_initialledger_batch.sh | 9 +- scripts/mock_tools/run_mtl_ledger.py | 37 ------- 6 files changed, 118 insertions(+), 53 deletions(-) create mode 100644 scripts/mock_tools/initialize_amtl_mocks.py delete mode 100644 scripts/mock_tools/run_mtl_ledger.py diff --git a/bin/Y1Bitweights128RealizationsDARK_mock.sh b/bin/Y1Bitweights128RealizationsDARK_mock.sh index aaae0f827..737b8f217 100755 --- a/bin/Y1Bitweights128RealizationsDARK_mock.sh +++ b/bin/Y1Bitweights128RealizationsDARK_mock.sh @@ -320,20 +320,20 @@ exit 54321 -printf -v OFBW "%s/MakeBitweights%sOutput%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring -srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py --survey=$survey --obscon=$obscon --ndir=$ndir --ProcPerNode=$ProcPerNode --HPListFile=$hpListFile --outdir=$outputMTLFinalDestination $overwrite2 $verbose $debug >& $OFBW +#printf -v OFBW "%s/MakeBitweights%sOutput%sRepro%s.out" $outputMTLFinalDestination $obscon $survey $datestring +#srun --nodes=1 -C $CVal -q $QVal -A desi -t 04:00:00 --mem=120000 $path2LSS/MakeBitweights.py --survey=$survey --obscon=$obscon --ndir=$ndir --ProcPerNode=$ProcPerNode --HPListFile=$hpListFile --outdir=$outputMTLFinalDestination $overwrite2 $verbose $debug >& $OFBW -endBW=`date +%s.%N` +#endBW=`date +%s.%N` runtimeInit=$( echo "$endInit - $start" | bc -l ) runtimeDateLoop=$( echo "$endDL - $endInit" | bc -l ) -runtimeBitweights=$( echo "$endBW - $endDL" | bc -l ) +#runtimeBitweights=$( echo "$endBW - $endDL" | bc -l ) echo "runtime for initialization" echo $runtimeInit echo "runtime for Dateloop of $NObsDates days" echo $runtimeDateLoop -echo "runtime for making bitweights" -echo $runtimeBitweights +#echo "runtime for making bitweights" +#echo $runtimeBitweights diff --git a/bin/dateLoopAltMTLBugFix.sh b/bin/dateLoopAltMTLBugFix.sh index 3502ff8fb..abfdc2455 100755 --- a/bin/dateLoopAltMTLBugFix.sh +++ b/bin/dateLoopAltMTLBugFix.sh @@ -37,7 +37,7 @@ fi if [ $QVal = 'regular' ]; then - srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:21570852 $path2LSS/runAltMTLParallel.py $argstring + srun --nodes=$NNodes -C $CVal --qos=$QVal -A desi -t 12:00:00 --dependency=afterany:23940763 $path2LSS/runAltMTLParallel.py $argstring fi if [ $QVal = 'debug' ]; diff --git a/scripts/mock_tools/add_extra_tilesTracker.py b/scripts/mock_tools/add_extra_tilesTracker.py index 4fe9a8c80..cfda70511 100644 --- a/scripts/mock_tools/add_extra_tilesTracker.py +++ b/scripts/mock_tools/add_extra_tilesTracker.py @@ -6,9 +6,9 @@ #program = 'bright' rmin = 0 -rmax = 25 +rmax = 1 -path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl{MOCKNUM}/Univ000' +#path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4_1/altmtl{MOCKNUM}/Univ000' #path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummitBGS_v2/altmtl{MOCKNUM}/Univ000' extratiles 
= Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/aux_data/extra_{PROGRAM}.ecsv'.format(PROGRAM = program), format='ascii.ecsv')
@@ -17,9 +17,9 @@
 print(tileref)
 
 for i in range(rmin, rmax):
-    input_track = os.path.join(path, 'mainsurvey-{PRG}obscon-TileTracker.ecsv').format(MOCKNUM = i, PRG=program.upper())
+    input_track = 'mainsurvey-{PRG}obscon-TileTracker.ecsv'.format(MOCKNUM = i, PRG=program.upper())
     tiles = Table.read(input_track, format='ascii.ecsv')
-    tiles.meta['amtldir'] = path.format(MOCKNUM = i)
+    tiles.meta['amtldir'] = './' #path.format(MOCKNUM = i)
     if tiles['TILEID'][-1] != tileref:
         print('merging for mock', i)
         newtable = vstack([tiles, extratiles])
diff --git a/scripts/mock_tools/initialize_amtl_mocks.py b/scripts/mock_tools/initialize_amtl_mocks.py
new file mode 100644
index 000000000..f8d04c5a0
--- /dev/null
+++ b/scripts/mock_tools/initialize_amtl_mocks.py
@@ -0,0 +1,103 @@
+from desitarget import mtl
+import sys
+import glob
+import os
+import errno
+from LSS.SV3 import altmtltools as amtl
+from astropy.table import Table, vstack
+
+def create_dirs(value):
+    if not os.path.exists(value):
+        try:
+            os.makedirs(value, 0o755)
+            print('made ' + value)
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+
+
+#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j DARK
+
+par=True
+
+arg1 = sys.argv[1].replace('global','dvs_ro') #Input mock
+arg2 = sys.argv[2] #Output path
+obscon = sys.argv[3] #DARK or BRIGHT
+
+
+initledger_path = os.path.join(arg2, 'initled')
+
+altmtl_path = os.path.join(arg2, 'Univ000')
+
+'''
+print('Running initial ledgers')
+if par:
+    mtl.make_ledger(arg1, initledger_path, obscon=obscon.upper(), numproc=12)
+else:
+    mtl.make_ledger(arg1, initledger_path, obscon=obscon.upper())
+'''
+
+print('Creating list of tiles to be processed by AltMTL mock production')
+
+path = os.path.join(initledger_path, 'main', obscon.lower())
+
+ff = glob.glob(os.path.join(path, 'mtl-{obscon}-hp-*.ecsv'.format(obscon=obscon.lower())))
+
+
+dd=[]
+
+for f in ff:
+    dd.append(int(f.split('hp-')[-1].split('.ecsv')[0]))
+tosave = ','.join(map(str, sorted(dd)))
+
+savepath = os.path.join(initledger_path, 'hpxlist_{obscon}.txt'.format(obscon = obscon.lower()))
+
+ff = open(savepath, 'w')
+ff.write(tosave)
+ff.close()
+
+print('saving list of HP ledgers in '+savepath)
+path_to_altmtl = os.path.join(altmtl_path, 'main', obscon.lower())
+
+print('Copying initial ledgers to altmtl directory ', path_to_altmtl)
+
+create_dirs(path_to_altmtl)
+
+os.system('cp %s %s' % (os.path.join(path,'*'), path_to_altmtl))
+
+print('Creating tileTracker file and tilestatus file')
+
+startDateShort = 19990101
+endDate='2022-06-24T00:00:00+00:00'
+
+if ('T' in endDate) & ('-' in endDate):
+    endDateShort = int(endDate.split('T')[0].replace('-', ''))
+else:
+    endDateShort = int(endDate)
+
+amtl.makeTileTracker(altmtl_path, survey = 'main', obscon = obscon.upper(), startDate = startDateShort, endDate = endDateShort, overwrite = True)
+
+ztilefile = '/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-specstatus.ecsv'
+
+ztilefn = ztilefile.split('/')[-1]
+
+if not os.path.isfile(os.path.join(altmtl_path, ztilefn)):
+    amtl.processTileFile(ztilefile, os.path.join(altmtl_path, ztilefn), None, endDate)
+
+
+extratiles = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit_v4/aux_data/extra_{obscon}.ecsv'.format(obscon = obscon.lower()), format='ascii.ecsv')
+
+tileref = extratiles['TILEID'][-1]
+#print(tileref)
+input_track = os.path.join(altmtl_path, 'mainsurvey-{OBSCON}obscon-TileTracker.ecsv'.format(OBSCON = obscon.upper()))
+tiles = Table.read(input_track, format='ascii.ecsv')
+tiles.meta['amtldir'] = altmtl_path
+
+if tiles['TILEID'][-1] != tileref:
+    print('Adding extra tiles')
+    newtable = vstack([tiles, extratiles])
+    newtable.meta = tiles.meta
+    newtable.write(input_track, overwrite=True)
+else:
+    print('It has already been merged')
diff --git a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh
index 935b9ddc0..5c287a939 100755
--- a/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh
+++ b/scripts/mock_tools/run_Y1SecondGen_initialledger_batch.sh
@@ -1,9 +1,8 @@
+OBSCON=DARK #BRIGHT
+
 SeconGenVer=AbacusSummit_v4_1 #AbacusSummitBGS_v2
-for j in {1..24}
+for j in {0..24}
 do
-#j=0
 echo $j
-#echo $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled
-#python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled BRIGHT
-python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/run_mtl_ledger.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j/initled DARK
+python /pscratch/sd/a/acarnero/codes/LSS/scripts/mock_tools/initialize_amtl_mocks.py $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/forFA$j.fits $DESI_ROOT/survey/catalogs/Y1/mocks/SecondGenMocks/$SeconGenVer/altmtl$j $OBSCON
 done
diff --git a/scripts/mock_tools/run_mtl_ledger.py b/scripts/mock_tools/run_mtl_ledger.py
deleted file mode 100644
index 2b5965780..000000000
--- a/scripts/mock_tools/run_mtl_ledger.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from desitarget import mtl
-import sys
-import glob
-import os
-
-par=True
-
-arg1 = sys.argv[1] #Input mock
-arg2 = sys.argv[2] #Output path
-obscon = sys.argv[3] #DARK or BRIGHT
-
-print('Running initial ledgers')
-if par:
-    mtl.make_ledger(arg1, arg2, obscon=obscon.upper(), numproc=12)
-else:
-    mtl.make_ledger(arg1, arg2, obscon=obscon.upper())
-
-
-print('Creating list of tiles to be processed by AltMTL mock production')
-
-path = os.path.join(arg2, 'main', obscon.lower())
-
-ff = glob.glob(os.path.join(path, 'mtl-{obscon}-hp-*.ecsv'.format(obscon=obscon.lower())))
-
-dd=[]
-
-for f in ff:
-    dd.append(int(f.split('hp-')[-1].split('.ecsv')[0]))
-tosave = ','.join(map(str, sorted(dd)))
-
-savepath = os.path.join(arg2, 'hpxlist_{obscon}.txt'.format(obscon = obscon.lower()))
-
-ff = open(savepath, 'w')
-ff.write(tosave)
-ff.close()
-
-print('saving list of HP ledgers in '+savepath)
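
The new scripts/mock_tools/initialize_amtl_mocks.py above performs, per mock, the steps that previously spanned several scripts: it (optionally) writes the initial MTL ledgers with desitarget's mtl.make_ledger, records the populated healpix ledgers in hpxlist_{obscon}.txt, copies the ledgers into Univ000/main/{obscon}, builds the TileTracker with altmtltools.makeTileTracker, and appends the extra tiles if they are not already present. A minimal sketch of reading back the healpix list it writes; the helper name read_hpxlist is illustrative and not part of the repository:

import os

def read_hpxlist(initled_dir, obscon):
    # hpxlist_{obscon}.txt holds one comma-separated line of healpix pixel
    # numbers, as written by initialize_amtl_mocks.py above
    path = os.path.join(initled_dir, 'hpxlist_{obscon}.txt'.format(obscon=obscon.lower()))
    with open(path) as f:
        return [int(hp) for hp in f.read().strip().split(',')]

# e.g. hplist = read_hpxlist('altmtl0/initled', 'DARK')

From a45986c23b4f6f8d5aebab7fe84af7b1c8f97683 Mon Sep 17 00:00:00 2001
From: ashleyjross
Date: Fri, 5 Apr 2024 11:13:58 -0400
Subject: [PATCH 290/297] Update readwrite_pixel_bitmasknobs_mpi.py

---
 scripts/readwrite_pixel_bitmasknobs_mpi.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/scripts/readwrite_pixel_bitmasknobs_mpi.py b/scripts/readwrite_pixel_bitmasknobs_mpi.py
index bd44d91ae..3520a6358 100644
--- 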
a/scripts/readwrite_pixel_bitmasknobs_mpi.py +++ b/scripts/readwrite_pixel_bitmasknobs_mpi.py @@ -1,6 +1,4 @@ # Get bitmask values from pixel-level per-brick masks for a catalog -# Examples: -# srun -N 1 -C haswell -c 64 -t 04:00:00 -q interactive python read_pixel_bitmaskmasknobs.py from __future__ import division, print_function import sys, os, glob, time, warnings, gc import numpy as np @@ -62,7 +60,7 @@ args = parser.parse_args() - +output_path = None if args.cat_type == 'obielg': input_path = '/global/cfs/cdirs/desi/survey/catalogs/image_simulations/ELG/dr9/Y1/'+args.reg+'/file0_rs0_skip0/merged/matched_input_full.fits' output_path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/elg_obiwan_'+args.reg+'_matched_input_full_masknobs.fits' @@ -91,7 +89,11 @@ elif args.outdir == '': output_path = input_dir + ran_tr + "_1_full_matched_input_full_masknobs.ran.fits" print("Output to " + output_path) - + +if args.cat_type == 'genran': + from mockfactory import RandomCutskyCatalog + cutsky = RandomCutskyCatalog(rarange=(0., 180.), decrange=(0, 90.), csize=3e7, seed=44, mpicomm=mpicomm) + ra, dec = cutsky['RA'], cutsky['DEC'] bitmask_dir = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/' @@ -102,6 +104,7 @@ mpicomm = MPI.COMM_WORLD mpiroot = 0 +ra,dec = None,None if mpicomm.rank == mpiroot: if os.path.isfile(output_path): From 1fb31d84591a178053bd54447ee5ae16e9c3c2e1 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 5 Apr 2024 11:17:58 -0400 Subject: [PATCH 291/297] Update readwrite_pixel_bitmasknobs_mpi.py --- scripts/readwrite_pixel_bitmasknobs_mpi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/readwrite_pixel_bitmasknobs_mpi.py b/scripts/readwrite_pixel_bitmasknobs_mpi.py index 3520a6358..aac16b75c 100644 --- a/scripts/readwrite_pixel_bitmasknobs_mpi.py +++ b/scripts/readwrite_pixel_bitmasknobs_mpi.py @@ -48,7 +48,7 @@ parser = argparse.ArgumentParser() -parser.add_argument( '--cat_type', default='obielg', choices=['obielg', 'abacus'],required=False) +parser.add_argument( '--cat_type', default='obielg',required=False)#, choices=['obielg', 'abacus']) parser.add_argument( '--reg', default='north', choices=['north','south'],required=False) parser.add_argument('--abacus_fa_num', default = 0, required = False) parser.add_argument('--do_randoms', default = 'n', choices = ['n','y'], required = False) From 9e160e6274fd6bb259acfe25d4d33df4dcb02aa0 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 5 Apr 2024 11:24:57 -0400 Subject: [PATCH 292/297] Update readwrite_pixel_bitmasknobs_mpi.py --- scripts/readwrite_pixel_bitmasknobs_mpi.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/scripts/readwrite_pixel_bitmasknobs_mpi.py b/scripts/readwrite_pixel_bitmasknobs_mpi.py index aac16b75c..9aba60c39 100644 --- a/scripts/readwrite_pixel_bitmasknobs_mpi.py +++ b/scripts/readwrite_pixel_bitmasknobs_mpi.py @@ -60,6 +60,8 @@ args = parser.parse_args() + + output_path = None if args.cat_type == 'obielg': input_path = '/global/cfs/cdirs/desi/survey/catalogs/image_simulations/ELG/dr9/Y1/'+args.reg+'/file0_rs0_skip0/merged/matched_input_full.fits' @@ -90,6 +92,10 @@ output_path = input_dir + ran_tr + "_1_full_matched_input_full_masknobs.ran.fits" print("Output to " + output_path) +mpicomm = MPI.COMM_WORLD +mpiroot = 0 + + if args.cat_type == 'genran': from mockfactory import RandomCutskyCatalog cutsky = RandomCutskyCatalog(rarange=(0., 180.), decrange=(0, 90.), csize=3e7, seed=44, mpicomm=mpicomm) @@ -101,8 +107,6 @@ # output_path = 
'/global/cscratch1/sd/rongpu/temp/randoms-1-0-lrgmask_v1.fits' fe = False -mpicomm = MPI.COMM_WORLD -mpiroot = 0 ra,dec = None,None From 28e7b5ed0c8381c50c6a4e99ce340b2230a3e4a7 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 5 Apr 2024 11:28:30 -0400 Subject: [PATCH 293/297] Update readwrite_pixel_bitmasknobs_mpi.py --- scripts/readwrite_pixel_bitmasknobs_mpi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/readwrite_pixel_bitmasknobs_mpi.py b/scripts/readwrite_pixel_bitmasknobs_mpi.py index 9aba60c39..8fc3e579b 100644 --- a/scripts/readwrite_pixel_bitmasknobs_mpi.py +++ b/scripts/readwrite_pixel_bitmasknobs_mpi.py @@ -98,7 +98,7 @@ if args.cat_type == 'genran': from mockfactory import RandomCutskyCatalog - cutsky = RandomCutskyCatalog(rarange=(0., 180.), decrange=(0, 90.), csize=3e7, seed=44, mpicomm=mpicomm) + cutsky = RandomCutskyCatalog(rarange=(0., 180.), decrange=(0, 90.), csize=int(3e7), seed=44, mpicomm=mpicomm) ra, dec = cutsky['RA'], cutsky['DEC'] bitmask_dir = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/' From bf6eb9b6a1c17ba2ac26ddc00537d54f298fc285 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 5 Apr 2024 11:47:44 -0400 Subject: [PATCH 294/297] Update readwrite_pixel_bitmasknobs_mpi.py --- scripts/readwrite_pixel_bitmasknobs_mpi.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/scripts/readwrite_pixel_bitmasknobs_mpi.py b/scripts/readwrite_pixel_bitmasknobs_mpi.py index 8fc3e579b..117def459 100644 --- a/scripts/readwrite_pixel_bitmasknobs_mpi.py +++ b/scripts/readwrite_pixel_bitmasknobs_mpi.py @@ -62,7 +62,7 @@ -output_path = None +#output_path = None if args.cat_type == 'obielg': input_path = '/global/cfs/cdirs/desi/survey/catalogs/image_simulations/ELG/dr9/Y1/'+args.reg+'/file0_rs0_skip0/merged/matched_input_full.fits' output_path = '/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/elg_obiwan_'+args.reg+'_matched_input_full_masknobs.fits' @@ -92,13 +92,15 @@ output_path = input_dir + ran_tr + "_1_full_matched_input_full_masknobs.ran.fits" print("Output to " + output_path) +start = MPI.Wtime() mpicomm = MPI.COMM_WORLD mpiroot = 0 if args.cat_type == 'genran': from mockfactory import RandomCutskyCatalog - cutsky = RandomCutskyCatalog(rarange=(0., 180.), decrange=(0, 90.), csize=int(3e7), seed=44, mpicomm=mpicomm) + #cutsky = RandomCutskyCatalog(rarange=(0., 180.), decrange=(0, 90.), csize=int(3e7), seed=44, mpicomm=mpicomm) + cutsky = RandomCutskyCatalog(rarange=(28., 30.), decrange=(1., 2.), csize=10000, seed=44, mpicomm=mpicomm) ra, dec = cutsky['RA'], cutsky['DEC'] bitmask_dir = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/' @@ -108,9 +110,9 @@ fe = False -ra,dec = None,None +#ra,dec = None,None -if mpicomm.rank == mpiroot: +if mpicomm.rank == mpiroot and args.cat_type != 'genran': if os.path.isfile(output_path): fe = True if args.overwrite == 'n' and args.test == 'n': @@ -146,7 +148,7 @@ ra, dec = cat['RA'], cat['DEC'] -start = MPI.Wtime() + columns = {} columns['maskbits'] = {'fn': '/dvs_ro/cfs/cdirs/cosmo/data/legacysurvey/dr9/{region}/coadd/{brickname:.3s}/{brickname}/legacysurvey-{brickname}-maskbits.fits.fz', 'dtype': 'i2', 'default': 1} bl = ['g','r','z'] From 8faa5fb40b4a7f8376ee70036f61c944e41937fa Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 5 Apr 2024 11:49:32 -0400 Subject: [PATCH 295/297] Update readwrite_pixel_bitmasknobs_mpi.py --- scripts/readwrite_pixel_bitmasknobs_mpi.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/scripts/readwrite_pixel_bitmasknobs_mpi.py b/scripts/readwrite_pixel_bitmasknobs_mpi.py index 117def459..a599e5182 100644 --- a/scripts/readwrite_pixel_bitmasknobs_mpi.py +++ b/scripts/readwrite_pixel_bitmasknobs_mpi.py @@ -99,8 +99,8 @@ if args.cat_type == 'genran': from mockfactory import RandomCutskyCatalog - #cutsky = RandomCutskyCatalog(rarange=(0., 180.), decrange=(0, 90.), csize=int(3e7), seed=44, mpicomm=mpicomm) - cutsky = RandomCutskyCatalog(rarange=(28., 30.), decrange=(1., 2.), csize=10000, seed=44, mpicomm=mpicomm) + cutsky = RandomCutskyCatalog(rarange=(0., 180.), decrange=(0, 90.), csize=int(3e7), seed=44, mpicomm=mpicomm) + #cutsky = RandomCutskyCatalog(rarange=(28., 30.), decrange=(1., 2.), csize=10000, seed=44, mpicomm=mpicomm) ra, dec = cutsky['RA'], cutsky['DEC'] bitmask_dir = '/global/cfs/cdirs/cosmo/data/legacysurvey/dr9/' @@ -162,4 +162,4 @@ logger.info('Pixel-level quantities read in {:2.2f} s.'.format(MPI.Wtime() - start)) -logger.info('Done!') +#logger.info('Done!') From 54fe3e018cba473c32540b18cfe8dff252c17e1b Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 5 Apr 2024 11:53:15 -0400 Subject: [PATCH 296/297] Update readwrite_pixel_bitmasknobs_mpi.py --- scripts/readwrite_pixel_bitmasknobs_mpi.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/readwrite_pixel_bitmasknobs_mpi.py b/scripts/readwrite_pixel_bitmasknobs_mpi.py index a599e5182..7c922c603 100644 --- a/scripts/readwrite_pixel_bitmasknobs_mpi.py +++ b/scripts/readwrite_pixel_bitmasknobs_mpi.py @@ -156,6 +156,8 @@ columns['nobs_'+band] = {'fn': '/dvs_ro/cfs/cdirs/cosmo/data/legacysurvey/dr9/{region}/coadd/{brickname:.3s}/{brickname}/legacysurvey-{brickname}-nexp-'+band+'.fits.fz', 'dtype': 'i2', 'default': 0} columns['brickname'] = None columns['photsys'] = None +if mpicomm.rank == 0: + logger.info('getting brick pixel quantities') catalog = get_brick_pixel_quantities(ra, dec, columns, mpicomm=mpicomm) if mpicomm.rank == 0: logger.info('Output columns are {}.'.format(list(catalog.keys()))) From 3870ace1c776205e6982c40994eb3ff3352eb280 Mon Sep 17 00:00:00 2001 From: ashleyjross Date: Fri, 5 Apr 2024 12:35:07 -0400 Subject: [PATCH 297/297] Update readwrite_pixel_bitmasknobs_mpi.py --- scripts/readwrite_pixel_bitmasknobs_mpi.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/readwrite_pixel_bitmasknobs_mpi.py b/scripts/readwrite_pixel_bitmasknobs_mpi.py index 7c922c603..3023d5366 100644 --- a/scripts/readwrite_pixel_bitmasknobs_mpi.py +++ b/scripts/readwrite_pixel_bitmasknobs_mpi.py @@ -96,6 +96,7 @@ mpicomm = MPI.COMM_WORLD mpiroot = 0 +ra,dec = [],[] if args.cat_type == 'genran': from mockfactory import RandomCutskyCatalog
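
The weight expressions iterated on in the cosmodesi_io_tools.py patches above tie the packed fiber-assignment bitweights to the IIP (individual inverse probability) weight: with 128 alternate realizations, the data weight 129/(1+128*PROB_OBS) equals (N+1)/(1+n) for a target assigned in n of N=128 realizations, i.e. PROB_OBS = n/N. A short sketch of that bookkeeping, assuming PROB_OBS counts only the alternate realizations and that BITWEIGHTS stores them as packed 64-bit words (both function names are illustrative, not repository code):

import numpy as np

def prob_obs_from_bitweights(bitweights, nreal=128):
    # bitweights: (nobj, nwords) integer array, nreal packed realizations per row
    bitweights = np.atleast_2d(np.asarray(bitweights))
    nbits = np.zeros(bitweights.shape[0], dtype=np.int64)
    for word in bitweights.T:
        # mask to unsigned before counting set bits, in case of signed storage
        nbits += np.array([bin(int(w) & (2**64 - 1)).count('1') for w in word])
    return nbits / nreal

def w_iip(prob_obs, nreal=128):
    # same algebra as the data-weight expression above: 129/(1 + 128*PROB_OBS)
    return (nreal + 1.) / (1. + nreal * prob_obs)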