From b7d0d49505c1a52108e964a0df9e1109b6ac2686 Mon Sep 17 00:00:00 2001 From: sbourke Date: Fri, 9 Sep 2016 14:13:11 +0200 Subject: [PATCH 01/19] Fix syntax error in amplitudes_losoto3.py --- scripts/amplitudes_losoto_3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/amplitudes_losoto_3.py b/scripts/amplitudes_losoto_3.py index a65482de..f631c466 100755 --- a/scripts/amplitudes_losoto_3.py +++ b/scripts/amplitudes_losoto_3.py @@ -390,7 +390,7 @@ def running_median(ampl,half_window) : for i in range(0,len(freqs_new)): - ampsoutfile.write('%s %s %s %s %s\n'%(amptab.ant[antenna_id], antenna_id,i, np.median(amp_xx[:,i], axis=0), np.median(amp_yy[:,i], axis=0) ) + ampsoutfile.write('%s %s %s %s %s\n'%(amptab.ant[antenna_id], antenna_id,i, np.median(amp_xx[:,i], axis=0), np.median(amp_yy[:,i], axis=0))) for time in range(0,len(amptab.time[:])): amps_array[antenna_id,time,:,0] = np.copy(savitzky_golay(amp_xx[time,:], 17, 2)) From 0ec769dd7dfa181869f749ab31e4de9a8e41129b Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Wed, 14 Sep 2016 20:49:35 +0200 Subject: [PATCH 02/19] I think we don't need the parmmap step --- Pre-Facet-Target.parset | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Pre-Facet-Target.parset b/Pre-Facet-Target.parset index a0213d3d..4b6c4516 100644 --- a/Pre-Facet-Target.parset +++ b/Pre-Facet-Target.parset @@ -82,7 +82,7 @@ ### Stuff that you probably don't need to modify # which steps to run -pipeline.steps=[mk_inspect_dir, createmap_target, combine_data_target_map, get_ion_files, trans, parmmap, ndppp_prep_target, create_ateam_model_map, make_sourcedb_ateam, expand_sourcedb_ateam, predict_ateam, ateamcliptar, combine_target_map, sortmap_target, do_sortmap_maps, dpppconcat, check_unflagged, check_unflagged_map, sky_tar, create_target_model_map, make_sourcedb_target, expand_sourcedb_target, gsmcal_parmmap, gsmcal_solve, gsmcal_apply, h5_imp_gsmsol_map, h5imp_gsmsol, plot_gsm_phases, gsmcal_antmap, make_structurefunction, old_plot_gsmphases, createmap_plots, copy_plots, mk_results_dir, make_results_mapfile, move_results] +pipeline.steps=[mk_inspect_dir, createmap_target, combine_data_target_map, get_ion_files, trans, ndppp_prep_target, create_ateam_model_map, make_sourcedb_ateam, expand_sourcedb_ateam, predict_ateam, ateamcliptar, combine_target_map, sortmap_target, do_sortmap_maps, dpppconcat, check_unflagged, check_unflagged_map, sky_tar, create_target_model_map, make_sourcedb_target, expand_sourcedb_target, gsmcal_parmmap, gsmcal_solve, gsmcal_apply, h5_imp_gsmsol_map, h5imp_gsmsol, plot_gsm_phases, gsmcal_antmap, make_structurefunction, old_plot_gsmphases, createmap_plots, copy_plots, mk_results_dir, make_results_mapfile, move_results] # create the inspection_directory if needed mk_inspect_dir.control.kind = plugin @@ -129,20 +129,20 @@ trans.argument.ionex_server = {{ ionex_server }} trans.argument.ionex_prefix = {{ ionex_prefix }} trans.argument.ionexPath = {{ ionex_path }} -# generate mapfile with the parmDBs to be applied to the target data -parmmap.control.kind = plugin -parmmap.control.type = createMapfile -parmmap.control.method = add_suffix_to_file -parmmap.control.mapfile_in = createmap_target.output.mapfile -parmmap.control.add_suffix_to_file = /instrument_amp_clock_offset -parmmap.control.mapfile_dir = input.output.mapfile_dir -parmmap.control.filename = targetparmdb.mapfile +## generate mapfile with the parmDBs to be applied to the target data +#parmmap.control.kind = plugin 
+#parmmap.control.type = createMapfile +#parmmap.control.method = add_suffix_to_file +#parmmap.control.mapfile_in = createmap_target.output.mapfile +#parmmap.control.add_suffix_to_file = /instrument_amp_clock_offset +#parmmap.control.mapfile_dir = input.output.mapfile_dir +#parmmap.control.filename = targetparmdb.mapfile # run NDPPP on the target data to flag, transfer calibrator values, and average ndppp_prep_target.control.type = dppp ndppp_prep_target.control.max_per_node = {{ num_proc_per_node_limit }} ndppp_prep_target.control.error_tolerance = {{ error_tolerance }} -ndppp_prep_target.control.mapfiles_in = [createmap_target.output.mapfile, parmmap.output.mapfile] +ndppp_prep_target.control.mapfiles_in = [createmap_target.output.mapfile, trans.output.mapfile] ndppp_prep_target.control.inputkeys = [input_file, parmdb_file] ndppp_prep_target.argument.numthreads = {{ max_dppp_threads }} ndppp_prep_target.argument.msin = input_file From ea97230e85b05f0d49bf3ff333dbfc7dde2b7c19 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Wed, 14 Sep 2016 20:50:47 +0200 Subject: [PATCH 03/19] First version of transfer_gains that can work with multiple files that will be concatenated --- scripts/transfer_gains_RMextract.py | 89 +++++++++++++++++++++-------- 1 file changed, 65 insertions(+), 24 deletions(-) diff --git a/scripts/transfer_gains_RMextract.py b/scripts/transfer_gains_RMextract.py index 6717266a..b867dc17 100755 --- a/scripts/transfer_gains_RMextract.py +++ b/scripts/transfer_gains_RMextract.py @@ -60,6 +60,7 @@ def make_empty_parmdb(outname): myParmdb.addDefValues("RotationMeasure",1e-6) return myParmdb + def get_COMMONROTATION_vals(MSinfo, server, prefix, ionexPath): """ Call getRM() from RMextract to get the RM values for the opbservation, @@ -81,11 +82,23 @@ def get_COMMONROTATION_vals(MSinfo, server, prefix, ionexPath): return rmdict -def main(msname, store_basename='caldata_transfer', store_directory='.', newparmdbext='-instrument_amp_clock_offset', + +######################################################################## +def main(MSfiles, store_basename='caldata_transfer', store_directory='.', newparmdbext='-instrument_amp_clock_offset', ionex_server="ftp://ftp.unibe.ch/aiub/CODE/", ionex_prefix='CODG', ionexPath="IONEXdata/"): + mslist_unfiltered = input2strlist_nomapfile(MSfiles) + + mslist = [] + for ms in mslist_unfiltered: + if os.path.isdir(ms): + mslist.append(ms) + + if len(mslist) == 0: + raise ValueError("Did not find any existing directory in input MS list!") + # name (path) for parmdb to be written - newparmDB = msname+newparmdbext + newparmDB = mslist[0]+newparmdbext # load the numpy arrays written by the previous scripts # (filenames constructed in the same way as in these scripts) @@ -101,13 +114,27 @@ def main(msname, store_basename='caldata_transfer', store_directory='.', newparm #print "amps shape:",np.shape(amps_array) #print "clock shape:",np.shape(clock_array) - #for ms in mslist: #this script works only on one MS! - msinfo = ReadMs(msname) + msinfo = ReadMs(mslist[0]) # process first MS # this is the same for all antennas starttime = msinfo.timepara['start'] endtime = msinfo.timepara['end'] - startfreqs = msinfo.msfreqvalues-msinfo.GetFreqpara('step')/2. - endfreqs = msinfo.msfreqvalues+msinfo.GetFreqpara('step')/2. 
+ freqstep = msinfo.GetFreqpara('step') + minfreq = np.min(msinfo.msfreqvalues) + maxfreq = np.max(msinfo.msfreqvalues) + + for ms in mslist[1:]: + msinfo = ReadMs(ms) + # this is the same for all antennas + assert starttime == msinfo.timepara['start'] + assert endtime == msinfo.timepara['end'] + assert freqstep == msinfo.GetFreqpara('step') + minfreq = min(np.min(msinfo.msfreqvalues),minfreq) + maxfreq = max(np.max(msinfo.msfreqvalues),maxfreq) + + freqvalues = np.arange(minfreq, stop=(maxfreq+freqstep), step=freqstep) + startfreqs = freqvalues - freqstep/2. + endfreqs = freqvalues + freqstep/2. + ntimes = 1 nfreqs = len(startfreqs) @@ -122,12 +149,11 @@ def main(msname, store_basename='caldata_transfer', store_directory='.', newparm raise ValueError("Couldn't get RM information from RMextract! (But I don't know why.)") c = 299792458.0 - lambdaSquared = (c/msinfo.msfreqvalues)**2 - # get an array wwith the same size as rmdict['times'] but filled with rmdict['timestep'] + lambdaSquared = (c/freqvalues)**2 + # get an array with the same size as rmdict['times'] but filled with rmdict['timestep'] timesteps = np.full_like(rmdict['times'],rmdict['timestep']) # same for frequencies - freqsteps = np.full_like(msinfo.msfreqvalues,msinfo.freqpara['step']) - + freqsteps = np.full_like(freqvalues,freqstep) outDB = make_empty_parmdb(newparmDB) @@ -140,11 +166,11 @@ def main(msname, store_basename='caldata_transfer', store_directory='.', newparm amp_cal_00_all = np.median(amps_array[antenna_id,:,:,0],axis=0) amp_cal_11_all = np.median(amps_array[antenna_id,:,:,1],axis=0) # interpolate to target frequencies - amp_cal_00 = np.interp(msinfo.msfreqvalues, freqs_ampl, amp_cal_00_all) - amp_cal_11 = np.interp(msinfo.msfreqvalues, freqs_ampl, amp_cal_11_all) + amp_cal_00 = np.interp(freqvalues, freqs_ampl, amp_cal_00_all) + amp_cal_11 = np.interp(freqvalues, freqs_ampl, amp_cal_11_all) # interpolate phases phase_cal_00 = 0. 
- phase_cal_11 = np.interp(msinfo.msfreqvalues, freqs_phase, phases_array[:,antenna_id]) + phase_cal_11 = np.interp(freqvalues, freqs_phase, phases_array[:,antenna_id]) # convert to real and imaginary real_00 = amp_cal_00*np.cos(phase_cal_00) @@ -183,7 +209,7 @@ def main(msname, store_basename='caldata_transfer', store_directory='.', newparm outDB.addValues('Clock:'+antenna,ValueHolder) rotation_angles = np.outer(rmdict['RM'][antenna],lambdaSquared) - newValue = outDB.makeValue(values=rotation_angles, sfreq=msinfo.msfreqvalues, efreq=freqsteps, stime=rmdict['times'], etime=timesteps, asStartEnd=False) + newValue = outDB.makeValue(values=rotation_angles, sfreq=freqvalues, efreq=freqsteps, stime=rmdict['times'], etime=timesteps, asStartEnd=False) outDB.addValues('CommonRotationAngle:'+antenna,newValue) @@ -192,12 +218,32 @@ def main(msname, store_basename='caldata_transfer', store_directory='.', newparm return {'transfer_parmDB': newparmDB } +######################################################################## +def input2strlist_nomapfile(invar): + """ + from bin/download_IONEX.py + give the list of MSs from the list provided as a string + """ + str_list = None + if type(invar) is str: + if invar.startswith('[') and invar.endswith(']'): + str_list = [f.strip(' \'\"') for f in invar.strip('[]').split(',')] + else: + str_list = [invar.strip(' \'\"')] + elif type(invar) is list: + str_list = [str(f).strip(' \'\"') for f in invar] + else: + raise TypeError('input2strlist: Type '+str(type(invar))+' unknown!') + return str_list + + +######################################################################## if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Create a parmDB with values from the calibrator and rotaton values from RMextract.') - parser.add_argument('MSfile', type=str, nargs='+', - help='One or more MSs for which the IONEX data should be downloaded.') + parser.add_argument('MSfiles', type=str, nargs='+', + help='One or more MSs for which the parmdb should be created.') parser.add_argument('--server', type=str, default='None', help='URL of the server to use. 
(default: None)') parser.add_argument('--prefix', type=str, default='CODG', @@ -214,12 +260,7 @@ def main(msname, store_basename='caldata_transfer', store_directory='.', newparm args = parser.parse_args() - for MS in args.MSfile: - print "Working on:", MS - #main(MS, args.basename, args.extension, server=args.server, prefix=args.prefix, ionexPath=args.ionexpath) - #msinfo = ReadMs(MS) - #newparmDB = MS+args.extension - #outDB = make_empty_parmdb(newparmDB) - #add_COMMONROTATION_vals(outDB, msinfo, args.server, args.prefix, args.ionexpath) - main(MS, store_basename=args.basename, store_directory=args.storedir, newparmdbext=args.extension, + MS = args.MSfiles + print "Working on:", MS + main(MS, store_basename=args.basename, store_directory=args.storedir, newparmdbext=args.extension, ionex_server=args.server, ionex_prefix=args.prefix, ionexPath=args.ionexpath) From 744b666aaa55278d1ef7746cb109c02ece0b54b2 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Thu, 15 Sep 2016 09:47:35 +0200 Subject: [PATCH 04/19] No multithreaded BBS when running on single subbands --- Pre-Facet-Calibrator-RawSingle.parset | 1 - 1 file changed, 1 deletion(-) diff --git a/Pre-Facet-Calibrator-RawSingle.parset b/Pre-Facet-Calibrator-RawSingle.parset index 7880a43f..6747da39 100644 --- a/Pre-Facet-Calibrator-RawSingle.parset +++ b/Pre-Facet-Calibrator-RawSingle.parset @@ -96,7 +96,6 @@ calib_cal.control.type = python-calibrate-stand-alone calib_cal.control.max_per_node = {{ num_proc_per_node }} calib_cal.control.error_tolerance = {{ error_tolerance }} calib_cal.argument.force = True -calib_cal.argument.numthreads = 5 calib_cal.argument.observation = ndppp_prep_cal.output.mapfile # mapfile for the NDPPP-ed calibrator data calib_cal.argument.parset = {{ calib_cal_parset }} calib_cal.argument.catalog = {{ calibrator_skymodel }} From d64765690208f316cd954b6af90d115ccf94a2d7 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Thu, 15 Sep 2016 09:48:44 +0200 Subject: [PATCH 05/19] First untested version --- Pre-Facet-Target-RawCombine.parset | 476 +++++++++++++++++++++++++++ Pre-Facet-Target-RawSingle.parset | 499 +++++++++++++++++++++++++++++ 2 files changed, 975 insertions(+) create mode 100644 Pre-Facet-Target-RawCombine.parset create mode 100644 Pre-Facet-Target-RawSingle.parset diff --git a/Pre-Facet-Target-RawCombine.parset b/Pre-Facet-Target-RawCombine.parset new file mode 100644 index 00000000..39576db7 --- /dev/null +++ b/Pre-Facet-Target-RawCombine.parset @@ -0,0 +1,476 @@ +# Pre-Facet Target Calibration Pipeline +# +# Target part of the basic Pre-Facet calibration pipeline: +# - no demixing but A-team flagging, +# - calibration transfer and averaging of target data in one go. +# - checks frequncies in MSs to group files +# - the new "error_tolerance" option requires LOFAR software version >= 2.15 +# (Comment out all lines with "error_tolerance" if you want to use an older version.) +# - using LoSoTo from the pipeline requires latest executable_args node-script +# (available in the current LOFAR trunk, revision 33969 and later) +# (The diff, to do the patching by hand, can be found in the 6th comment at: +# https://github.com/lofar-astron/prefactor/issues/4 ) +# - expects shared filesystem, that all nodes can reach all files! +# (E.g. a single workstation or compute cluster with shared filesystem +# doesn't work on multiple nodes on CEP-2 or CEP3.) + +### parameters you will need to adjust. +# parameters you will need to adjust. +# averaging for the target data +! 
avg_timestep = 4 # averaging step needed to average the data to 4 seconds time resolution +! avg_freqstep = 16 # averaging step needed to average the data to 4 ch/SB frequency resolution +# where to find the target data +! target_input_path = /data/scratch/username/PathToYourTargetData/ +! target_input_pattern = L*.MS +# path to the skymodel for the phase-only calibration of the target +! target_skymodel = /cep3home/username/Pre-Facet-Cal/skymodels/PleaseProvideTarget.skymodel +# download the phase-only calibration skymodel from TGSS +# "Force" : always download , "True" download if {{ target_skymodel }} does not exist , "False" : never download +! use_tgss_target = True +# how many subbands to concatenate into on frequency band (usually 10 or 12) +! num_SBs_per_group = 10 # make concatenated measurement-sets with that many subbands +# where to put the inspection plots generated by the pipeline +! inspection_directory = /media/scratch/test/username/WhereYouWantInspectionPlotsEtc/ +# where the files with the calibration values from the calibrator pipeline are +! cal_values_directory = /media/scratch/test/username/WhereYouWantToStoreTheValuesFromTheCalibrator/ +# where to put the resulting measurement sets generated by the pipeline +! results_directory = /media/scratch/test/username/WhereYouWantYourProcessedData/ + +# NDPPP-compatible pattern for baselines or stations to be flagged +! flag_baselines = [ CS013HBA* ] +# minimum fraction of unflagged data after RFI flagging and A-team clipping +! min_unflagged_fraction = 0.5 +# name of the station that will be used as a reference for the phase-plots +! reference_station = CS001HBA0 + +### Values needed for RMextract +# the URL of the server where the IONEX files can be downloaded +# leave it at "None" to disable downloads, or set it to: +# ftp://ftp.unibe.ch/aiub/CODE/ +# to download from the "standard" server +! ionex_server = None +# the prefix of the IONEX files +! ionex_prefix = CODG +# path where the IONEX files can be stored or are already stored +! ionex_path = /media/scratch/test/username/WhereYouWantToStoreTheValuesFromTheCalibrator/ + +# pathes to the scripts etc. +# #### ToDo: get the scripts onto CEP3 and adjust the pathes here! +! ATeam_skymodel = /homea/htb00/htb001/prefactor/skymodels/Ateam_LBA_CC.skymodel +! losoto_importer = /homea/htb00/htb001/prefactor/scripts/losotoImporter.py +! get_ion_script = /homea/htb00/htb001/prefactor/bin/download_IONEX.py +! transfer_script = /homea/htb00/htb001/prefactor/scripts/transfer_gains_RMextract.py +! ATeam_Clipper = /homea/htb00/htb001/prefactor/scripts/Ateamclipper.py +! get_tgss_skymodel_script = /homea/htb00/htb001/prefactor/scripts/download_tgss_skymodel_target.py +! sortmap_script = /homea/htb00/htb001/prefactor/scripts/sort_times_into_freqGroups.py +! check_flagged_script = /homea/htb00/htb001/prefactor/scripts/check_unflagged_fraction.py +! structurefunction_script = /cep3home/horneffer/Pre-Facet-Cal/bin/getStructure_from_phases.py +! plotphases_script = /homea/htb00/htb001/prefactor/scripts/plot_solutions_all_stations.py +! losoto_executable = /opt/cep/losoto/current/bin/losoto +! makesourcedb = /homea/htb00/htb003/lofar_jureca_2-15/bin/makesourcedb +! flagging_strategy = /opt/cep/lofar/lofar_versions/LOFAR-Release-2_15_2/lofar_build/install/gnu_opt/share/rfistrategies/HBAdefault + +# number of processes to use per step per node +! num_proc_per_node = 10 +# number of processes to use per step per node for tasks with high i/o (dppp or cp) or memory (eg calibration) +! 
num_proc_per_node_limit = 4 +# number of threads per process for NDPPP +! max_dppp_threads = 8 + +# set this to True if you want the pipeline run to continue if single bands fail +! error_tolerance = False + +### Stuff that you probably don't need to modify +# which steps to run +pipeline.steps=[mk_inspect_dir, createmap_target, combine_data_target_map, sortmap_target, do_sortmap_target_maps, get_ion_files, trans, ndppp_prep_target, create_ateam_model_map, make_sourcedb_ateam, expand_sourcedb_ateam, predict_ateam, ateamcliptar, check_unflagged, check_unflagged_map, sky_tar, create_target_model_map, make_sourcedb_target, expand_sourcedb_target, gsmcal_parmmap, gsmcal_solve, gsmcal_apply, h5_imp_gsmsol_map, h5imp_gsmsol, plot_gsm_phases, gsmcal_antmap, make_structurefunction, old_plot_gsmphases, createmap_plots, copy_plots, mk_results_dir, make_results_mapfile, move_results] + +# create the inspection_directory if needed +mk_inspect_dir.control.kind = plugin +mk_inspect_dir.control.type = makeDirectory +mk_inspect_dir.control.directory = {{ inspection_directory }} + +# generate a mapfile of all the target data +createmap_target.control.kind = plugin +createmap_target.control.type = createMapfile +createmap_target.control.method = mapfile_from_folder +createmap_target.control.mapfile_dir = input.output.mapfile_dir +createmap_target.control.filename = createmap_target.mapfile +createmap_target.control.folder = {{ target_input_path }} +createmap_target.control.pattern = {{ target_input_pattern }} + +# combine all entries into one mapfile, for the sortmap script +combine_data_target_map.control.kind = plugin +combine_data_target_map.control.type = createMapfile +combine_data_target_map.control.method = mapfile_all_to_one +combine_data_target_map.control.mapfile_dir = input.output.mapfile_dir +combine_data_target_map.control.filename = combine_data_tar_map.mapfile +combine_data_target_map.control.mapfile_in = createmap_target.output.mapfile + +# sort the target data by frequency into groups so that NDPPP can concatenate them +sortmap_target.control.type = pythonplugin +sortmap_target.control.executable = {{ sortmap_script }} +sortmap_target.argument.flags = [combine_data_target_map.output.mapfile] +sortmap_target.argument.filename = sortmap_target +sortmap_target.argument.mapfile_dir = input.output.mapfile_dir +sortmap_target.argument.target_path = input.output.working_directory/input.output.job_name +sortmap_target.argument.numSB = {{ num_SBs_per_group }} +sortmap_target.argument.NDPPPfill = True +sortmap_target.argument.stepname = ndppp_prep_target +sortmap_target.argument.truncateLastSBs = True # This means that a excess subbands that don't make a full group get discarded + +# convert the output of sortmap_target into usable mapfiles +do_sortmap_target_maps.control.kind = plugin +do_sortmap_target_maps.control.type = mapfilenamesFromMapfiles +do_sortmap_target_maps.control.mapfile_groupmap = sortmap_target.output.groupmapfile.mapfile +do_sortmap_target_maps.control.mapfile_datamap = sortmap_target.output.mapfile.mapfile +do_sortmap_target_maps.control.mapfile_flagmap = sortmap_target.output.flagmapfile.mapfile + +# get ionex files once for every day that is covered by one of the input MSs +get_ion_files.control.type = pythonplugin +get_ion_files.control.executable = {{ get_ion_script }} +get_ion_files.control.max_per_node = 1 +get_ion_files.control.error_tolerance = {{ error_tolerance }} +get_ion_files.argument.flags = [combine_data_target_map.output.mapfile] 
+get_ion_files.argument.ionex_server = {{ ionex_server }} +get_ion_files.argument.ionex_prefix = {{ ionex_prefix }} +get_ion_files.argument.ionexPath = {{ ionex_path }} + +# generate parmDB with the interpolated calibrator data to apply to the traget +trans.control.type = pythonplugin +trans.control.executable = {{ transfer_script }} +trans.control.max_per_node = {{ num_proc_per_node }} +trans.control.error_tolerance = {{ error_tolerance }} +trans.argument.flags = [do_sortmap_target_maps.output.groupmap] +trans.argument.store_basename = caldata_transfer +trans.argument.store_directory = {{ cal_values_directory }} +trans.argument.newparmdbext = /instrument_amp_clock_offset +trans.argument.ionex_server = {{ ionex_server }} +trans.argument.ionex_prefix = {{ ionex_prefix }} +trans.argument.ionexPath = {{ ionex_path }} + +# run NDPPP on the target data to flag, transfer calibrator values, and average +ndppp_prep_target.control.type = dppp +ndppp_prep_target.control.max_per_node = {{ num_proc_per_node_limit }} +ndppp_prep_target.control.error_tolerance = {{ error_tolerance }} +ndppp_prep_target.control.mapfile_out = do_sortmap_target_maps.output.groupmap # specify the output filenames +ndppp_prep_target.control.mapfiles_in = [do_sortmap_target_maps.output.datamap, do_sortmap_target_maps.output.flagmap, trans.output.mapfile] +ndppp_prep_target.control.inputkeys = [input_file,channels_to_flag,parmdb_file] +ndppp_prep_target.argument.numthreads = {{ max_dppp_threads }} +ndppp_prep_target.argument.msin = input_file +ndppp_prep_target.argument.msin.datacolumn = DATA +ndppp_prep_target.argument.msin.baseline = CS*&; RS*&; CS*&RS* +ndppp_prep_target.argument.msin.missingdata = True #\ these two lines will make NDPPP generate dummy data when +ndppp_prep_target.argument.msin.orderms = False #/ concatenating data +ndppp_prep_target.argument.msin.autoweight = True # recomended for processing raw input data +ndppp_prep_target.argument.msout.datacolumn = DATA +ndppp_prep_target.argument.msout.writefullresflag = False +ndppp_prep_target.argument.msout.overwrite = True +ndppp_prep_target.argument.steps = [autoflag,flagedge,flag1,filter,flagamp,aoflag,applyclock,applygain,applybeam,applyrotate,count,flag2,count,avg] +ndppp_prep_target.argument.autoflag.type = preflagger +ndppp_prep_target.argument.autoflag.corrtype = auto +ndppp_prep_target.argument.flagedge.type = preflagger +ndppp_prep_target.argument.flagedge.chan = channels_to_flag +ndppp_prep_target.argument.flag1.type = preflagger +ndppp_prep_target.argument.flag1.baseline = {{ flag_baselines }} +ndppp_prep_target.argument.filter.type = filter +ndppp_prep_target.argument.filter.baseline = CS*, RS*&& +ndppp_prep_target.argument.filter.remove = true # fully kick out the international stations. 
+ndppp_prep_target.argument.flagamp.type = preflagger +ndppp_prep_target.argument.flagamp.amplmin = 1e-30 +ndppp_prep_target.argument.aoflag.type = aoflagger +ndppp_prep_target.argument.aoflag.memoryperc = 10 +ndppp_prep_target.argument.aoflag.keepstatistics = false +ndppp_prep_target.argument.applyclock.type = applycal +ndppp_prep_target.argument.applyclock.parmdb = parmdb_file +ndppp_prep_target.argument.applyclock.correction = clock +ndppp_prep_target.argument.applygain.type = applycal +ndppp_prep_target.argument.applygain.parmdb = parmdb_file +ndppp_prep_target.argument.applygain.correction = gain +ndppp_prep_target.argument.applybeam.type = applybeam +ndppp_prep_target.argument.applybeam.usechannelfreq = True +ndppp_prep_target.argument.applyrotate.type = applycal +ndppp_prep_target.argument.applyrotate.parmdb = parmdb_file +ndppp_prep_target.argument.applyrotate.correction = commonrotationangle +ndppp_prep_target.argument.flag2.type = aoflagger +ndppp_prep_target.argument.flag2.keepstatistics = false +ndppp_prep_target.argument.flag2.memoryperc = 10 +ndppp_prep_target.argument.flag2.strategy = {{ flagging_strategy }} +ndppp_prep_target.argument.avg.type = average +ndppp_prep_target.argument.avg.timestep = {{ avg_timestep }} # average to 8 second intervals , PLEASE ADJUST! +ndppp_prep_target.argument.avg.freqstep = {{ avg_freqstep }} # average to 2 ch/SB , PLEASE ADJUST! + +# create a mapfile with the A-Team skymodel, length = 1 +create_ateam_model_map.control.kind = plugin +create_ateam_model_map.control.type = addListMapfile +create_ateam_model_map.control.hosts = ['localhost'] +create_ateam_model_map.control.files = [ {{ ATeam_skymodel }} ] +create_ateam_model_map.control.mapfile_dir = input.output.mapfile_dir +create_ateam_model_map.control.filename = ateam_model_name.mapfile + +# make sourcedbs from the A-Team skymodel, length = 1 +# outtype = blob, because NDPPP likes that +make_sourcedb_ateam.control.kind = recipe +make_sourcedb_ateam.control.type = executable_args +make_sourcedb_ateam.control.executable = {{ makesourcedb }} +make_sourcedb_ateam.control.error_tolerance = {{ error_tolerance }} +make_sourcedb_ateam.control.args_format = lofar +make_sourcedb_ateam.control.outputkey = out +make_sourcedb_ateam.control.mapfile_in = create_ateam_model_map.output.mapfile +make_sourcedb_ateam.control.inputkey = in +make_sourcedb_ateam.argument.format = < +make_sourcedb_ateam.argument.outtype = blob + +# expand the sourcedb mapfile so that there is one entry for every file, length = nfiles +expand_sourcedb_ateam.control.kind = plugin +expand_sourcedb_ateam.control.type = expandMapfile +expand_sourcedb_ateam.control.mapfile_in = make_sourcedb_ateam.output.mapfile +expand_sourcedb_ateam.control.mapfile_to_match = ndppp_prep_target.output.mapfile +expand_sourcedb_ateam.control.mapfile_dir = input.output.mapfile_dir +expand_sourcedb_ateam.control.filename = expand_sourcedb_ateam.datamap + +# Predict, corrupt, and predict the ateam-resolution model, length = nfiles +predict_ateam.control.type = dppp +predict_ateam.control.mapfiles_in = [ndppp_prep_target.output.mapfile, expand_sourcedb_ateam.output.mapfile] +predict_ateam.control.inputkeys = [msin,sourcedb] +predict_ateam.control.inplace = True +predict_ateam.control.max_per_node = {{ num_proc_per_node_limit }} +predict_ateam.argument.numthreads = {{ max_dppp_threads }} +predict_ateam.control.error_tolerance = {{ error_tolerance }} +predict_ateam.argument.msin.datacolumn = DATA +predict_ateam.argument.msout = . 
+predict_ateam.argument.msout.datacolumn = MODEL_DATA +predict_ateam.argument.steps = [predict] +predict_ateam.argument.predict.type = predict +predict_ateam.argument.predict.operation = replace +predict_ateam.argument.predict.sourcedb = sourcedb +predict_ateam.argument.predict.sources = [VirA_4_patch,CygAGG,CasA_4_patch,TauAGG] +predict_ateam.argument.predict.usebeammodel = True +# This is run on concatenated subbands, which means that it is ineed needed +# to set usechannelfreq to True +predict_ateam.argument.predict.usechannelfreq = True + +# run the a-team clipper to flag data affected by the a-team +ateamcliptar.control.kind = recipe +ateamcliptar.control.type = executable_args +ateamcliptar.control.max_per_node = {{ num_proc_per_node }} +ateamcliptar.control.mapfile_in = ndppp_prep_target.output.mapfile +ateamcliptar.control.executable = {{ ATeam_Clipper }} +ateamcliptar.control.error_tolerance = {{ error_tolerance }} +ateamcliptar.control.arguments = [allms] +ateamcliptar.control.inputkey = allms + +# check all files for minimum unflagged fraction +check_unflagged.control.type = pythonplugin +check_unflagged.control.executable = {{ check_flagged_script }} +check_unflagged.argument.flags = [ndppp_prep_target.output.mapfile] +check_unflagged.argument.min_fraction = {{ min_unflagged_fraction }} +# this step writes hostnames into "check_unflagged.flagged.mapfile" due to a "feature" of the pythonplugin + +# prune flagged files from mapfile +check_unflagged_map.control.kind = plugin +check_unflagged_map.control.type = pruneMapfile +check_unflagged_map.control.mapfile_in = check_unflagged.output.flagged.mapfile +check_unflagged_map.control.mapfile_dir = input.output.mapfile_dir +check_unflagged_map.control.filename = check_unflagged_map.mapfile +check_unflagged_map.control.prune_str = None + +# if wished, download the tgss skymodel for the target +sky_tar.control.type = pythonplugin +sky_tar.control.executable = {{ get_tgss_skymodel_script }} +sky_tar.argument.flags = [combine_target_map.output.mapfile] +sky_tar.argument.DoDownload = {{ use_tgss_target }} +sky_tar.argument.SkymodelPath = {{ target_skymodel }} +sky_tar.argument.Radius = 5. 
#in degrees + +# create a mapfile with the target skymodel, length = 1 +create_target_model_map.control.kind = plugin +create_target_model_map.control.type = addListMapfile +create_target_model_map.control.hosts = ['localhost'] +create_target_model_map.control.files = [ {{ target_skymodel }} ] +create_target_model_map.control.mapfile_dir = input.output.mapfile_dir +create_target_model_map.control.filename = target_model_name.mapfile + +# make sourcedbs from the target skymodel, length = 1 +# outtype = blob, because NDPPP likes that +make_sourcedb_target.control.kind = recipe +make_sourcedb_target.control.type = executable_args +make_sourcedb_target.control.executable = {{ makesourcedb }} +make_sourcedb_target.control.error_tolerance = {{ error_tolerance }} +make_sourcedb_target.control.args_format = lofar +make_sourcedb_target.control.outputkey = out +make_sourcedb_target.control.mapfile_in = create_target_model_map.output.mapfile +make_sourcedb_target.control.inputkey = in +make_sourcedb_target.argument.format = < +make_sourcedb_target.argument.outtype = blob + +# expand the sourcedb mapfile so that there is one entry for every file, length = nfiles +expand_sourcedb_target.control.kind = plugin +expand_sourcedb_target.control.type = expandMapfile +expand_sourcedb_target.control.mapfile_in = make_sourcedb_target.output.mapfile +expand_sourcedb_target.control.mapfile_to_match = check_unflagged_map.output.mapfile +expand_sourcedb_target.control.mapfile_dir = input.output.mapfile_dir +expand_sourcedb_target.control.filename = expand_sourcedb_target.datamap + +# generate mapfile with the parmDB names to be used in the gsmcal steps +gsmcal_parmmap.control.kind = plugin +gsmcal_parmmap.control.type = createMapfile +gsmcal_parmmap.control.method = add_suffix_to_file +gsmcal_parmmap.control.mapfile_in = check_unflagged_map.output.mapfile +gsmcal_parmmap.control.add_suffix_to_file = /instrument_directionindependent +gsmcal_parmmap.control.mapfile_dir = input.output.mapfile_dir +gsmcal_parmmap.control.filename = gsmcal_parmdbs.mapfile + +# solve for phase-only calibration solutions +# solve and apply are seperate to allow to solve on a subset of baselines but apply to all +gsmcal_solve.control.type = dppp +gsmcal_solve.control.environment = {OMP_NUM_THREADS: 4} +gsmcal_solve.control.error_tolerance = {{ error_tolerance }} +gsmcal_solve.control.inplace = True +gsmcal_solve.control.max_per_node = {{ num_proc_per_node_limit }} +gsmcal_solve.argument.numthreads = {{ max_dppp_threads }} +gsmcal_solve.argument.msin = check_unflagged_map.output.mapfile +gsmcal_solve.argument.msin.datacolumn = DATA +gsmcal_solve.argument.msin.baseline = CS*&; RS*&; CS*&RS* +gsmcal_solve.argument.msout.datacolumn = CORRECTED_DATA +gsmcal_solve.argument.steps = [filter,gaincal] +gsmcal_solve.filter.type = filter +gsmcal_solve.filter.blrange = [150, 999999] +gsmcal_solve.argument.gaincal.type = gaincal +gsmcal_solve.argument.gaincal.maxiter = 500 +gsmcal_solve.argument.gaincal.caltype = phaseonly +gsmcal_solve.argument.gaincal.nchan = 0 +gsmcal_solve.argument.gaincal.solint = 1 +gsmcal_solve.argument.gaincal.sourcedb = expand_sourcedb_target.output.mapfile +gsmcal_solve.argument.gaincal.parmdb = gsmcal_parmmap.output.mapfile +gsmcal_solve.argument.gaincal.usebeammodel = True +gsmcal_solve.argument.gaincal.usechannelfreq = True +gsmcal_solve.argument.gaincal.beammode = array_factor + +# apply the phase-only calibration solutions +# solve and apply are seperate to allow to solve on a subset of baselines but apply to all 
+gsmcal_apply.control.type = dppp +gsmcal_apply.control.error_tolerance = {{ error_tolerance }} +gsmcal_apply.control.inplace = True +gsmcal_apply.control.max_per_node = {{ num_proc_per_node_limit }} +gsmcal_apply.argument.numthreads = {{ max_dppp_threads }} +gsmcal_apply.argument.msin = check_unflagged_map.output.mapfile +gsmcal_apply.argument.msin.datacolumn = DATA +gsmcal_apply.argument.msout.datacolumn = CORRECTED_DATA +gsmcal_apply.argument.msout.writefullresflag = False +gsmcal_apply.argument.steps = [applycal] +gsmcal_apply.argument.applycal.type = applycal +gsmcal_apply.argument.applycal.correction = gain +gsmcal_apply.argument.applycal.parmdb = gsmcal_parmmap.output.mapfile + +# generate a mapfile with all files in a single entry +h5_imp_gsmsol_map.control.kind = plugin +h5_imp_gsmsol_map.control.type = MapfileToOne +h5_imp_gsmsol_map.control.method = mapfile_all_to_one +h5_imp_gsmsol_map.control.mapfile_in = check_unflagged_map.output.mapfile +h5_imp_gsmsol_map.control.mapfile_dir = input.output.mapfile_dir +h5_imp_gsmsol_map.control.filename = h5_imp_gsmsol_map.mapfile + +# import all instrument tables into one LoSoTo file +h5imp_gsmsol.control.type = pythonplugin +h5imp_gsmsol.control.executable = {{ losoto_importer }} +h5imp_gsmsol.control.error_tolerance = {{ error_tolerance }} +h5imp_gsmsol.argument.flags = [h5_imp_gsmsol_map.output.mapfile,h5imp_gsmsol_losoto.h5] +h5imp_gsmsol.argument.instrument = /instrument_directionindependent +h5imp_gsmsol.argument.solsetName = sol000 +h5imp_gsmsol.argument.compression = 7 + +# plot the phase solutions from the phase-only calibration of the target +plot_gsm_phases.control.kind = recipe +plot_gsm_phases.control.type = executable_args +plot_gsm_phases.control.executable = {{ losoto_executable }} +plot_gsm_phases.control.max_per_node = {{ num_proc_per_node }} +plot_gsm_phases.control.parsetasfile = True +plot_gsm_phases.control.args_format = losoto +plot_gsm_phases.control.mapfiles_in = [h5imp_gsmsol.output.h5parm.mapfile] +plot_gsm_phases.control.inputkeys = [hdf5file] +plot_gsm_phases.argument.flags = [hdf5file] +plot_gsm_phases.argument.LoSoTo.Steps = [plot] +plot_gsm_phases.argument.LoSoTo.Solset = [sol000] +plot_gsm_phases.argument.LoSoTo.Soltab = [sol000/phase000] +plot_gsm_phases.argument.LoSoTo.SolType = [phase] +plot_gsm_phases.argument.LoSoTo.ant = [] +plot_gsm_phases.argument.LoSoTo.pol = [XX,YY] +plot_gsm_phases.argument.LoSoTo.dir = [pointing] +plot_gsm_phases.argument.LoSoTo.Steps.plot.Operation = PLOT +plot_gsm_phases.argument.LoSoTo.Steps.plot.PlotType = 2D +plot_gsm_phases.argument.LoSoTo.Steps.plot.Axes = [time,freq] +plot_gsm_phases.argument.LoSoTo.Steps.plot.TableAxis = [ant] +plot_gsm_phases.argument.LoSoTo.Steps.plot.ColorAxis = [pol] +plot_gsm_phases.argument.LoSoTo.Steps.plot.Reference = {{ reference_station }} +plot_gsm_phases.argument.LoSoTo.Steps.plot.PlotFlag = False +plot_gsm_phases.argument.LoSoTo.Steps.plot.Prefix = {{ inspection_directory }}/gsm_phases_ + +# generate mapfile with the antenna tables of the concatenated target datafiles +gsmcal_antmap.control.kind = plugin +gsmcal_antmap.control.type = createMapfile +gsmcal_antmap.control.method = add_suffix_to_file +gsmcal_antmap.control.mapfile_in = check_unflagged_map.output.mapfile +gsmcal_antmap.control.add_suffix_to_file = /ANTENNA +gsmcal_antmap.control.mapfile_dir = input.output.mapfile_dir +gsmcal_antmap.control.filename = gsmcal_antmaps.mapfile + +# plot the phase solutions from the phase-only calibration of the target 
+make_structurefunction.control.kind = recipe +make_structurefunction.control.type = executable_args +make_structurefunction.control.executable = {{ structurefunction_script }} +make_structurefunction.control.max_per_node = {{ num_proc_per_node }} +make_structurefunction.control.mapfiles_in = [gsmcal_parmmap.output.mapfile,gsmcal_antmap.output.mapfile,check_unflagged_map.output.mapfile] +make_structurefunction.control.inputkeys = [inparmdb,inants,outbase] +make_structurefunction.control.arguments = [inparmdb,inants,outbase] + +# plot the phase solutions from the phase-only calibration of the target +old_plot_gsmphases.control.kind = recipe +old_plot_gsmphases.control.type = executable_args +old_plot_gsmphases.control.executable = {{ plotphases_script }} +old_plot_gsmphases.control.max_per_node = {{ num_proc_per_node }} +old_plot_gsmphases.control.mapfiles_in = [gsmcal_parmmap.output.mapfile,check_unflagged_map.output.mapfile] +old_plot_gsmphases.control.inputkeys = [infile,outbase] +old_plot_gsmphases.control.arguments = [-p,infile,outbase] + +# generate a mapfile of all the diagnostic plots +createmap_plots.control.kind = plugin +createmap_plots.control.type = createMapfile +createmap_plots.control.method = mapfile_from_folder +createmap_plots.control.mapfile_dir = input.output.mapfile_dir +createmap_plots.control.filename = diagnostic_plots.mapfile +createmap_plots.control.folder = input.output.working_directory/input.output.job_name +createmap_plots.control.pattern = *.png + +# copy the diagnostic plots to the results_directory +copy_plots.control.kind = recipe +copy_plots.control.type = executable_args +copy_plots.control.executable = /bin/cp +copy_plots.control.max_per_node = {{ num_proc_per_node_limit }} +copy_plots.control.mapfile_in = createmap_plots.output.mapfile +copy_plots.control.inputkey = source +copy_plots.control.arguments = [source,{{ inspection_directory }}] + +# create the results directory if needed +mk_results_dir.control.kind = plugin +mk_results_dir.control.type = makeDirectory +mk_results_dir.control.directory = {{ results_directory }} + +# make mapfile with the filenames of the results that we want +make_results_mapfile.control.kind = plugin +make_results_mapfile.control.type = makeResultsMapfile +make_results_mapfile.control.mapfile_dir = input.output.mapfile_dir +make_results_mapfile.control.filename = make_results_mapfile.mapfile +make_results_mapfile.control.mapfile_in = check_unflagged_map.output.mapfile +make_results_mapfile.control.target_dir = {{ results_directory }} +make_results_mapfile.control.make_target_dir = True +make_results_mapfile.control.new_suffix = .pre-cal.ms + +# move the results to where we want them +move_results.control.kind = recipe +move_results.control.type = executable_args +move_results.control.executable = /bin/mv +move_results.control.max_per_node = {{ num_proc_per_node_limit }} +move_results.control.mapfiles_in = [check_unflagged_map.output.mapfile,make_results_mapfile.output.mapfile] +move_results.control.inputkeys = [source,destination] +move_results.control.arguments = [source,destination] + diff --git a/Pre-Facet-Target-RawSingle.parset b/Pre-Facet-Target-RawSingle.parset new file mode 100644 index 00000000..f5525620 --- /dev/null +++ b/Pre-Facet-Target-RawSingle.parset @@ -0,0 +1,499 @@ +# Pre-Facet Target Calibration Pipeline +# +# Target part of the basic Pre-Facet calibration pipeline: +# - no demixing but A-team flagging, +# - calibration transfer and averaging of target data in one go. 
+# - checks frequncies in MSs to group files +# - the new "error_tolerance" option requires LOFAR software version >= 2.15 +# (Comment out all lines with "error_tolerance" if you want to use an older version.) +# - using LoSoTo from the pipeline requires latest executable_args node-script +# (available in the current LOFAR trunk, revision 33969 and later) +# (The diff, to do the patching by hand, can be found in the 6th comment at: +# https://github.com/lofar-astron/prefactor/issues/4 ) +# - expects shared filesystem, that all nodes can reach all files! +# (E.g. a single workstation or compute cluster with shared filesystem +# doesn't work on multiple nodes on CEP-2 or CEP3.) + +### parameters you will need to adjust. +# parameters you will need to adjust. +# averaging for the target data +! avg_timestep = 4 # averaging step needed to average the data to 4 seconds time resolution +! avg_freqstep = 16 # averaging step needed to average the data to 4 ch/SB frequency resolution +# where to find the target data +! target_input_path = /data/scratch/username/PathToYourTargetData/ +! target_input_pattern = L*.MS +# path to the skymodel for the phase-only calibration of the target +! target_skymodel = /cep3home/username/Pre-Facet-Cal/skymodels/PleaseProvideTarget.skymodel +# download the phase-only calibration skymodel from TGSS +# "Force" : always download , "True" download if {{ target_skymodel }} does not exist , "False" : never download +! use_tgss_target = True +# how many subbands to concatenate into on frequency band (usually 10 or 12) +! num_SBs_per_group = 10 # make concatenated measurement-sets with that many subbands +# where to put the inspection plots generated by the pipeline +! inspection_directory = /media/scratch/test/username/WhereYouWantInspectionPlotsEtc/ +# where the files with the calibration values from the calibrator pipeline are +! cal_values_directory = /media/scratch/test/username/WhereYouWantToStoreTheValuesFromTheCalibrator/ +# where to put the resulting measurement sets generated by the pipeline +! results_directory = /media/scratch/test/username/WhereYouWantYourProcessedData/ + +# NDPPP-compatible pattern for baselines or stations to be flagged +! flag_baselines = [ CS013HBA* ] +# minimum fraction of unflagged data after RFI flagging and A-team clipping +! min_unflagged_fraction = 0.5 +# name of the station that will be used as a reference for the phase-plots +! reference_station = CS001HBA0 + +### Values needed for RMextract +# the URL of the server where the IONEX files can be downloaded +# leave it at "None" to disable downloads, or set it to: +# ftp://ftp.unibe.ch/aiub/CODE/ +# to download from the "standard" server +! ionex_server = None +# the prefix of the IONEX files +! ionex_prefix = CODG +# path where the IONEX files can be stored or are already stored +! ionex_path = /media/scratch/test/username/WhereYouWantToStoreTheValuesFromTheCalibrator/ + +# pathes to the scripts etc. +# #### ToDo: get the scripts onto CEP3 and adjust the pathes here! +! ATeam_skymodel = /homea/htb00/htb001/prefactor/skymodels/Ateam_LBA_CC.skymodel +! losoto_importer = /homea/htb00/htb001/prefactor/scripts/losotoImporter.py +! get_ion_script = /homea/htb00/htb001/prefactor/bin/download_IONEX.py +! transfer_script = /homea/htb00/htb001/prefactor/scripts/transfer_gains_RMextract.py +! ATeam_Clipper = /homea/htb00/htb001/prefactor/scripts/Ateamclipper.py +! get_tgss_skymodel_script = /homea/htb00/htb001/prefactor/scripts/download_tgss_skymodel_target.py +! 
sortmap_script = /homea/htb00/htb001/prefactor/scripts/sort_times_into_freqGroups.py +! check_flagged_script = /homea/htb00/htb001/prefactor/scripts/check_unflagged_fraction.py +! structurefunction_script = /cep3home/horneffer/Pre-Facet-Cal/bin/getStructure_from_phases.py +! plotphases_script = /homea/htb00/htb001/prefactor/scripts/plot_solutions_all_stations.py +! losoto_executable = /opt/cep/losoto/current/bin/losoto +! makesourcedb = /homea/htb00/htb003/lofar_jureca_2-15/bin/makesourcedb +! flagging_strategy = /opt/cep/lofar/lofar_versions/LOFAR-Release-2_15_2/lofar_build/install/gnu_opt/share/rfistrategies/HBAdefault + +# number of processes to use per step per node +! num_proc_per_node = 10 +# number of processes to use per step per node for tasks with high i/o (dppp or cp) or memory (eg calibration) +! num_proc_per_node_limit = 4 +# number of threads per process for NDPPP +! max_dppp_threads = 8 + +# set this to True if you want the pipeline run to continue if single bands fail +! error_tolerance = False + +### Stuff that you probably don't need to modify +# which steps to run +pipeline.steps=[mk_inspect_dir, createmap_target, combine_data_target_map, get_ion_files, trans, ndppp_prep_target, create_ateam_model_map, make_sourcedb_ateam, expand_sourcedb_ateam, predict_ateam, ateamcliptar, combine_target_map, sortmap_target, do_sortmap_maps, dpppconcat, check_unflagged, check_unflagged_map, sky_tar, create_target_model_map, make_sourcedb_target, expand_sourcedb_target, gsmcal_parmmap, gsmcal_solve, gsmcal_apply, h5_imp_gsmsol_map, h5imp_gsmsol, plot_gsm_phases, gsmcal_antmap, make_structurefunction, old_plot_gsmphases, createmap_plots, copy_plots, mk_results_dir, make_results_mapfile, move_results] + +# create the inspection_directory if needed +mk_inspect_dir.control.kind = plugin +mk_inspect_dir.control.type = makeDirectory +mk_inspect_dir.control.directory = {{ inspection_directory }} + +# generate a mapfile of all the target data +createmap_target.control.kind = plugin +createmap_target.control.type = createMapfile +createmap_target.control.method = mapfile_from_folder +createmap_target.control.mapfile_dir = input.output.mapfile_dir +createmap_target.control.filename = createmap_target.mapfile +createmap_target.control.folder = {{ target_input_path }} +createmap_target.control.pattern = {{ target_input_pattern }} + +# combine all entries into one mapfile, for the sortmap script +combine_data_target_map.control.kind = plugin +combine_data_target_map.control.type = createMapfile +combine_data_target_map.control.method = mapfile_all_to_one +combine_data_target_map.control.mapfile_dir = input.output.mapfile_dir +combine_data_target_map.control.filename = combine_data_tar_map.mapfile +combine_data_target_map.control.mapfile_in = createmap_target.output.mapfile + +# get ionex files once for every day that is covered by one of the input MSs +get_ion_files.control.type = pythonplugin +get_ion_files.control.executable = {{ get_ion_script }} +get_ion_files.control.max_per_node = 1 +get_ion_files.control.error_tolerance = {{ error_tolerance }} +get_ion_files.argument.flags = [combine_data_target_map.output.mapfile] +get_ion_files.argument.ionex_server = {{ ionex_server }} +get_ion_files.argument.ionex_prefix = {{ ionex_prefix }} +get_ion_files.argument.ionexPath = {{ ionex_path }} + +# generate parmDB with the interpolated calibrator data to apply to the traget +trans.control.type = pythonplugin +trans.control.executable = {{ transfer_script }} +trans.control.max_per_node = {{ 
num_proc_per_node }} +trans.control.error_tolerance = {{ error_tolerance }} +trans.argument.flags = [createmap_target.output.mapfile] +trans.argument.store_basename = caldata_transfer +trans.argument.store_directory = {{ cal_values_directory }} +trans.argument.newparmdbext = /instrument_amp_clock_offset +trans.argument.ionex_server = {{ ionex_server }} +trans.argument.ionex_prefix = {{ ionex_prefix }} +trans.argument.ionexPath = {{ ionex_path }} + +# run NDPPP on the target data to flag, transfer calibrator values, and average +ndppp_prep_target.control.type = dppp +ndppp_prep_target.control.max_per_node = {{ num_proc_per_node_limit }} +ndppp_prep_target.control.error_tolerance = {{ error_tolerance }} +ndppp_prep_target.control.mapfiles_in = [createmap_target.output.mapfile, trans.output.mapfile] +ndppp_prep_target.control.inputkeys = [input_file, parmdb_file] +ndppp_prep_target.argument.numthreads = {{ max_dppp_threads }} +ndppp_prep_target.argument.msin = input_file +ndppp_prep_target.argument.msin.datacolumn = DATA +ndppp_prep_target.argument.msin.baseline = CS*&; RS*&; CS*&RS* +ndppp_prep_target.argument.msin.autoweight = True # recomended for processing raw input data +ndppp_prep_target.argument.msout.datacolumn = DATA +ndppp_prep_target.argument.msout.writefullresflag = False +ndppp_prep_target.argument.msout.overwrite = True +ndppp_prep_target.argument.steps = [autoflag,flagedge,flag1,filter,aoflag,flagamp,applyclock,applygain,applybeam,applyrotate,count,flag2,count,avg] +ndppp_prep_target.argument.autoflag.type = preflagger +ndppp_prep_target.argument.autoflag.corrtype = auto +ndppp_prep_target.argument.flagedge.type = preflagger +ndppp_prep_target.argument.flagedge.chan = [0..nchan/32-1,31*nchan/32..nchan-1] # we are running on a single subband +ndppp_prep_target.argument.flag1.type = preflagger +ndppp_prep_target.argument.flag1.baseline = {{ flag_baselines }} +ndppp_prep_target.argument.filter.type = filter +ndppp_prep_target.argument.filter.baseline = CS*, RS*&& +ndppp_prep_target.argument.filter.remove = true # fully kick out the international stations. 
+ndppp_prep_target.argument.aoflag.type = aoflagger +ndppp_prep_target.argument.aoflag.memoryperc = 10 +ndppp_prep_target.argument.aoflag.keepstatistics = false +ndppp_prep_target.argument.flagamp.type = preflagger +ndppp_prep_target.argument.flagamp.amplmin = 1e-30 +ndppp_prep_target.argument.applyclock.type = applycal +ndppp_prep_target.argument.applyclock.parmdb = parmdb_file +ndppp_prep_target.argument.applyclock.correction = clock +ndppp_prep_target.argument.applygain.type = applycal +ndppp_prep_target.argument.applygain.parmdb = parmdb_file +ndppp_prep_target.argument.applygain.correction = gain +ndppp_prep_target.argument.applybeam.type = applybeam +ndppp_prep_target.argument.applybeam.usechannelfreq = True +ndppp_prep_target.argument.applyrotate.type = applycal +ndppp_prep_target.argument.applyrotate.parmdb = parmdb_file +ndppp_prep_target.argument.applyrotate.correction = commonrotationangle +ndppp_prep_target.argument.flag2.type = aoflagger +ndppp_prep_target.argument.flag2.keepstatistics = false +ndppp_prep_target.argument.flag2.memoryperc = 10 +ndppp_prep_target.argument.flag2.strategy = {{ flagging_strategy }} +ndppp_prep_target.argument.avg.type = average +ndppp_prep_target.argument.avg.timestep = {{ avg_timestep }} +ndppp_prep_target.argument.avg.freqstep = {{ avg_freqstep }} + +# create a mapfile with the A-Team skymodel, length = 1 +create_ateam_model_map.control.kind = plugin +create_ateam_model_map.control.type = addListMapfile +create_ateam_model_map.control.hosts = ['localhost'] +create_ateam_model_map.control.files = [ {{ ATeam_skymodel }} ] +create_ateam_model_map.control.mapfile_dir = input.output.mapfile_dir +create_ateam_model_map.control.filename = ateam_model_name.mapfile + +# make sourcedbs from the A-Team skymodel, length = 1 +# outtype = blob, because NDPPP likes that +make_sourcedb_ateam.control.kind = recipe +make_sourcedb_ateam.control.type = executable_args +make_sourcedb_ateam.control.executable = {{ makesourcedb }} +make_sourcedb_ateam.control.error_tolerance = {{ error_tolerance }} +make_sourcedb_ateam.control.args_format = lofar +make_sourcedb_ateam.control.outputkey = out +make_sourcedb_ateam.control.mapfile_in = create_ateam_model_map.output.mapfile +make_sourcedb_ateam.control.inputkey = in +make_sourcedb_ateam.argument.format = < +make_sourcedb_ateam.argument.outtype = blob + +# expand the sourcedb mapfile so that there is one entry for every file, length = nfiles +expand_sourcedb_ateam.control.kind = plugin +expand_sourcedb_ateam.control.type = expandMapfile +expand_sourcedb_ateam.control.mapfile_in = make_sourcedb_ateam.output.mapfile +expand_sourcedb_ateam.control.mapfile_to_match = ndppp_prep_target.output.mapfile +expand_sourcedb_ateam.control.mapfile_dir = input.output.mapfile_dir +expand_sourcedb_ateam.control.filename = expand_sourcedb_ateam.datamap + +# Predict, corrupt, and predict the ateam-resolution model, length = nfiles +predict_ateam.control.type = dppp +predict_ateam.control.mapfiles_in = [ndppp_prep_target.output.mapfile, expand_sourcedb_ateam.output.mapfile] +predict_ateam.control.inputkeys = [msin,sourcedb] +predict_ateam.control.inplace = True +predict_ateam.control.max_per_node = {{ num_proc_per_node_limit }} +predict_ateam.argument.numthreads = {{ max_dppp_threads }} +predict_ateam.control.error_tolerance = {{ error_tolerance }} +predict_ateam.argument.msin.datacolumn = DATA +predict_ateam.argument.msout = . 
+predict_ateam.argument.msout.datacolumn = MODEL_DATA +predict_ateam.argument.steps = [predict] +predict_ateam.argument.predict.type = predict +predict_ateam.argument.predict.operation = replace +predict_ateam.argument.predict.sourcedb = sourcedb +predict_ateam.argument.predict.sources = [VirA_4_patch,CygAGG,CasA_4_patch,TauAGG] +predict_ateam.argument.predict.usebeammodel = True +# This is run on single subbands, which means that it is ineed "more correct" +# to set usechannelfreq to false +predict_ateam.argument.predict.usechannelfreq = false + +# run the a-team clipper to flag data affected by the a-team +ateamcliptar.control.kind = recipe +ateamcliptar.control.type = executable_args +ateamcliptar.control.max_per_node = {{ num_proc_per_node }} +ateamcliptar.control.mapfile_in = ndppp_prep_target.output.mapfile +ateamcliptar.control.executable = {{ ATeam_Clipper }} +ateamcliptar.control.error_tolerance = {{ error_tolerance }} +ateamcliptar.control.arguments = [allms] +ateamcliptar.control.inputkey = allms + +# combine all entries into one mapfile, for the sortmap script +combine_target_map.control.kind = plugin +combine_target_map.control.type = createMapfile +combine_target_map.control.method = mapfile_all_to_one +combine_target_map.control.mapfile_dir = input.output.mapfile_dir +combine_target_map.control.filename = combine_tar_map.mapfile +combine_target_map.control.mapfile_in = ndppp_prep_target.output.mapfile + +# sort the target data by frequency into groups so that NDPPP can concatenate them +sortmap_target.control.type = pythonplugin +sortmap_target.control.executable = {{ sortmap_script }} +sortmap_target.argument.flags = [combine_target_map.output.mapfile] +sortmap_target.argument.filename = sortmap_target +sortmap_target.argument.mapfile_dir = input.output.mapfile_dir +sortmap_target.argument.target_path = input.output.working_directory/input.output.job_name +sortmap_target.argument.numSB = {{ num_SBs_per_group }} +sortmap_target.argument.NDPPPfill = True +sortmap_target.argument.stepname = dpppconcat +sortmap_target.argument.truncateLastSBs = True # This means that a excess subbands that don't make a full group get discarded + +# convert the output of sortmap_target into usable mapfiles +do_sortmap_maps.control.kind = plugin +do_sortmap_maps.control.type = mapfilenamesFromMapfiles +do_sortmap_maps.control.mapfile_groupmap = sortmap_target.output.groupmapfile.mapfile +do_sortmap_maps.control.mapfile_datamap = sortmap_target.output.mapfile.mapfile + +# run NDPPP to concatenate the target +dpppconcat.control.type = dppp +dpppconcat.control.max_per_node = {{ num_proc_per_node_limit }} +dpppconcat.control.error_tolerance = {{ error_tolerance }} +dpppconcat.control.mapfile_out = do_sortmap_maps.output.groupmap # tell the pipeline to give the output useful names +dpppconcat.control.mapfiles_in = [do_sortmap_maps.output.datamap] +dpppconcat.control.inputkey = msin +dpppconcat.argument.msin.datacolumn = DATA +dpppconcat.argument.msin.missingdata = True #\ these two lines will make NDPPP generate dummy data when +dpppconcat.argument.msin.orderms = False #/ concatenating data +dpppconcat.argument.msout.datacolumn = DATA +dpppconcat.argumentmsout.writefullresflag = False +dpppconcat.argument.msout.overwrite = True +dpppconcat.argument.steps = [flag] # run the aoflagger (this used to be an extra step) +dpppconcat.argument.flag.type = aoflagger +dpppconcat.argument.flag.keepstatistics = false +dpppconcat.argument.flag.memoryperc = 10 +dpppconcat.argument.flag.strategy = {{ 
flagging_strategy }} + +# check all files for minimum unflagged fraction +check_unflagged.control.type = pythonplugin +check_unflagged.control.executable = {{ check_flagged_script }} +check_unflagged.argument.flags = [dpppconcat.output.mapfile] +check_unflagged.argument.min_fraction = {{ min_unflagged_fraction }} +# this step writes hostnames into "check_unflagged.flagged.mapfile" due to a "feature" of the pythonplugin + +# prune flagged files from mapfile +check_unflagged_map.control.kind = plugin +check_unflagged_map.control.type = pruneMapfile +check_unflagged_map.control.mapfile_in = check_unflagged.output.flagged.mapfile +check_unflagged_map.control.mapfile_dir = input.output.mapfile_dir +check_unflagged_map.control.filename = check_unflagged_map.mapfile +check_unflagged_map.control.prune_str = None + +# if wished, download the tgss skymodel for the target +sky_tar.control.type = pythonplugin +sky_tar.control.executable = {{ get_tgss_skymodel_script }} +sky_tar.argument.flags = [combine_target_map.output.mapfile] +sky_tar.argument.DoDownload = {{ use_tgss_target }} +sky_tar.argument.SkymodelPath = {{ target_skymodel }} +sky_tar.argument.Radius = 5. #in degrees + +# create a mapfile with the target skymodel, length = 1 +create_target_model_map.control.kind = plugin +create_target_model_map.control.type = addListMapfile +create_target_model_map.control.hosts = ['localhost'] +create_target_model_map.control.files = [ {{ target_skymodel }} ] +create_target_model_map.control.mapfile_dir = input.output.mapfile_dir +create_target_model_map.control.filename = target_model_name.mapfile + +# make sourcedbs from the target skymodel, length = 1 +# outtype = blob, because NDPPP likes that +make_sourcedb_target.control.kind = recipe +make_sourcedb_target.control.type = executable_args +make_sourcedb_target.control.executable = {{ makesourcedb }} +make_sourcedb_target.control.error_tolerance = {{ error_tolerance }} +make_sourcedb_target.control.args_format = lofar +make_sourcedb_target.control.outputkey = out +make_sourcedb_target.control.mapfile_in = create_target_model_map.output.mapfile +make_sourcedb_target.control.inputkey = in +make_sourcedb_target.argument.format = < +make_sourcedb_target.argument.outtype = blob + +# expand the sourcedb mapfile so that there is one entry for every file, length = nfiles +expand_sourcedb_target.control.kind = plugin +expand_sourcedb_target.control.type = expandMapfile +expand_sourcedb_target.control.mapfile_in = make_sourcedb_target.output.mapfile +expand_sourcedb_target.control.mapfile_to_match = check_unflagged_map.output.mapfile +expand_sourcedb_target.control.mapfile_dir = input.output.mapfile_dir +expand_sourcedb_target.control.filename = expand_sourcedb_target.datamap + +# generate mapfile with the parmDB names to be used in the gsmcal steps +gsmcal_parmmap.control.kind = plugin +gsmcal_parmmap.control.type = createMapfile +gsmcal_parmmap.control.method = add_suffix_to_file +gsmcal_parmmap.control.mapfile_in = check_unflagged_map.output.mapfile +gsmcal_parmmap.control.add_suffix_to_file = /instrument_directionindependent +gsmcal_parmmap.control.mapfile_dir = input.output.mapfile_dir +gsmcal_parmmap.control.filename = gsmcal_parmdbs.mapfile + +# solve for phase-only calibration solutions +# solve and apply are seperate to allow to solve on a subset of baselines but apply to all +gsmcal_solve.control.type = dppp +gsmcal_solve.control.environment = {OMP_NUM_THREADS: 4} +gsmcal_solve.control.error_tolerance = {{ error_tolerance }} 
+gsmcal_solve.control.inplace = True +gsmcal_solve.control.max_per_node = {{ num_proc_per_node_limit }} +gsmcal_solve.argument.numthreads = {{ max_dppp_threads }} +gsmcal_solve.argument.msin = check_unflagged_map.output.mapfile +gsmcal_solve.argument.msin.datacolumn = DATA +gsmcal_solve.argument.msin.baseline = CS*&; RS*&; CS*&RS* +gsmcal_solve.argument.msout.datacolumn = CORRECTED_DATA +gsmcal_solve.argument.steps = [filter,gaincal] +gsmcal_solve.filter.type = filter +gsmcal_solve.filter.blrange = [150, 999999] +gsmcal_solve.argument.gaincal.type = gaincal +gsmcal_solve.argument.gaincal.maxiter = 500 +gsmcal_solve.argument.gaincal.caltype = phaseonly +gsmcal_solve.argument.gaincal.nchan = 0 +gsmcal_solve.argument.gaincal.solint = 1 +gsmcal_solve.argument.gaincal.sourcedb = expand_sourcedb_target.output.mapfile +gsmcal_solve.argument.gaincal.parmdb = gsmcal_parmmap.output.mapfile +gsmcal_solve.argument.gaincal.usebeammodel = True +gsmcal_solve.argument.gaincal.usechannelfreq = True +gsmcal_solve.argument.gaincal.beammode = array_factor + +# apply the phase-only calibration solutions +# solve and apply are seperate to allow to solve on a subset of baselines but apply to all +gsmcal_apply.control.type = dppp +gsmcal_apply.control.error_tolerance = {{ error_tolerance }} +gsmcal_apply.control.inplace = True +gsmcal_apply.control.max_per_node = {{ num_proc_per_node_limit }} +gsmcal_apply.argument.numthreads = {{ max_dppp_threads }} +gsmcal_apply.argument.msin = check_unflagged_map.output.mapfile +gsmcal_apply.argument.msin.datacolumn = DATA +gsmcal_apply.argument.msout.datacolumn = CORRECTED_DATA +gsmcal_apply.argument.msout.writefullresflag = False +gsmcal_apply.argument.steps = [applycal] +gsmcal_apply.argument.applycal.type = applycal +gsmcal_apply.argument.applycal.correction = gain +gsmcal_apply.argument.applycal.parmdb = gsmcal_parmmap.output.mapfile + +# generate a mapfile with all files in a single entry +h5_imp_gsmsol_map.control.kind = plugin +h5_imp_gsmsol_map.control.type = MapfileToOne +h5_imp_gsmsol_map.control.method = mapfile_all_to_one +h5_imp_gsmsol_map.control.mapfile_in = check_unflagged_map.output.mapfile +h5_imp_gsmsol_map.control.mapfile_dir = input.output.mapfile_dir +h5_imp_gsmsol_map.control.filename = h5_imp_gsmsol_map.mapfile + +# import all instrument tables into one LoSoTo file +h5imp_gsmsol.control.type = pythonplugin +h5imp_gsmsol.control.executable = {{ losoto_importer }} +h5imp_gsmsol.control.error_tolerance = {{ error_tolerance }} +h5imp_gsmsol.argument.flags = [h5_imp_gsmsol_map.output.mapfile,h5imp_gsmsol_losoto.h5] +h5imp_gsmsol.argument.instrument = /instrument_directionindependent +h5imp_gsmsol.argument.solsetName = sol000 +h5imp_gsmsol.argument.compression = 7 + +# plot the phase solutions from the phase-only calibration of the target +plot_gsm_phases.control.kind = recipe +plot_gsm_phases.control.type = executable_args +plot_gsm_phases.control.executable = {{ losoto_executable }} +plot_gsm_phases.control.max_per_node = {{ num_proc_per_node }} +plot_gsm_phases.control.parsetasfile = True +plot_gsm_phases.control.args_format = losoto +plot_gsm_phases.control.mapfiles_in = [h5imp_gsmsol.output.h5parm.mapfile] +plot_gsm_phases.control.inputkeys = [hdf5file] +plot_gsm_phases.argument.flags = [hdf5file] +plot_gsm_phases.argument.LoSoTo.Steps = [plot] +plot_gsm_phases.argument.LoSoTo.Solset = [sol000] +plot_gsm_phases.argument.LoSoTo.Soltab = [sol000/phase000] +plot_gsm_phases.argument.LoSoTo.SolType = [phase] +plot_gsm_phases.argument.LoSoTo.ant = [] 
+plot_gsm_phases.argument.LoSoTo.pol = [XX,YY] +plot_gsm_phases.argument.LoSoTo.dir = [pointing] +plot_gsm_phases.argument.LoSoTo.Steps.plot.Operation = PLOT +plot_gsm_phases.argument.LoSoTo.Steps.plot.PlotType = 2D +plot_gsm_phases.argument.LoSoTo.Steps.plot.Axes = [time,freq] +plot_gsm_phases.argument.LoSoTo.Steps.plot.TableAxis = [ant] +plot_gsm_phases.argument.LoSoTo.Steps.plot.ColorAxis = [pol] +plot_gsm_phases.argument.LoSoTo.Steps.plot.Reference = {{ reference_station }} +plot_gsm_phases.argument.LoSoTo.Steps.plot.PlotFlag = False +plot_gsm_phases.argument.LoSoTo.Steps.plot.Prefix = {{ inspection_directory }}/gsm_phases_ + +# generate mapfile with the antenna tables of the concatenated target datafiles +gsmcal_antmap.control.kind = plugin +gsmcal_antmap.control.type = createMapfile +gsmcal_antmap.control.method = add_suffix_to_file +gsmcal_antmap.control.mapfile_in = dpppconcat.output.mapfile +gsmcal_antmap.control.add_suffix_to_file = /ANTENNA +gsmcal_antmap.control.mapfile_dir = input.output.mapfile_dir +gsmcal_antmap.control.filename = gsmcal_antmaps.mapfile + +# plot the phase solutions from the phase-only calibration of the target +make_structurefunction.control.kind = recipe +make_structurefunction.control.type = executable_args +make_structurefunction.control.executable = {{ structurefunction_script }} +make_structurefunction.control.max_per_node = {{ num_proc_per_node }} +make_structurefunction.control.mapfiles_in = [gsmcal_parmmap.output.mapfile,gsmcal_antmap.output.mapfile,check_unflagged_map.output.mapfile] +make_structurefunction.control.inputkeys = [inparmdb,inants,outbase] +make_structurefunction.control.arguments = [inparmdb,inants,outbase] + +# plot the phase solutions from the phase-only calibration of the target +old_plot_gsmphases.control.kind = recipe +old_plot_gsmphases.control.type = executable_args +old_plot_gsmphases.control.executable = {{ plotphases_script }} +old_plot_gsmphases.control.max_per_node = {{ num_proc_per_node }} +old_plot_gsmphases.control.mapfiles_in = [gsmcal_parmmap.output.mapfile,check_unflagged_map.output.mapfile] +old_plot_gsmphases.control.inputkeys = [infile,outbase] +old_plot_gsmphases.control.arguments = [-p,infile,outbase] + +# generate a mapfile of all the diagnostic plots +createmap_plots.control.kind = plugin +createmap_plots.control.type = createMapfile +createmap_plots.control.method = mapfile_from_folder +createmap_plots.control.mapfile_dir = input.output.mapfile_dir +createmap_plots.control.filename = diagnostic_plots.mapfile +createmap_plots.control.folder = input.output.working_directory/input.output.job_name +createmap_plots.control.pattern = *.png + +# copy the diagnostic plots to the results_directory +copy_plots.control.kind = recipe +copy_plots.control.type = executable_args +copy_plots.control.executable = /bin/cp +copy_plots.control.max_per_node = {{ num_proc_per_node_limit }} +copy_plots.control.mapfile_in = createmap_plots.output.mapfile +copy_plots.control.inputkey = source +copy_plots.control.arguments = [source,{{ inspection_directory }}] + +# create the results directory if needed +mk_results_dir.control.kind = plugin +mk_results_dir.control.type = makeDirectory +mk_results_dir.control.directory = {{ results_directory }} + +# make mapfile with the filenames of the results that we want +make_results_mapfile.control.kind = plugin +make_results_mapfile.control.type = makeResultsMapfile +make_results_mapfile.control.mapfile_dir = input.output.mapfile_dir +make_results_mapfile.control.filename = 
make_results_mapfile.mapfile +make_results_mapfile.control.mapfile_in = check_unflagged_map.output.mapfile +make_results_mapfile.control.target_dir = {{ results_directory }} +make_results_mapfile.control.make_target_dir = True +make_results_mapfile.control.new_suffix = .pre-cal.ms + +# move the results to where we want them +move_results.control.kind = recipe +move_results.control.type = executable_args +move_results.control.executable = /bin/mv +move_results.control.max_per_node = {{ num_proc_per_node_limit }} +move_results.control.mapfiles_in = [check_unflagged_map.output.mapfile,make_results_mapfile.output.mapfile] +move_results.control.inputkeys = [source,destination] +move_results.control.arguments = [source,destination] + From 8e8c8607616438008684d3dbe514a8486087c9c2 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Fri, 16 Sep 2016 18:02:32 +0200 Subject: [PATCH 06/19] Fixed trans.output mapfile --- Pre-Facet-Target-RawCombine.parset | 6 ++++-- Pre-Facet-Target-RawSingle.parset | 6 ++++-- Pre-Facet-Target.parset | 14 ++++---------- 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/Pre-Facet-Target-RawCombine.parset b/Pre-Facet-Target-RawCombine.parset index 39576db7..9446959c 100644 --- a/Pre-Facet-Target-RawCombine.parset +++ b/Pre-Facet-Target-RawCombine.parset @@ -140,7 +140,9 @@ trans.control.type = pythonplugin trans.control.executable = {{ transfer_script }} trans.control.max_per_node = {{ num_proc_per_node }} trans.control.error_tolerance = {{ error_tolerance }} -trans.argument.flags = [do_sortmap_target_maps.output.groupmap] +trans.control.mapfile_in = do_sortmap_target_maps.output.datamap +trans.control.inputkey = indata +trans.argument.flags = [indata] trans.argument.store_basename = caldata_transfer trans.argument.store_directory = {{ cal_values_directory }} trans.argument.newparmdbext = /instrument_amp_clock_offset @@ -153,7 +155,7 @@ ndppp_prep_target.control.type = dppp ndppp_prep_target.control.max_per_node = {{ num_proc_per_node_limit }} ndppp_prep_target.control.error_tolerance = {{ error_tolerance }} ndppp_prep_target.control.mapfile_out = do_sortmap_target_maps.output.groupmap # specify the output filenames -ndppp_prep_target.control.mapfiles_in = [do_sortmap_target_maps.output.datamap, do_sortmap_target_maps.output.flagmap, trans.output.mapfile] +ndppp_prep_target.control.mapfiles_in = [do_sortmap_target_maps.output.datamap, do_sortmap_target_maps.output.flagmap, trans.output.transfer_parmDB.mapfile] ndppp_prep_target.control.inputkeys = [input_file,channels_to_flag,parmdb_file] ndppp_prep_target.argument.numthreads = {{ max_dppp_threads }} ndppp_prep_target.argument.msin = input_file diff --git a/Pre-Facet-Target-RawSingle.parset b/Pre-Facet-Target-RawSingle.parset index f5525620..2cc790c5 100644 --- a/Pre-Facet-Target-RawSingle.parset +++ b/Pre-Facet-Target-RawSingle.parset @@ -121,7 +121,9 @@ trans.control.type = pythonplugin trans.control.executable = {{ transfer_script }} trans.control.max_per_node = {{ num_proc_per_node }} trans.control.error_tolerance = {{ error_tolerance }} -trans.argument.flags = [createmap_target.output.mapfile] +trans.control.mapfile_in = createmap_target.output.mapfile +trans.control.inputkey = indata +trans.argument.flags = [indata] trans.argument.store_basename = caldata_transfer trans.argument.store_directory = {{ cal_values_directory }} trans.argument.newparmdbext = /instrument_amp_clock_offset @@ -133,7 +135,7 @@ trans.argument.ionexPath = {{ ionex_path }} ndppp_prep_target.control.type = dppp 
ndppp_prep_target.control.max_per_node = {{ num_proc_per_node_limit }} ndppp_prep_target.control.error_tolerance = {{ error_tolerance }} -ndppp_prep_target.control.mapfiles_in = [createmap_target.output.mapfile, trans.output.mapfile] +ndppp_prep_target.control.mapfiles_in = [createmap_target.output.mapfile, trans.output.transfer_parmDB.mapfile] ndppp_prep_target.control.inputkeys = [input_file, parmdb_file] ndppp_prep_target.argument.numthreads = {{ max_dppp_threads }} ndppp_prep_target.argument.msin = input_file diff --git a/Pre-Facet-Target.parset b/Pre-Facet-Target.parset index 4b6c4516..d853e605 100644 --- a/Pre-Facet-Target.parset +++ b/Pre-Facet-Target.parset @@ -121,7 +121,9 @@ trans.control.type = pythonplugin trans.control.executable = {{ transfer_script }} trans.control.max_per_node = {{ num_proc_per_node }} trans.control.error_tolerance = {{ error_tolerance }} -trans.argument.flags = [createmap_target.output.mapfile] +trans.control.mapfile_in = createmap_target.output.mapfile +trans.control.inputkey = indata +trans.argument.flags = [indata] trans.argument.store_basename = caldata_transfer trans.argument.store_directory = {{ cal_values_directory }} trans.argument.newparmdbext = /instrument_amp_clock_offset @@ -129,20 +131,12 @@ trans.argument.ionex_server = {{ ionex_server }} trans.argument.ionex_prefix = {{ ionex_prefix }} trans.argument.ionexPath = {{ ionex_path }} -## generate mapfile with the parmDBs to be applied to the target data -#parmmap.control.kind = plugin -#parmmap.control.type = createMapfile -#parmmap.control.method = add_suffix_to_file -#parmmap.control.mapfile_in = createmap_target.output.mapfile -#parmmap.control.add_suffix_to_file = /instrument_amp_clock_offset -#parmmap.control.mapfile_dir = input.output.mapfile_dir -#parmmap.control.filename = targetparmdb.mapfile # run NDPPP on the target data to flag, transfer calibrator values, and average ndppp_prep_target.control.type = dppp ndppp_prep_target.control.max_per_node = {{ num_proc_per_node_limit }} ndppp_prep_target.control.error_tolerance = {{ error_tolerance }} -ndppp_prep_target.control.mapfiles_in = [createmap_target.output.mapfile, trans.output.mapfile] +ndppp_prep_target.control.mapfiles_in = [createmap_target.output.mapfile, trans.output.transfer_parmDB.mapfile] ndppp_prep_target.control.inputkeys = [input_file, parmdb_file] ndppp_prep_target.argument.numthreads = {{ max_dppp_threads }} ndppp_prep_target.argument.msin = input_file From 10aec54c64d209f437669d60b2a537c857de007f Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Mon, 19 Sep 2016 10:36:11 +0200 Subject: [PATCH 07/19] Fixed a bug in calling the TGSS script --- Pre-Facet-Target-RawCombine.parset | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pre-Facet-Target-RawCombine.parset b/Pre-Facet-Target-RawCombine.parset index 9446959c..d102813a 100644 --- a/Pre-Facet-Target-RawCombine.parset +++ b/Pre-Facet-Target-RawCombine.parset @@ -279,7 +279,7 @@ check_unflagged_map.control.prune_str = None # if wished, download the tgss skymodel for the target sky_tar.control.type = pythonplugin sky_tar.control.executable = {{ get_tgss_skymodel_script }} -sky_tar.argument.flags = [combine_target_map.output.mapfile] +sky_tar.argument.flags = [combine_data_target_map.output.mapfile] sky_tar.argument.DoDownload = {{ use_tgss_target }} sky_tar.argument.SkymodelPath = {{ target_skymodel }} sky_tar.argument.Radius = 5. 
#in degrees From b96b4b511b530c807d86eae4adf1dc39d4c11fdd Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Mon, 19 Sep 2016 14:06:49 +0200 Subject: [PATCH 08/19] updated comments --- Pre-Facet-Target-RawCombine.parset | 29 ++++++++++++++--------------- Pre-Facet-Target-RawSingle.parset | 29 ++++++++++++++--------------- Pre-Facet-Target.parset | 25 ++++++++++--------------- 3 files changed, 38 insertions(+), 45 deletions(-) diff --git a/Pre-Facet-Target-RawCombine.parset b/Pre-Facet-Target-RawCombine.parset index d102813a..1460ec12 100644 --- a/Pre-Facet-Target-RawCombine.parset +++ b/Pre-Facet-Target-RawCombine.parset @@ -1,15 +1,13 @@ # Pre-Facet Target Calibration Pipeline # -# Target part of the basic Pre-Facet calibration pipeline: +# Target part of the basic Pre-Facet calibration pipeline +# This version works on raw (non averaged) data, and combines the MSs in +# the first NDPPP step +# # - no demixing but A-team flagging, # - calibration transfer and averaging of target data in one go. # - checks frequncies in MSs to group files -# - the new "error_tolerance" option requires LOFAR software version >= 2.15 -# (Comment out all lines with "error_tolerance" if you want to use an older version.) -# - using LoSoTo from the pipeline requires latest executable_args node-script -# (available in the current LOFAR trunk, revision 33969 and later) -# (The diff, to do the patching by hand, can be found in the 6th comment at: -# https://github.com/lofar-astron/prefactor/issues/4 ) +# - requires LOFAR software version >= 2.17 # - expects shared filesystem, that all nodes can reach all files! # (E.g. a single workstation or compute cluster with shared filesystem # doesn't work on multiple nodes on CEP-2 or CEP3.) @@ -23,6 +21,7 @@ ! target_input_path = /data/scratch/username/PathToYourTargetData/ ! target_input_pattern = L*.MS # path to the skymodel for the phase-only calibration of the target +# (Existing file or path (incl. filename) where TGSS model is stored.) ! target_skymodel = /cep3home/username/Pre-Facet-Cal/skymodels/PleaseProvideTarget.skymodel # download the phase-only calibration skymodel from TGSS # "Force" : always download , "True" download if {{ target_skymodel }} does not exist , "False" : never download @@ -36,7 +35,7 @@ # where to put the resulting measurement sets generated by the pipeline ! results_directory = /media/scratch/test/username/WhereYouWantYourProcessedData/ -# NDPPP-compatible pattern for baselines or stations to be flagged +# NDPPP-compatible pattern for baselines or stations to be flagged (may be an empty list, i.e.: [] ) ! flag_baselines = [ CS013HBA* ] # minimum fraction of unflagged data after RFI flagging and A-team clipping ! min_unflagged_fraction = 0.5 @@ -64,21 +63,21 @@ ! get_tgss_skymodel_script = /homea/htb00/htb001/prefactor/scripts/download_tgss_skymodel_target.py ! sortmap_script = /homea/htb00/htb001/prefactor/scripts/sort_times_into_freqGroups.py ! check_flagged_script = /homea/htb00/htb001/prefactor/scripts/check_unflagged_fraction.py -! structurefunction_script = /cep3home/horneffer/Pre-Facet-Cal/bin/getStructure_from_phases.py +! structurefunction_script = /homea/htb00/htb001/prefactor/scripts/getStructure_from_phases.py ! plotphases_script = /homea/htb00/htb001/prefactor/scripts/plot_solutions_all_stations.py -! losoto_executable = /opt/cep/losoto/current/bin/losoto -! makesourcedb = /homea/htb00/htb003/lofar_jureca_2-15/bin/makesourcedb -! 
flagging_strategy = /opt/cep/lofar/lofar_versions/LOFAR-Release-2_15_2/lofar_build/install/gnu_opt/share/rfistrategies/HBAdefault +! losoto_executable = /homea/htb00/htb003/local_jureca/bin/losoto +! makesourcedb = /homea/htb00/htb003/lofar_jureca_2.17_stack2016a/bin/makesourcedb +! flagging_strategy = /homea/htb00/htb003/lofar_jureca_2.17_stack2016a/share/rfistrategies/HBAdefault # number of processes to use per step per node -! num_proc_per_node = 10 +! num_proc_per_node = 24 # number of processes to use per step per node for tasks with high i/o (dppp or cp) or memory (eg calibration) ! num_proc_per_node_limit = 4 # number of threads per process for NDPPP -! max_dppp_threads = 8 +! max_dppp_threads = 8 # set this to True if you want the pipeline run to continue if single bands fail -! error_tolerance = False +! error_tolerance = False ### Stuff that you probably don't need to modify # which steps to run diff --git a/Pre-Facet-Target-RawSingle.parset b/Pre-Facet-Target-RawSingle.parset index 2cc790c5..6ec001d1 100644 --- a/Pre-Facet-Target-RawSingle.parset +++ b/Pre-Facet-Target-RawSingle.parset @@ -1,15 +1,13 @@ # Pre-Facet Target Calibration Pipeline # -# Target part of the basic Pre-Facet calibration pipeline: +# Target part of the basic Pre-Facet calibration pipeline +# This version works on raw (non averaged) data, and combines MSs only +# after initial round of averaging, flagging and A-team clipping. +# # - no demixing but A-team flagging, # - calibration transfer and averaging of target data in one go. # - checks frequncies in MSs to group files -# - the new "error_tolerance" option requires LOFAR software version >= 2.15 -# (Comment out all lines with "error_tolerance" if you want to use an older version.) -# - using LoSoTo from the pipeline requires latest executable_args node-script -# (available in the current LOFAR trunk, revision 33969 and later) -# (The diff, to do the patching by hand, can be found in the 6th comment at: -# https://github.com/lofar-astron/prefactor/issues/4 ) +# - requires LOFAR software version >= 2.17 # - expects shared filesystem, that all nodes can reach all files! # (E.g. a single workstation or compute cluster with shared filesystem # doesn't work on multiple nodes on CEP-2 or CEP3.) @@ -23,6 +21,7 @@ ! target_input_path = /data/scratch/username/PathToYourTargetData/ ! target_input_pattern = L*.MS # path to the skymodel for the phase-only calibration of the target +# (Existing file or path (incl. filename) where TGSS model is stored.) ! target_skymodel = /cep3home/username/Pre-Facet-Cal/skymodels/PleaseProvideTarget.skymodel # download the phase-only calibration skymodel from TGSS # "Force" : always download , "True" download if {{ target_skymodel }} does not exist , "False" : never download @@ -36,7 +35,7 @@ # where to put the resulting measurement sets generated by the pipeline ! results_directory = /media/scratch/test/username/WhereYouWantYourProcessedData/ -# NDPPP-compatible pattern for baselines or stations to be flagged +# NDPPP-compatible pattern for baselines or stations to be flagged (may be an empty list, i.e.: [] ) ! flag_baselines = [ CS013HBA* ] # minimum fraction of unflagged data after RFI flagging and A-team clipping ! min_unflagged_fraction = 0.5 @@ -64,21 +63,21 @@ ! get_tgss_skymodel_script = /homea/htb00/htb001/prefactor/scripts/download_tgss_skymodel_target.py ! sortmap_script = /homea/htb00/htb001/prefactor/scripts/sort_times_into_freqGroups.py ! 
check_flagged_script = /homea/htb00/htb001/prefactor/scripts/check_unflagged_fraction.py -! structurefunction_script = /cep3home/horneffer/Pre-Facet-Cal/bin/getStructure_from_phases.py +! structurefunction_script = /homea/htb00/htb001/prefactor/scripts/getStructure_from_phases.py ! plotphases_script = /homea/htb00/htb001/prefactor/scripts/plot_solutions_all_stations.py -! losoto_executable = /opt/cep/losoto/current/bin/losoto -! makesourcedb = /homea/htb00/htb003/lofar_jureca_2-15/bin/makesourcedb -! flagging_strategy = /opt/cep/lofar/lofar_versions/LOFAR-Release-2_15_2/lofar_build/install/gnu_opt/share/rfistrategies/HBAdefault +! losoto_executable = /homea/htb00/htb003/local_jureca/bin/losoto +! makesourcedb = /homea/htb00/htb003/lofar_jureca_2.17_stack2016a/bin/makesourcedb +! flagging_strategy = /homea/htb00/htb003/lofar_jureca_2.17_stack2016a/share/rfistrategies/HBAdefault # number of processes to use per step per node -! num_proc_per_node = 10 +! num_proc_per_node = 24 # number of processes to use per step per node for tasks with high i/o (dppp or cp) or memory (eg calibration) ! num_proc_per_node_limit = 4 # number of threads per process for NDPPP -! max_dppp_threads = 8 +! max_dppp_threads = 8 # set this to True if you want the pipeline run to continue if single bands fail -! error_tolerance = False +! error_tolerance = False ### Stuff that you probably don't need to modify # which steps to run diff --git a/Pre-Facet-Target.parset b/Pre-Facet-Target.parset index d853e605..bc0c029b 100644 --- a/Pre-Facet-Target.parset +++ b/Pre-Facet-Target.parset @@ -4,12 +4,7 @@ # - no demixing but A-team flagging, # - calibration transfer and averaging of target data in one go. # - checks frequncies in MSs to group files -# - the new "error_tolerance" option requires LOFAR software version >= 2.15 -# (Comment out all lines with "error_tolerance" if you want to use an older version.) -# - using LoSoTo from the pipeline requires latest executable_args node-script -# (available in the current LOFAR trunk, revision 33969 and later) -# (The diff, to do the patching by hand, can be found in the 6th comment at: -# https://github.com/lofar-astron/prefactor/issues/4 ) +# - requires LOFAR software version >= 2.17 # - expects shared filesystem, that all nodes can reach all files! # (E.g. a single workstation or compute cluster with shared filesystem # doesn't work on multiple nodes on CEP-2 or CEP3.) @@ -23,6 +18,7 @@ ! target_input_path = /data/scratch/username/PathToYourTargetData/ ! target_input_pattern = L*.MS # path to the skymodel for the phase-only calibration of the target +# (Existing file or path (incl. filename) where TGSS model is stored.) ! target_skymodel = /cep3home/username/Pre-Facet-Cal/skymodels/PleaseProvideTarget.skymodel # download the phase-only calibration skymodel from TGSS # "Force" : always download , "True" download if {{ target_skymodel }} does not exist , "False" : never download @@ -36,7 +32,7 @@ # where to put the resulting measurement sets generated by the pipeline ! results_directory = /media/scratch/test/username/WhereYouWantYourProcessedData/ -# NDPPP-compatible pattern for baselines or stations to be flagged +# NDPPP-compatible pattern for baselines or stations to be flagged (may be an empty list, i.e.: [] ) ! flag_baselines = [ CS013HBA* ] # minimum fraction of unflagged data after RFI flagging and A-team clipping ! min_unflagged_fraction = 0.5 @@ -64,21 +60,21 @@ ! get_tgss_skymodel_script = /homea/htb00/htb001/prefactor/scripts/download_tgss_skymodel_target.py ! 
sortmap_script = /homea/htb00/htb001/prefactor/scripts/sort_times_into_freqGroups.py ! check_flagged_script = /homea/htb00/htb001/prefactor/scripts/check_unflagged_fraction.py -! structurefunction_script = /cep3home/horneffer/Pre-Facet-Cal/bin/getStructure_from_phases.py +! structurefunction_script = /homea/htb00/htb001/prefactor/scripts/getStructure_from_phases.py ! plotphases_script = /homea/htb00/htb001/prefactor/scripts/plot_solutions_all_stations.py -! losoto_executable = /opt/cep/losoto/current/bin/losoto -! makesourcedb = /homea/htb00/htb003/lofar_jureca_2-15/bin/makesourcedb -! flagging_strategy = /opt/cep/lofar/lofar_versions/LOFAR-Release-2_15_2/lofar_build/install/gnu_opt/share/rfistrategies/HBAdefault +! losoto_executable = /homea/htb00/htb003/local_jureca/bin/losoto +! makesourcedb = /homea/htb00/htb003/lofar_jureca_2.17_stack2016a/bin/makesourcedb +! flagging_strategy = /homea/htb00/htb003/lofar_jureca_2.17_stack2016a/share/rfistrategies/HBAdefault # number of processes to use per step per node -! num_proc_per_node = 10 +! num_proc_per_node = 24 # number of processes to use per step per node for tasks with high i/o (dppp or cp) or memory (eg calibration) ! num_proc_per_node_limit = 4 # number of threads per process for NDPPP -! max_dppp_threads = 8 +! max_dppp_threads = 8 # set this to True if you want the pipeline run to continue if single bands fail -! error_tolerance = False +! error_tolerance = False ### Stuff that you probably don't need to modify # which steps to run @@ -131,7 +127,6 @@ trans.argument.ionex_server = {{ ionex_server }} trans.argument.ionex_prefix = {{ ionex_prefix }} trans.argument.ionexPath = {{ ionex_path }} - # run NDPPP on the target data to flag, transfer calibrator values, and average ndppp_prep_target.control.type = dppp ndppp_prep_target.control.max_per_node = {{ num_proc_per_node_limit }} From 7094a8efc9810a0e704aef117cf60bae40d575ae Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Mon, 19 Sep 2016 14:19:33 +0200 Subject: [PATCH 09/19] more updated comments --- Pre-Facet-Calibrator-RawCombine.parset | 5 +++-- Pre-Facet-Calibrator-RawSingle.parset | 5 +++-- Pre-Facet-Calibrator.parset | 20 ++++++++------------ 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/Pre-Facet-Calibrator-RawCombine.parset b/Pre-Facet-Calibrator-RawCombine.parset index b493f98f..03a2cacb 100644 --- a/Pre-Facet-Calibrator-RawCombine.parset +++ b/Pre-Facet-Calibrator-RawCombine.parset @@ -4,6 +4,7 @@ # - Works on raw data, that has not been processed with NDPPP before. # - Combines several subbands into one MS in the first NDPPP step to # reduce the number of files. (May be more efficient on some machines.) +# - requires LOFAR software version >= 2.17 # - Expects shared filesystem, that all nodes can reach all files! # (E.g. a single workstation or compute cluster with shared filesystem # doesn't work on multiple nodes on CEP-2 or CEP3.) @@ -24,7 +25,7 @@ # where to put the files with the calibration values that are to be transferred to the target ! cal_values_directory = /media/scratch/test/username/WhereYouWantToStoreTheValuesFromTheCalibrator/ -# NDPPP-compatible pattern for baselines or stations to be flagged +# NDPPP-compatible pattern for baselines or stations to be flagged (may be an empty list, i.e.: [] ) ! flag_baselines = [ CS013HBA* ] # name of the station that will be used as a reference for the phase-plots ! reference_station = CS001HBA0 @@ -39,7 +40,7 @@ ! plotsols_script = /homea/htb00/htb001/prefactor/scripts/examine_npys.py ! 
fit_XYoffset_script = /homea/htb00/htb001/prefactor/scripts/find_cal_global_phaseoffset_losoto.py ! plotphases_script = /homea/htb00/htb001/prefactor/scripts/plot_solutions_all_stations.py -! losoto_executable = /opt/cep/losoto/current/bin/losoto +! losoto_executable = /homea/htb00/htb003/local_jureca/bin/losoto # number of processes to use per step per node ! num_proc_per_node = 24 diff --git a/Pre-Facet-Calibrator-RawSingle.parset b/Pre-Facet-Calibrator-RawSingle.parset index 6747da39..c31b92a8 100644 --- a/Pre-Facet-Calibrator-RawSingle.parset +++ b/Pre-Facet-Calibrator-RawSingle.parset @@ -2,6 +2,7 @@ # # Calibrator part of the Pre-Facet calibration pipeline for raw data: # - Works on raw data, that has not been processed with NDPPP before. +# - requires LOFAR software version >= 2.17 # - Expects shared filesystem, that all nodes can reach all files! # (E.g. a single workstation or compute cluster with shared filesystem # doesn't work on multiple nodes on CEP-2 or CEP3.) @@ -20,7 +21,7 @@ # where to put the files with the calibration values that are to be transferred to the target ! cal_values_directory = /media/scratch/test/username/WhereYouWantToStoreTheValuesFromTheCalibrator/ -# NDPPP-compatible pattern for baselines or stations to be flagged +# NDPPP-compatible pattern for baselines or stations to be flagged (may be an empty list, i.e.: [] ) ! flag_baselines = [ CS013HBA* ] # name of the station that will be used as a reference for the phase-plots ! reference_station = CS001HBA0 @@ -34,7 +35,7 @@ ! plotsols_script = /homea/htb00/htb001/prefactor/scripts/examine_npys.py ! fit_XYoffset_script = /homea/htb00/htb001/prefactor/scripts/find_cal_global_phaseoffset_losoto.py ! plotphases_script = /homea/htb00/htb001/prefactor/scripts/plot_solutions_all_stations.py -! losoto_executable = /opt/cep/losoto/current/bin/losoto +! losoto_executable = /homea/htb00/htb003/local_jureca/bin/losoto # number of processes to use per step per node ! num_proc_per_node = 24 diff --git a/Pre-Facet-Calibrator.parset b/Pre-Facet-Calibrator.parset index bdb02892..aea86585 100644 --- a/Pre-Facet-Calibrator.parset +++ b/Pre-Facet-Calibrator.parset @@ -1,12 +1,7 @@ # Pre-Facet Calibrator Calibration Pipeline # # Calibrator part of the basic Pre-Facet calibration pipeline: -# - the new "error_tolerance" option requires LOFAR software version >= 2.15 -# (Comment out all lines with "error_tolerance" if you want to use an older version.) -# - using LoSoTo from the pipeline requires latest executable_args node-script -# (available in the current LOFAR trunk, revision 33969 and later) -# (The diff, to do the patching by hand, can be found in the 6th comment at: -# https://github.com/lofar-astron/prefactor/issues/4 ) +# - requires LOFAR software version >= 2.17 # - expects shared filesystem, that all nodes can reach all files! # (E.g. a single workstation or compute cluster with shared filesystem # doesn't work on multiple nodes on CEP-2 or CEP3.) @@ -18,14 +13,15 @@ # where to find the calibrator data ! cal_input_path = /data/scratch/username/PathToYourCalibratorData/ ! cal_input_pattern = L*.MS -# Path to the skymodels for the Calibrator -! calibrator_path_skymodel = /cep3home/username/Pre-Facet-Cal/skymodels/ +# Either: the path to the skymodel for the calibrator (ASCII-file) +# or: the path to a directory with the skymodels for the different calibrators +! calibrator_path_skymodel = /cep3home/username/Pre-Facet-Cal/skymodels/ # where to put the inspection plots generated by the pipeline -! 
inspection_directory = /media/scratch/test/username/WhereYouWantInspectionPlotsEtc/ +! inspection_directory = /media/scratch/test/username/WhereYouWantInspectionPlotsEtc/ # where to put the files with the calibration values that are to be transferred to the target -! cal_values_directory = /media/scratch/test/username/WhereYouWantToStoreTheValuesFromTheCalibrator/ +! cal_values_directory = /media/scratch/test/username/WhereYouWantToStoreTheValuesFromTheCalibrator/ -# NDPPP-compatible pattern for baselines or stations to be flagged +# NDPPP-compatible pattern for baselines or stations to be flagged (may be an empty list, i.e.: [] ) ! flag_baselines = [ CS013HBA* ] # name of the station that will be used as a reference for the phase-plots ! reference_station = CS001HBA0 @@ -40,7 +36,7 @@ ! plotsols_script = /homea/htb00/htb001/prefactor/scripts/examine_npys.py ! fit_XYoffset_script = /homea/htb00/htb001/prefactor/scripts/find_cal_global_phaseoffset_losoto.py ! plotphases_script = /homea/htb00/htb001/prefactor/scripts/plot_solutions_all_stations.py -! losoto_executable = /opt/cep/losoto/current/bin/losoto +! losoto_executable = /homea/htb00/htb003/local_jureca/bin/losoto # number of processes to use per step per node ! num_proc_per_node = 24 From ce8e723e86e315f04620e55f6fe5652b69b6c9ec Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Thu, 22 Sep 2016 11:07:28 +0200 Subject: [PATCH 10/19] Switched off generation of the pdfs but copy the txt file #84 --- Pre-Facet-Calibrator-RawCombine.parset | 34 ++++++++++++------------ Pre-Facet-Calibrator-RawSingle.parset | 36 +++++++++++++------------- Pre-Facet-Calibrator.parset | 34 ++++++++++++------------ scripts/amplitudes_losoto_3.py | 2 +- scripts/sort_times_into_freqGroups.py | 21 +++++++++------ 5 files changed, 66 insertions(+), 61 deletions(-) diff --git a/Pre-Facet-Calibrator-RawCombine.parset b/Pre-Facet-Calibrator-RawCombine.parset index 03a2cacb..f075ba21 100644 --- a/Pre-Facet-Calibrator-RawCombine.parset +++ b/Pre-Facet-Calibrator-RawCombine.parset @@ -54,7 +54,7 @@ ### Stuff that you probably don't need to modify # which steps to run -pipeline.steps=[createmap_cal, combine_cal_map, sortmap_cal, do_sortmap_cal_maps, ndppp_prep_cal, calib_cal, h5_imp_cal_map, h5imp_cal, mk_inspect_dir, copy_h5imp_cal, fitclock, ampl, plots, phase, plot_cal_phases, createmap_cal_pngs, copy_cal_pngs, createmap_cal_pdfs, copy_cal_pdfs, mk_cal_values_dir, createmap_cal_npys, copy_cal_npys] +pipeline.steps=[createmap_cal, combine_cal_map, sortmap_cal, do_sortmap_cal_maps, ndppp_prep_cal, calib_cal, h5_imp_cal_map, h5imp_cal, mk_inspect_dir, copy_h5imp_cal, fitclock, ampl, plots, phase, plot_cal_phases, createmap_cal_pngs, copy_cal_pngs, createmap_cal_txts, copy_cal_txts, mk_cal_values_dir, createmap_cal_npys, copy_cal_npys] # generate a mapfile of all the calibrator data createmap_cal.control.kind = plugin @@ -247,23 +247,23 @@ copy_cal_pngs.control.mapfile_in = createmap_cal_pngs.output.mapfile copy_cal_pngs.control.inputkey = source copy_cal_pngs.control.arguments = [source,{{ inspection_directory }}] -# generate a mapfile of all the diagnostic pdfs -createmap_cal_pdfs.control.kind = plugin -createmap_cal_pdfs.control.type = createMapfile -createmap_cal_pdfs.control.method = mapfile_from_folder -createmap_cal_pdfs.control.mapfile_dir = input.output.mapfile_dir -createmap_cal_pdfs.control.filename = diagnostic_pdfs.mapfile -createmap_cal_pdfs.control.folder = input.output.working_directory/input.output.job_name -createmap_cal_pdfs.control.pattern = *.pdf 
+# generate a mapfile of all the diagnostic txts +createmap_cal_txts.control.kind = plugin +createmap_cal_txts.control.type = createMapfile +createmap_cal_txts.control.method = mapfile_from_folder +createmap_cal_txts.control.mapfile_dir = input.output.mapfile_dir +createmap_cal_txts.control.filename = diagnostic_txts.mapfile +createmap_cal_txts.control.folder = input.output.working_directory/input.output.job_name +createmap_cal_txts.control.pattern = *.txt -# copy the diagnostic pdfs to the inspection directory -copy_cal_pdfs.control.kind = recipe -copy_cal_pdfs.control.type = executable_args -copy_cal_pdfs.control.executable = /bin/cp -copy_cal_pdfs.control.max_per_node = {{ num_proc_per_node_limit }} -copy_cal_pdfs.control.mapfile_in = createmap_cal_pdfs.output.mapfile -copy_cal_pdfs.control.inputkey = source -copy_cal_pdfs.control.arguments = [source,{{ inspection_directory }}] +# copy the diagnostic txts to the inspection directory +copy_cal_txts.control.kind = recipe +copy_cal_txts.control.type = executable_args +copy_cal_txts.control.executable = /bin/cp +copy_cal_txts.control.max_per_node = {{ num_proc_per_node_limit }} +copy_cal_txts.control.mapfile_in = createmap_cal_txts.output.mapfile +copy_cal_txts.control.inputkey = source +copy_cal_txts.control.arguments = [source,{{ inspection_directory }}] # create the cal_values_directory if needed mk_cal_values_dir.control.kind = plugin diff --git a/Pre-Facet-Calibrator-RawSingle.parset b/Pre-Facet-Calibrator-RawSingle.parset index c31b92a8..83ed4060 100644 --- a/Pre-Facet-Calibrator-RawSingle.parset +++ b/Pre-Facet-Calibrator-RawSingle.parset @@ -49,7 +49,7 @@ ### Stuff that you probably don't need to modify # which steps to run -pipeline.steps=[createmap_cal, ndppp_prep_cal, calib_cal, h5_imp_cal_map, h5imp_cal, mk_inspect_dir, copy_h5imp_cal, fitclock, ampl, plots, phase, plot_cal_phases, createmap_cal_pngs, copy_cal_pngs, createmap_cal_pdfs, copy_cal_pdfs, mk_cal_values_dir, createmap_cal_npys, copy_cal_npys] +pipeline.steps=[createmap_cal, ndppp_prep_cal, calib_cal, h5_imp_cal_map, h5imp_cal, mk_inspect_dir, copy_h5imp_cal, fitclock, ampl, plots, phase, plot_cal_phases, createmap_cal_pngs, copy_cal_pngs, createmap_cal_txts, copy_cal_txts, mk_cal_values_dir, createmap_cal_npys, copy_cal_npys] # generate a mapfile of all the calibrator data createmap_cal.control.kind = plugin @@ -209,23 +209,23 @@ copy_cal_pngs.control.mapfile_in = createmap_cal_pngs.output.mapfile copy_cal_pngs.control.inputkey = source copy_cal_pngs.control.arguments = [source,{{ inspection_directory }}] -# generate a mapfile of all the diagnostic pdfs -createmap_cal_pdfs.control.kind = plugin -createmap_cal_pdfs.control.type = createMapfile -createmap_cal_pdfs.control.method = mapfile_from_folder -createmap_cal_pdfs.control.mapfile_dir = input.output.mapfile_dir -createmap_cal_pdfs.control.filename = diagnostic_pdfs.mapfile -createmap_cal_pdfs.control.folder = input.output.working_directory/input.output.job_name -createmap_cal_pdfs.control.pattern = *.pdf - -# copy the diagnostic pdfs to the inspection directory -copy_cal_pdfs.control.kind = recipe -copy_cal_pdfs.control.type = executable_args -copy_cal_pdfs.control.executable = /bin/cp -copy_cal_pdfs.control.max_per_node = {{ num_proc_per_node_limit }} -copy_cal_pdfs.control.mapfile_in = createmap_cal_pdfs.output.mapfile -copy_cal_pdfs.control.inputkey = source -copy_cal_pdfs.control.arguments = [source,{{ inspection_directory }}] +# generate a mapfile of all the diagnostic txts +createmap_cal_txts.control.kind = 
plugin +createmap_cal_txts.control.type = createMapfile +createmap_cal_txts.control.method = mapfile_from_folder +createmap_cal_txts.control.mapfile_dir = input.output.mapfile_dir +createmap_cal_txts.control.filename = diagnostic_txts.mapfile +createmap_cal_txts.control.folder = input.output.working_directory/input.output.job_name +createmap_cal_txts.control.pattern = *.txt + +# copy the diagnostic txts to the inspection directory +copy_cal_txts.control.kind = recipe +copy_cal_txts.control.type = executable_args +copy_cal_txts.control.executable = /bin/cp +copy_cal_txts.control.max_per_node = {{ num_proc_per_node_limit }} +copy_cal_txts.control.mapfile_in = createmap_cal_txts.output.mapfile +copy_cal_txts.control.inputkey = source +copy_cal_txts.control.arguments = [source,{{ inspection_directory }}] # create the cal_values_directory if needed mk_cal_values_dir.control.kind = plugin diff --git a/Pre-Facet-Calibrator.parset b/Pre-Facet-Calibrator.parset index aea86585..020e8e34 100644 --- a/Pre-Facet-Calibrator.parset +++ b/Pre-Facet-Calibrator.parset @@ -50,7 +50,7 @@ ### Stuff that you probably don't need to modify # which steps to run -pipeline.steps=[createmap_cal, ndppp_prep_cal, combine_data_cal_map, sky_cal, sky_cal_path, calib_cal, h5_imp_cal_map, h5imp_cal, mk_inspect_dir, copy_h5imp_cal, fitclock, ampl, plots, phase, plot_cal_phases, createmap_cal_pngs, copy_cal_pngs, createmap_cal_pdfs, copy_cal_pdfs, mk_cal_values_dir, createmap_cal_npys, copy_cal_npys] +pipeline.steps=[createmap_cal, ndppp_prep_cal, combine_data_cal_map, sky_cal, sky_cal_path, calib_cal, h5_imp_cal_map, h5imp_cal, mk_inspect_dir, copy_h5imp_cal, fitclock, ampl, plots, phase, plot_cal_phases, createmap_cal_pngs, copy_cal_pngs, createmap_cal_txts, copy_cal_txts, mk_cal_values_dir, createmap_cal_npys, copy_cal_npys] # generate a mapfile of all the calibrator data createmap_cal.control.kind = plugin @@ -221,23 +221,23 @@ copy_cal_pngs.control.mapfile_in = createmap_cal_pngs.output.mapfile copy_cal_pngs.control.inputkey = source copy_cal_pngs.control.arguments = [source,{{ inspection_directory }}] -# generate a mapfile of all the diagnostic pdfs -createmap_cal_pdfs.control.kind = plugin -createmap_cal_pdfs.control.type = createMapfile -createmap_cal_pdfs.control.method = mapfile_from_folder -createmap_cal_pdfs.control.mapfile_dir = input.output.mapfile_dir -createmap_cal_pdfs.control.filename = diagnostic_pdfs.mapfile -createmap_cal_pdfs.control.folder = input.output.working_directory/input.output.job_name -createmap_cal_pdfs.control.pattern = *.pdf +# generate a mapfile of all the diagnostic txts +createmap_cal_txts.control.kind = plugin +createmap_cal_txts.control.type = createMapfile +createmap_cal_txts.control.method = mapfile_from_folder +createmap_cal_txts.control.mapfile_dir = input.output.mapfile_dir +createmap_cal_txts.control.filename = diagnostic_txts.mapfile +createmap_cal_txts.control.folder = input.output.working_directory/input.output.job_name +createmap_cal_txts.control.pattern = *.txt -# copy the diagnostic pdfs to the inspection directory -copy_cal_pdfs.control.kind = recipe -copy_cal_pdfs.control.type = executable_args -copy_cal_pdfs.control.executable = /bin/cp -copy_cal_pdfs.control.max_per_node = {{ num_proc_per_node_limit }} -copy_cal_pdfs.control.mapfile_in = createmap_cal_pdfs.output.mapfile -copy_cal_pdfs.control.inputkey = source -copy_cal_pdfs.control.arguments = [source,{{ inspection_directory }}] +# copy the diagnostic txts to the inspection directory +copy_cal_txts.control.kind = 
recipe +copy_cal_txts.control.type = executable_args +copy_cal_txts.control.executable = /bin/cp +copy_cal_txts.control.max_per_node = {{ num_proc_per_node_limit }} +copy_cal_txts.control.mapfile_in = createmap_cal_txts.output.mapfile +copy_cal_txts.control.inputkey = source +copy_cal_txts.control.arguments = [source,{{ inspection_directory }}] # create the cal_values_directory if needed mk_cal_values_dir.control.kind = plugin diff --git a/scripts/amplitudes_losoto_3.py b/scripts/amplitudes_losoto_3.py index f631c466..3ae32dc5 100755 --- a/scripts/amplitudes_losoto_3.py +++ b/scripts/amplitudes_losoto_3.py @@ -49,7 +49,7 @@ make_matrixplot = True source_id = 0 -show_plot = True +show_plot = False print "bad SBs:",bad_sblist diff --git a/scripts/sort_times_into_freqGroups.py b/scripts/sort_times_into_freqGroups.py index 2634cae1..ea5e7715 100755 --- a/scripts/sort_times_into_freqGroups.py +++ b/scripts/sort_times_into_freqGroups.py @@ -156,18 +156,16 @@ def main(ms_input, filename=None, mapfile_dir=None, numSB=-1, hosts=None, NDPPPf sw = pt.table(ms+'::SPECTRAL_WINDOW', ack=False) freq = sw.col('REF_FREQUENCY')[0] if first: - freq_width = sw.col('TOTAL_BANDWIDTH')[0] + file_bandwidth = sw.col('TOTAL_BANDWIDTH')[0] nchans = sw.col('CHAN_WIDTH')[0].shape[0] chwidth = sw.col('CHAN_WIDTH')[0][0] - maxfreq = freq - minfreq = freq + freqset = set([freq]) first = False else: - assert freq_width == sw.col('TOTAL_BANDWIDTH')[0] + assert file_bandwidth == sw.col('TOTAL_BANDWIDTH')[0] assert nchans == sw.col('CHAN_WIDTH')[0].shape[0] assert chwidth == sw.col('CHAN_WIDTH')[0][0] - maxfreq = max(maxfreq,freq) - minfreq = min(minfreq,freq) + freqset.add(freq) freqs.append(freq) sw.close() time_groups[time]['freq_names'] = zip(freqs,time_groups[time]['files']) @@ -176,11 +174,18 @@ def main(ms_input, filename=None, mapfile_dir=None, numSB=-1, hosts=None, NDPPPf #time_groups[time]['freqs'] = [freq for (freq,name) in freq_names] print "sort_times_into_freqGroups: Collected the frequencies for the time-groups" + freqliste = np.array(list(freqset)) + freqliste.sort() + freq_width = np.min(freqliste[1:]-freqliste[:-1]) + if file_bandwidth > freq_width: + raise ValueError("Bandwidth of files is larger than minimum frequency step between two files!") + if file_bandwidth < (freq_width/2.): + raise ValueError("Bandwidth of files is smaller than half the minimum frequency step between two files! (More than half the data is missing.)") #the new output map filemap = MultiDataMap() groupmap = DataMap() - maxfreq = maxfreq+freq_width/2. - minfreq = minfreq-freq_width/2. + maxfreq = np.max(freqliste)+freq_width/2. + minfreq = np.min(freqliste)-freq_width/2. numFiles = round((maxfreq-minfreq)/freq_width) numSB = int(numSB) if numSB > 0: From 47c0e87970e679b145c71c8108b21a8ae6dd08d6 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Wed, 28 Sep 2016 13:48:31 +0200 Subject: [PATCH 11/19] Compute RM only every 5 min. (assuming I got the syntax right...) 
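A rough, illustrative note on what the new timestep argument buys (the 8 h observation length and 4 s sampling below are assumed example numbers, not taken from any particular dataset): with timestep=300. RMextract evaluates the ionospheric RM on a 5-minute grid instead of once per time sample, which is ample for the slowly varying rotation measure.

# Illustrative arithmetic only; the assumed numbers are not from a real observation.
obs_length_s = 8 * 3600      # assumed observation length
integration_s = 4.0          # assumed time resolution of the averaged data
print("RM evaluations without a timestep (one per sample): %d" % (obs_length_s / integration_s))   # 7200
print("RM evaluations with timestep=300.: %d" % (obs_length_s / 300.0))                            # 96
# The patched call itself, as in the diff below:
# rmdict = getRM.getRM(MSinfo.msname, server=server, prefix=prefix,
#                      ionexPath=ionexPath, timestep=300.)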
--- scripts/transfer_gains_RMextract.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/transfer_gains_RMextract.py b/scripts/transfer_gains_RMextract.py index b867dc17..5c6ad459 100755 --- a/scripts/transfer_gains_RMextract.py +++ b/scripts/transfer_gains_RMextract.py @@ -78,7 +78,7 @@ def get_COMMONROTATION_vals(MSinfo, server, prefix, ionexPath): path where we can find or store the IONEX files """ from RMextract import getRM - rmdict = getRM.getRM(MSinfo.msname,server=server,prefix=prefix,ionexPath=ionexPath) + rmdict = getRM.getRM(MSinfo.msname,server=server,prefix=prefix,ionexPath=ionexPath,timestep=300.) return rmdict From 63610306b015d4efb2ee07dd2e5b9f031c5c8bb2 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Thu, 29 Sep 2016 10:26:06 +0200 Subject: [PATCH 12/19] One solution per 4 channels (1 SB) not per 10 SBs --- Pre-Facet-Target-RawCombine.parset | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pre-Facet-Target-RawCombine.parset b/Pre-Facet-Target-RawCombine.parset index 1460ec12..3e16fa6f 100644 --- a/Pre-Facet-Target-RawCombine.parset +++ b/Pre-Facet-Target-RawCombine.parset @@ -339,7 +339,7 @@ gsmcal_solve.filter.blrange = [150, 999999] gsmcal_solve.argument.gaincal.type = gaincal gsmcal_solve.argument.gaincal.maxiter = 500 gsmcal_solve.argument.gaincal.caltype = phaseonly -gsmcal_solve.argument.gaincal.nchan = 0 +gsmcal_solve.argument.gaincal.nchan = 4 gsmcal_solve.argument.gaincal.solint = 1 gsmcal_solve.argument.gaincal.sourcedb = expand_sourcedb_target.output.mapfile gsmcal_solve.argument.gaincal.parmdb = gsmcal_parmmap.output.mapfile From 160365a1ce5b678cf8a4d9437643e80ea77a91c1 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Fri, 7 Oct 2016 10:57:52 +0200 Subject: [PATCH 13/19] Solfing on 2 MHz block is what we want after all. --- Pre-Facet-Target-RawCombine.parset | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pre-Facet-Target-RawCombine.parset b/Pre-Facet-Target-RawCombine.parset index 3e16fa6f..1460ec12 100644 --- a/Pre-Facet-Target-RawCombine.parset +++ b/Pre-Facet-Target-RawCombine.parset @@ -339,7 +339,7 @@ gsmcal_solve.filter.blrange = [150, 999999] gsmcal_solve.argument.gaincal.type = gaincal gsmcal_solve.argument.gaincal.maxiter = 500 gsmcal_solve.argument.gaincal.caltype = phaseonly -gsmcal_solve.argument.gaincal.nchan = 4 +gsmcal_solve.argument.gaincal.nchan = 0 gsmcal_solve.argument.gaincal.solint = 1 gsmcal_solve.argument.gaincal.sourcedb = expand_sourcedb_target.output.mapfile gsmcal_solve.argument.gaincal.parmdb = gsmcal_parmmap.output.mapfile From 373faa2a2c21d866d3fc1e9acc2da869d7a9944b Mon Sep 17 00:00:00 2001 From: Wendy Williams Date: Fri, 7 Oct 2016 10:31:24 +0100 Subject: [PATCH 14/19] only compute nwavelengths for first band nwavelengths uses self.timestep_sec which is set in get_averaging_steps(), but I think this only needs to be calculated from one band. 
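The change below is plain loop-invariant hoisting: nwavelengths() depends only on the averaging timestep, which is the same for every band, so it can be computed once from bands[0]. A minimal sketch of the pattern follows; the Band class here is a stand-in with invented internals, and only the method names mirror InitSubtract_deep_sort_and_compute.py.

# Toy sketch only -- the class body is made up for illustration.
class Band(object):
    def __init__(self, name):
        self.name = name
        self.timestep_sec = 4.0                  # assumed, identical for all bands
    def get_averaging_steps(self):
        return (16, self.timestep_sec)           # made-up (freqstep, timestep)
    def nwavelengths(self, cell_high_deg, cell_low_deg, timestep):
        return (timestep / cell_high_deg, timestep / cell_low_deg)   # placeholder maths

bands = [Band('band_%d' % i) for i in range(3)]
(freqstep, timestep) = bands[0].get_averaging_steps()
# hoisted out of the loop: computed once from the first band
(nwl_high, nwl_low) = bands[0].nwavelengths(0.002, 0.02, timestep)
for band in bands:
    print("%s %s %s" % (band.name, nwl_high, nwl_low))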
--- scripts/InitSubtract_deep_sort_and_compute.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/InitSubtract_deep_sort_and_compute.py b/scripts/InitSubtract_deep_sort_and_compute.py index 821cf9d8..efa8e070 100755 --- a/scripts/InitSubtract_deep_sort_and_compute.py +++ b/scripts/InitSubtract_deep_sort_and_compute.py @@ -292,6 +292,7 @@ def main(ms_input, outmapname=None, mapfile_dir=None, cellsize_highres_deg=0.002 nchansout_clean1 = np.int(nbands) (freqstep, timestep) = bands[0].get_averaging_steps() + (nwavelengths_high, nwavelengths_low) = bands[0].nwavelengths(cellsize_highres_deg, cellsize_lowres_deg, timestep) for band in bands: print "InitSubtract_sort_and_compute.py: Working on Band:",band.name group_map.append(MultiDataProduct('localhost', band.files, False)) @@ -310,7 +311,6 @@ def main(ms_input, outmapname=None, mapfile_dir=None, cellsize_highres_deg=0.002 imsize_low_pad = band.get_optimum_size(int(imsize_low_res*image_padding)) imsize_low_pad_stretch = band.get_optimum_size(int(imsize_low_res*image_padding*y_axis_stretch)) low_paddedsize_map.append(DataProduct('localhost', str(imsize_low_pad)+" "+str(imsize_low_pad_stretch), False)) - (nwavelengths_high, nwavelengths_low) = band.nwavelengths(cellsize_highres_deg, cellsize_lowres_deg, timestep) print band.freq/1e6, imsize_high_res, imsize_high_res_stretch, imsize_high_pad, imsize_high_pad_stretch, imsize_low_res, imsize_low_res_stretch, imsize_low_pad, imsize_low_pad_stretch, nwavelengths_high, nwavelengths_low From 5d8ce1b83e8a2d5f05cdf7c61e67f580e0a71564 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Tue, 11 Oct 2016 16:58:17 +0200 Subject: [PATCH 15/19] added explanation to pipeline variables for inspection_directory and local_scratch_dir --- Initial-Subtract-Deep.parset | 2 ++ Initial-Subtract.parset | 3 +++ 2 files changed, 5 insertions(+) diff --git a/Initial-Subtract-Deep.parset b/Initial-Subtract-Deep.parset index 9c206dc3..0c8e0258 100644 --- a/Initial-Subtract-Deep.parset +++ b/Initial-Subtract-Deep.parset @@ -39,7 +39,9 @@ # name of the direction independent parmDBs inside the measurement sets # needs to include the leading "/" to indicate that the parmDB is a subdirectory of the MS ! direction_indep_parmDBs = /instrument_directionindependent +# where to put the inspection plots generated by the pipeline ! inspection_directory = /media/scratch/test/username/WhereYouWantImagesAndInspectionPlots/ +# scratch directory for wsclean (can be local to the processing nodes!) ! local_scratch_dir = /local/username ##### imaging parameters (Feel free to leave them untouched.) diff --git a/Initial-Subtract.parset b/Initial-Subtract.parset index df1b7ca8..46f16b0b 100644 --- a/Initial-Subtract.parset +++ b/Initial-Subtract.parset @@ -34,7 +34,10 @@ # name of the direction independent parmDBs inside the measurement sets # needs to include the leading "/" to indicate that the parmDB is a subdirectory of the MS ! direction_indep_parmDBs = /instrument_directionindependent +# where to put the inspection plots generated by the pipeline ! inspection_directory = /media/scratch/test/username/WhereYouWantImagesAndInspectionPlots/ +# scratch directory for wsclean (can be local to the processing nodes!) +! local_scratch_dir = /local/username ##### imaging parameters (Feel free to leave them untouched.) 
#### specify the image parameters here From 914b1069dd2438fb0d0e52de97ebc60c5d365551 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Tue, 11 Oct 2016 17:54:09 +0200 Subject: [PATCH 16/19] Also wsclean_high2 does not need to update the model column --- Initial-Subtract.parset | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Initial-Subtract.parset b/Initial-Subtract.parset index 46f16b0b..6bcb99f2 100644 --- a/Initial-Subtract.parset +++ b/Initial-Subtract.parset @@ -200,7 +200,7 @@ wsclean_high2.control.max_per_node = {{ max_imagers_per_node }} wsclean_high2.control.error_tolerance = {{ error_tolerance }} wsclean_high2.control.mapfiles_in = [do_magic_maps.output.groupmap,mask_high.output.mapfile,do_magic_maps.output.high_padsize_map,do_magic_maps.output.high_size_map,mask_high.output.threshold_5sig.mapfile] wsclean_high2.control.inputkeys = [msfile,fitsmask,paddedsize,outputsize,threshold] -wsclean_high2.argument.flags = [-update-model-required,-reorder,-fitbeam,msfile] +wsclean_high2.argument.flags = [-no-update-model-required,-reorder,-fitbeam,msfile] wsclean_high2.argument.fitsmask = fitsmask wsclean_high2.argument.size = paddedsize wsclean_high2.argument.trim = outputsize From 4f92df8eb1f1e9a05dad762dd3491c053f192a32 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Fri, 14 Oct 2016 12:19:15 +0200 Subject: [PATCH 17/19] fixed calling of sub-pipelines --- Pre-Facet-Cal.parset | 118 +++++++++++++++++++++---------------------- 1 file changed, 59 insertions(+), 59 deletions(-) diff --git a/Pre-Facet-Cal.parset b/Pre-Facet-Cal.parset index f40490a4..5bd063e2 100644 --- a/Pre-Facet-Cal.parset +++ b/Pre-Facet-Cal.parset @@ -96,64 +96,64 @@ pipeline.steps = [calibrator_pipeline, target_pipeline] # run the calibrator pipeline -calibrator_pipeline.control.kind = pipeline -calibrator_pipeline.control.type = Pre-Facet-Calibrator.parset -calibrator_pipeline.control.avg_timestep = {{ avg_timestep }} -calibrator_pipeline.control.avg_freqstep = {{ avg_freqstep }} -calibrator_pipeline.control.cal_input_path = {{ cal_input_path }} -calibrator_pipeline.control.cal_input_pattern = {{ cal_input_pattern }} -calibrator_pipeline.control.calibrator_path_skymodel = {{ calibrator_path_skymodel }} -calibrator_pipeline.control.inspection_directory = {{ inspection_directory }} -calibrator_pipeline.control.cal_values_directory = {{ cal_values_directory }} -calibrator_pipeline.control.flag_baselines = {{ flag_baselines }} -calibrator_pipeline.control.reference_station = {{ reference_station }} -calibrator_pipeline.control.calib_cal_parset = {{ calib_cal_parset }} -calibrator_pipeline.control.find_skymodel_cal_auto = {{ find_skymodel_cal_auto }} -calibrator_pipeline.control.losoto_importer = {{ losoto_importer }} -calibrator_pipeline.control.fitclock_script = {{ fitclock_script }} -calibrator_pipeline.control.fitamps_script = {{ fitamps_script }} -calibrator_pipeline.control.plotsols_script = {{ plotsols_script }} -calibrator_pipeline.control.fit_XYoffset_script = {{ fit_XYoffset_script }} -calibrator_pipeline.control.plotphases_script = {{ plotphases_script }} -calibrator_pipeline.control.losoto_executable = {{ losoto_executable }} -calibrator_pipeline.control.num_proc_per_node = {{ num_proc_per_node }} -calibrator_pipeline.control.num_proc_per_node_limit = {{ num_proc_per_node_limit }} -calibrator_pipeline.control.max_dppp_threads = {{ max_dppp_threads }} -calibrator_pipeline.control.error_tolerance = {{ error_tolerance }} +calibrator_pipeline.control.kind = pipeline 
+calibrator_pipeline.control.type = Pre-Facet-Calibrator.parset +calibrator_pipeline.argument.avg_timestep = {{ avg_timestep }} +calibrator_pipeline.argument.avg_freqstep = {{ avg_freqstep }} +calibrator_pipeline.argument.cal_input_path = {{ cal_input_path }} +calibrator_pipeline.argument.cal_input_pattern = {{ cal_input_pattern }} +calibrator_pipeline.argument.calibrator_path_skymodel = {{ calibrator_path_skymodel }} +calibrator_pipeline.argument.inspection_directory = {{ inspection_directory }} +calibrator_pipeline.argument.cal_values_directory = {{ cal_values_directory }} +calibrator_pipeline.argument.flag_baselines = {{ flag_baselines }} +calibrator_pipeline.argument.reference_station = {{ reference_station }} +calibrator_pipeline.argument.calib_cal_parset = {{ calib_cal_parset }} +calibrator_pipeline.argument.find_skymodel_cal_auto = {{ find_skymodel_cal_auto }} +calibrator_pipeline.argument.losoto_importer = {{ losoto_importer }} +calibrator_pipeline.argument.fitclock_script = {{ fitclock_script }} +calibrator_pipeline.argument.fitamps_script = {{ fitamps_script }} +calibrator_pipeline.argument.plotsols_script = {{ plotsols_script }} +calibrator_pipeline.argument.fit_XYoffset_script = {{ fit_XYoffset_script }} +calibrator_pipeline.argument.plotphases_script = {{ plotphases_script }} +calibrator_pipeline.argument.losoto_executable = {{ losoto_executable }} +calibrator_pipeline.argument.num_proc_per_node = {{ num_proc_per_node }} +calibrator_pipeline.argument.num_proc_per_node_limit = {{ num_proc_per_node_limit }} +calibrator_pipeline.argument.max_dppp_threads = {{ max_dppp_threads }} +calibrator_pipeline.argument.error_tolerance = {{ error_tolerance }} # run the target pipeline -target_pipeline.control.kind = pipeline -target_pipeline.control.type = Pre-Facet-Target.parset -target_pipeline.control.avg_timestep = {{ avg_timestep }} -target_pipeline.control.avg_freqstep = {{ avg_freqstep }} -target_pipeline.control.target_input_path = {{ target_input_path }} -target_pipeline.control.target_input_pattern = {{ target_input_pattern }} -target_pipeline.control.target_skymodel = {{ target_skymodel }} -target_pipeline.control.get_tgss_skymodel_script = {{ get_tgss_skymodel_script }} -target_pipeline.control.num_SBs_per_group = {{ num_SBs_per_group }} -target_pipeline.control.inspection_directory = {{ inspection_directory }} -target_pipeline.control.cal_values_directory = {{ cal_values_directory }} -target_pipeline.control.results_directory = {{ results_directory }} -target_pipeline.control.ionex_server = {{ ionex_server }} -target_pipeline.control.ionex_prefix = {{ ionex_prefix }} -target_pipeline.control.ionex_path = {{ ionex_path }} -target_pipeline.control.flag_baselines = {{ flag_baselines }} -target_pipeline.control.min_unflagged_fraction = {{ min_unflagged_fraction }} -target_pipeline.control.reference_station = {{ reference_station }} -target_pipeline.control.ATeam_predict_parset = {{ ATeam_predict_parset }} -target_pipeline.control.gsm_cal_parset = {{ gsm_cal_parset }} -target_pipeline.control.ATeam_skymodel = {{ ATeam_skymodel }} -target_pipeline.control.losoto_importer = {{ losoto_importer }} -target_pipeline.control.transfer_script = {{ transfer_script }} -target_pipeline.control.ATeam_Clipper = {{ ATeam_Clipper }} -target_pipeline.control.use_tgss_target = {{ use_tgss_target }} -target_pipeline.control.sortmap_script = {{ sortmap_script }} -target_pipeline.control.check_flagged_script = {{ check_flagged_script }} -target_pipeline.control.structurefunction_script = {{ 
structurefunction_script }} -target_pipeline.control.plotphases_script = {{ plotphases_script }} -target_pipeline.control.losoto_executable = {{ losoto_executable }} -target_pipeline.control.flagging_strategy = {{ flagging_strategy }} -target_pipeline.control.num_proc_per_node = {{ num_proc_per_node }} -target_pipeline.control.num_proc_per_node_limit = {{ num_proc_per_node_limit }} -target_pipeline.control.max_dppp_threads = {{ max_dppp_threads }} -target_pipeline.control.error_tolerance = {{ error_tolerance }} +target_pipeline.control.kind = pipeline +target_pipeline.control.type = Pre-Facet-Target.parset +target_pipeline.argument.avg_timestep = {{ avg_timestep }} +target_pipeline.argument.avg_freqstep = {{ avg_freqstep }} +target_pipeline.argument.target_input_path = {{ target_input_path }} +target_pipeline.argument.target_input_pattern = {{ target_input_pattern }} +target_pipeline.argument.target_skymodel = {{ target_skymodel }} +target_pipeline.argument.get_tgss_skymodel_script = {{ get_tgss_skymodel_script }} +target_pipeline.argument.num_SBs_per_group = {{ num_SBs_per_group }} +target_pipeline.argument.inspection_directory = {{ inspection_directory }} +target_pipeline.argument.cal_values_directory = {{ cal_values_directory }} +target_pipeline.argument.results_directory = {{ results_directory }} +target_pipeline.argument.ionex_server = {{ ionex_server }} +target_pipeline.argument.ionex_prefix = {{ ionex_prefix }} +target_pipeline.argument.ionex_path = {{ ionex_path }} +target_pipeline.argument.flag_baselines = {{ flag_baselines }} +target_pipeline.argument.min_unflagged_fraction = {{ min_unflagged_fraction }} +target_pipeline.argument.reference_station = {{ reference_station }} +target_pipeline.argument.ATeam_predict_parset = {{ ATeam_predict_parset }} +target_pipeline.argument.gsm_cal_parset = {{ gsm_cal_parset }} +target_pipeline.argument.ATeam_skymodel = {{ ATeam_skymodel }} +target_pipeline.argument.losoto_importer = {{ losoto_importer }} +target_pipeline.argument.transfer_script = {{ transfer_script }} +target_pipeline.argument.ATeam_Clipper = {{ ATeam_Clipper }} +target_pipeline.argument.use_tgss_target = {{ use_tgss_target }} +target_pipeline.argument.sortmap_script = {{ sortmap_script }} +target_pipeline.argument.check_flagged_script = {{ check_flagged_script }} +target_pipeline.argument.structurefunction_script = {{ structurefunction_script }} +target_pipeline.argument.plotphases_script = {{ plotphases_script }} +target_pipeline.argument.losoto_executable = {{ losoto_executable }} +target_pipeline.argument.flagging_strategy = {{ flagging_strategy }} +target_pipeline.argument.num_proc_per_node = {{ num_proc_per_node }} +target_pipeline.argument.num_proc_per_node_limit = {{ num_proc_per_node_limit }} +target_pipeline.argument.max_dppp_threads = {{ max_dppp_threads }} +target_pipeline.argument.error_tolerance = {{ error_tolerance }} From e58edc5aa54fcba5c7fad496f4b334735b1bc6d0 Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Mon, 17 Oct 2016 14:17:38 +0200 Subject: [PATCH 18/19] Removed outdated files --- Pre-Facet-Cal-RawData-PreAvg.parset | 410 ---------------------------- Pre-Facet-Cal-RawData-Single.parset | 378 ------------------------- Pre-Facet-Cal.parset | 4 - parsets/applyparmdb.parset | 14 - parsets/ateamclip.parset | 12 - parsets/gsmcal.parset | 37 --- 6 files changed, 855 deletions(-) delete mode 100644 Pre-Facet-Cal-RawData-PreAvg.parset delete mode 100644 Pre-Facet-Cal-RawData-Single.parset delete mode 100644 parsets/applyparmdb.parset delete mode 100644 
parsets/ateamclip.parset delete mode 100644 parsets/gsmcal.parset diff --git a/Pre-Facet-Cal-RawData-PreAvg.parset b/Pre-Facet-Cal-RawData-PreAvg.parset deleted file mode 100644 index 678c2963..00000000 --- a/Pre-Facet-Cal-RawData-PreAvg.parset +++ /dev/null @@ -1,410 +0,0 @@ -# Pre-Facet Calibration Pipeline for raw data -# -# Pre-Facet Calibration Pipeline: -# - first NDPPP run tailored to processing raw data -# - concatenates data in first NDPPP run (to save on number of files on shared filesystems) -# - does "dumb-smart-demixing": all A-team sources that are above a certain elevation get demixed -# - consecutive subband-numbers in the data need to be consecutive subband-frequencies -# - expects shared filesystem, that all nodes can reach all files! -# (E.g. a single workstation or compute cluster with shared filesystem -# doesn't work on multiple nodes on CEP-2 or CEP3.) - - -### First run this and check the generated plots! Modify and re-run if neccessary. -pipeline.steps=[createmap_cal, combine_cal_map, sortmap_cal, do_sortmap_cal_maps, demix-parmdbs_cal, dumb_smart_demixing_cal, ndppp_prep_cal, calib_cal, h5_imp_map, h5imp_cal, fitclock, ampl, plots, phase] -### When calibrator processing is done, you can do the processing of the target data. -### Either by using the same pipeline and running this: -# pipeline.steps=[ createmap_cal, combine_cal_map, sortmap_cal, do_sortmap_cal_maps, demix-parmdbs_cal, dumb_smart_demixing_cal, ndppp_prep_cal, calib_cal, h5_imp_map, h5imp_cal, fitclock, ampl, plots, phase, createmap_target, combine_target_map, sortmap_target, do_sortmap_target_maps, demix-parmdbs_target, dumb_smart_demixing_target, ndppp_prep_target, trans, calibtarget, dpppaverage, gsmcalibtarget, make_results_mapfile, move_results, createmap_plots, copy_plots] -### Or by copying the *.npy files from the calibrator and running this: -# pipeline.steps=[createmap_target, combine_target_map, sortmap_target, do_sortmap_target_maps, demix-parmdbs_target, dumb_smart_demixing_target, ndppp_prep_target, trans, calibtarget, dpppaverage, gsmcalibtarget, gsmcal_parmmap, plot_gsm_phases, make_results_mapfile, move_results, createmap_plots, copy_plots] -# pipeline.steps=[createmap_target, combine_target_map, sortmap_target, do_sortmap_target_maps, demix-parmdbs_target, dumb_smart_demixing_target, ndppp_prep_target, trans, calibtarget, dpppaverage, gsmcalibtarget, make_results_mapfile, move_results] - -# parameters you will need to adjust. -! avg_timestep_pre = 2 # averaging in 1st NDPPP run \ these two together should average -! avg_timestep_2nd = 2 # averaging in 2nd NDPPP run / the data to 4 seconds time resolution -! avg_freqstep_pre = 8 # averaging in 1st NDPPP run \ these two together should average -! avg_freqstep_2nd = 2 # averaging in 2nd NDPPP run / the data to 4 ch/SB frequency resolution -! num_ch_per_SB = 64 # how many channels are there in one SB (A bit redundant, but I cannot do math here!) -! cal_input_path = /work/htb00/htb001/NGC891/Cal-new-raw -! cal_input_pattern = L11551*SB0*MS -! calibrator_skymodel = /homea/htb00/htb001/Pre-Facet-Pipeline/skymodels/3c48-SH.skymodel -! target_input_path = /data/scratch/username/PathToYourTargetData/ -! target_input_pattern = L*.MS -! target_skymodel = /cep3home/username/Pre-Facet-Cal/skymodels/PleaseProvideTarget.skymodel -! num_SBs_per_group = 12 # make concatenated measurement-sets with that many subbands -! results_directory = /work/htb00/htb001/Pre-Facet-Pipeline/Outdata - -# pathes to the scripts etc. 
-# #### ToDo: get the scripts onto CEP3 and adjust the pathes here! -! calib_cal_parset = /homea/htb00/htb001/Pre-Facet-Pipeline/parsets/calibcal.parset -! cal_transfer_parset = /homea/htb00/htb001/Pre-Facet-Pipeline/parsets/applyparmdb.parset -! ATeam_predict_parset = /homea/htb00/htb001/Pre-Facet-Pipeline/parsets/ateamclip.parset -! ATeam_skymodel = /homea/htb00/htb001/Pre-Facet-Pipeline/skymodels/Ateam_LBA_CC.skymodel -! demixing_sourceDB = /homea/htb00/htb001/MSSS-processing/skymodels/Ateamhighresdemix.sourcedb -! gsm_cal_parset = /homea/htb00/htb001/Pre-Facet-Pipeline/parsets/gsmcal.parset -! losoto_importer = /homea/htb00/htb001/Pre-Facet-Pipeline/bin/losotoImporter.py -! dumb_smart_script = /homea/htb00/htb001/Pre-Facet-Pipeline/bin/gen_demixing_sources.py -! fitclock_script = /homea/htb00/htb001/Pre-Facet-Pipeline/bin/fit_clocktec_initialguess_losoto.py -! fitamps_script = /homea/htb00/htb001/Pre-Facet-Pipeline/bin/amplitudes_losoto_3.py -! plotsols_script = /homea/htb00/htb001/Pre-Facet-Pipeline/bin/examine_npys.py -! fit_XYoffset_script = /homea/htb00/htb001/Pre-Facet-Pipeline/bin/find_cal_global_phaseoffset_losoto.py -! transfer_script = /homea/htb00/htb001/Pre-Facet-Pipeline/bin/transfer_amplitudes+clock+offset_toMS.py -! plotphases_script = /homea/htb00/htb001/Pre-Facet-Pipeline/bin/plot_solutions_all_stations.py -! flagging_strategy = /homea/htb00/htb003/lofar_jureca_2-12/share/rfistrategies/HBAdefault - -# set this to True if you want the pipeline run to continue if single bands fail -! error_tolerance = False - - -# generate a mapfile of all the calibrator data -createmap_cal.control.kind = plugin -createmap_cal.control.type = createMapfile -createmap_cal.control.method = mapfile_from_folder -createmap_cal.control.mapfile_dir = input.output.mapfile_dir -createmap_cal.control.filename = createmap_cal.mapfile -createmap_cal.control.folder = {{ cal_input_path }} -createmap_cal.control.pattern = {{ cal_input_pattern }} - -# combine all entries into one mapfile, for the sortmap script -combine_cal_map.control.kind = plugin -combine_cal_map.control.type = createMapfile -combine_cal_map.control.method = mapfile_all_to_one -combine_cal_map.control.mapfile_dir = input.output.mapfile_dir -combine_cal_map.control.filename = combine_tar_map.mapfile -combine_cal_map.control.mapfile_in = createmap_cal.output.mapfile - -# sort the calibrator data into groups so that NDPPP can concatenate them -sortmap_cal.control.type = pythonplugin -sortmap_cal.control.executable = {{ sortmap_script }} -sortmap_cal.argument.flags = [combine_cal_map.output.mapfile] -sortmap_cal.argument.filename = sortmap_cal -sortmap_cal.argument.mapfile_dir = input.output.mapfile_dir -sortmap_cal.argument.numSB = {{ num_SBs_per_group }} -sortmap_cal.argument.NDPPPfill = True -sortmap_cal.argument.stepname = ndppp_prep_cal -sortmap_cal.argument.truncateLastSBs = True - -# convert the output of sortmap_cal into usable mapfiles -do_sortmap_cal_maps.control.kind = plugin -do_sortmap_cal_maps.control.type = mapfilenamesFromMapfiles -do_sortmap_cal_maps.control.mapfile_groupmap = sortmap_cal.output.groupmapfile.mapfile -do_sortmap_cal_maps.control.mapfile_datamap = sortmap_cal.output.mapfile.mapfile -do_sortmap_cal_maps.control.mapfile_flagmap = sortmap_cal.output.flagmapfile.mapfile - -# I want to run demixing, for that I need a MS with names for the instrument tables -demix-parmdbs_cal.control.kind = plugin -demix-parmdbs_cal.control.type = createMapfile -demix-parmdbs_cal.control.method = add_suffix_to_file 
-demix-parmdbs_cal.control.mapfile_in = do_sortmap_cal_maps.output.groupmap -demix-parmdbs_cal.control.add_suffix_to_file = -demix-inst -demix-parmdbs_cal.control.mapfile_dir = input.output.mapfile_dir -demix-parmdbs_cal.control.filename = demix-parmdbs_cal.mapfile - -# Get the sources to be demixed for each epoch -dumb_smart_demixing_cal.control.type = pythonplugin -dumb_smart_demixing_cal.control.executable = {{ dumb_smart_script }} -dumb_smart_demixing_cal.control.error_tolerance = {{ error_tolerance }} -dumb_smart_demixing_cal.argument.flags = do_sortmap_cal_maps.output.datamap -dumb_smart_demixing_cal.argument.sourcelist = "CasA CygA VirA TauA" -dumb_smart_demixing_cal.argument.elevlimit = 0deg - -# run NDPPP on the calibrator data -# My data is raw, non pre-processed data, so the parset looks different! -ndppp_prep_cal.control.type = dppp -ndppp_prep_cal.control.max_per_node = 8 # \ feel free to adjust these two values to match your system -ndppp_prep_cal.control.environment = {OMP_NUM_THREADS: 5} # / they should be reasonable for JURECA -ndppp_prep_cal.control.error_tolerance = {{ error_tolerance }} -ndppp_prep_cal.control.mapfile_out = do_sortmap_cal_maps.output.groupmap # specify the output filenames -ndppp_prep_cal.control.mapfiles_in = [do_sortmap_cal_maps.output.datamap, demix-parmdbs_cal.output.mapfile, dumb_smart_demixing_cal.output.demixsources.mapfile, do_sortmap_cal_maps.output.flagmap] -ndppp_prep_cal.control.inputkeys = [msin,demix.instrumentmodel,demix.subtractsources,flagedge.chan] -#ndppp_prep_cal.argument.msin = # see ndppp_prep_cal.control.inputkeys -ndppp_prep_cal.argument.msin.datacolumn = DATA -ndppp_prep_cal.argument.msin.missingdata = True #\ these two lines will make NDPPP generate dummy data when -ndppp_prep_cal.argument.msin.orderms = False #/ concatenating data -ndppp_prep_cal.argument.msin.baseline = CS*&; RS*&; CS*&RS* -ndppp_prep_cal.argument.msout.datacolumn = DATA -ndppp_prep_cal.argument.msin.autoweight = True # recomended for processing raw input data -ndppp_prep_cal.argument.steps = [autoflag,flagedge,flag,filter,aoflag,demix,avg,flagamp] -ndppp_prep_cal.argument.autoflag.type = preflagger -ndppp_prep_cal.argument.autoflag.corrtype = auto -ndppp_prep_cal.argument.flagedge.type = preflagger -#ndppp_prep_cal.argument.flagedge.chan = # see ndppp_prep_cal.control.inputkeys -ndppp_prep_cal.argument.flag.type = preflagger -ndppp_prep_cal.argument.flag.baseline = [ DE* , FR* , UK* , SE*, CS013HBA*] -ndppp_prep_cal.argument.filter.type = filter -ndppp_prep_cal.argument.filter.baseline = CS*, RS*&& -ndppp_prep_cal.argument.filter.remove = true -ndppp_prep_cal.argument.aoflag.type = aoflagger -ndppp_prep_cal.argument.aoflag.memoryperc = 10 -ndppp_prep_cal.argument.aoflag.keepstatistics = false -ndppp_prep_cal.argument.demix.type = demix -ndppp_prep_cal.argument.demix.demixtimestep = 29 # 1 min obs with 2 s int.-time are 29 time-steps long. -ndppp_prep_cal.argument.demix.demixfreqstep = {{ num_ch_per_SB }} -ndppp_prep_cal.argument.demix.timestep = 1 # 29 is a prime number... 
-ndppp_prep_cal.argument.demix.freqstep = {{ avg_freqstep_pre }} # first averaging step -ndppp_prep_cal.argument.demix.skymodel = {{ demixing_sourceDB }} -#ndppp_prep_cal.argument.demix.instrumentmodel = # see ndppp_prep_cal.control.inputkeys -#ndppp_prep_cal.argument.demix.subtractsources = # see ndppp_prep_cal.control.inputkeys -ndppp_prep_cal.argument.avg.type = average -ndppp_prep_cal.argument.avg.timestep = {{ avg_timestep_pre }} # first averaging step -ndppp_prep_cal.argument.avg.freqstep = {{ avg_freqstep_2nd }} # second averaging step in frequency -ndppp_prep_cal.argument.flagamp.type = preflagger -ndppp_prep_cal.argument.flagamp.amplmin = 1e-30 - -#now run BBS on the NDPPP-ed calibrator data. -calib_cal.control.type = python-calibrate-stand-alone -calib_cal.control.max_per_node = 12 -calib_cal.control.error_tolerance = {{ error_tolerance }} -calib_cal.argument.force = True -calib_cal.argument.observation = ndppp_prep_cal.output.mapfile -calib_cal.argument.parset = {{ calib_cal_parset }} -calib_cal.argument.catalog = {{ calibrator_skymodel }} -calib_cal.argument.Step.solve.Model.Beam.UseChannelFreq = T # needed becase I use data that has already been concatenated - -# generate a mapfile with all files in a single entry -h5_imp_map.control.kind = plugin -h5_imp_map.control.type = createMapfile -h5_imp_map.control.method = mapfile_all_to_one -h5_imp_map.control.mapfile_in = ndppp_prep_cal.output.mapfile -h5_imp_map.control.mapfile_dir = input.output.mapfile_dir -h5_imp_map.control.filename = h5_imp_map.mapfile - -# import all instrument tables into one LoSoTo file -h5imp_cal.control.type = pythonplugin -h5imp_cal.control.executable = {{ losoto_importer }} -h5imp_cal.argument.flags = [h5_imp_map.output.mapfile,h5imp_cal_losoto.h5] -h5imp_cal.argument.instrument = /instrument -h5imp_cal.argument.solsetName = sol000 -h5imp_cal.argument.compression = 7 - -# now run the script that does the clock-TEC fitting -fitclock.control.kind = recipe -fitclock.control.type = executable_args -fitclock.control.mapfile_in = h5imp_cal.output.h5parm.mapfile -fitclock.control.executable = {{ fitclock_script }} -# order for arguments is: [,,] -fitclock.control.arguments = [h5gvds,caldata_transfer,24] -fitclock.control.inputkey = h5gvds - -# now run the script that filters the amplitudes -ampl.control.kind = recipe -ampl.control.type = executable_args -ampl.control.mapfile_in = h5imp_cal.output.h5parm.mapfile -ampl.control.executable = {{ fitamps_script }} -# order for arguments is: [,,,] -# subbans-to-flag = semicolon-sperated list of integers in double-quotes -# e.g.: ampl.control.arguments = [h5gvds,caldata_transfer,4,"205;206;207"] -ampl.control.arguments = [h5gvds,caldata_transfer,4,""] -ampl.control.inputkey = h5gvds - -# and generate some output plots -plots.control.kind = recipe -plots.control.type = executable_args -plots.control.mapfile_in = h5imp_cal.output.h5parm.mapfile -plots.control.executable = {{ plotsols_script }} -plots.control.skip_infile = True -plots.control.arguments = [caldata_transfer] # Needs "" from the fitclock and ampl steps - -# fit the phase difference between X and Y -phase.control.type = pythonplugin -phase.control.executable = {{ fit_XYoffset_script }} -# order for flags is: [,] -phase.argument.flags = [h5imp_cal.output.h5parm.mapfile,caldata_transfer] - - -# ############################################################################################################ -# # Stop processing here and check the plots generated by the previous steps! Only then continue processing. 
# -# ############################################################################################################ - -# generate a mapfile of all the target data -createmap_target.control.kind = plugin -createmap_target.control.type = createMapfile -createmap_target.control.method = mapfile_from_folder -createmap_target.control.mapfile_dir = input.output.mapfile_dir -createmap_target.control.filename = createmap_target.mapfile -createmap_target.control.folder = {{ target_input_path }} -createmap_target.control.pattern = {{ target_input_pattern }} - -# combine all entries into one mapfile, for the sortmap script -combine_target_map.control.kind = plugin -combine_target_map.control.type = createMapfile -combine_target_map.control.method = mapfile_all_to_one -combine_target_map.control.mapfile_dir = input.output.mapfile_dir -combine_target_map.control.filename = combine_tar_map.mapfile -combine_target_map.control.mapfile_in = createmap_target.output.mapfile - -# sort the target data into groups so that NDPPP can concatenate them -sortmap_target.control.type = pythonplugin -sortmap_target.control.executable = {{ sortmap_script }} -sortmap_target.argument.flags = [combine_target_map.output.mapfile] -sortmap_target.argument.filename = sortmap_target -sortmap_target.argument.mapfile_dir = input.output.mapfile_dir -sortmap_target.argument.numSB = {{ num_SBs_per_group }} -sortmap_target.argument.NDPPPfill = True -sortmap_target.argument.stepname = ndppp_prep_target -sortmap_target.argument.truncateLastSBs = True - -# convert the output of sortmap_target into usable mapfiles -do_sortmap_target_maps.control.kind = plugin -do_sortmap_target_maps.control.type = mapfilenamesFromMapfiles -do_sortmap_target_maps.control.mapfile_groupmap = sortmap_target.output.groupmapfile.mapfile -do_sortmap_target_maps.control.mapfile_datamap = sortmap_target.output.mapfile.mapfile -do_sortmap_target_maps.control.mapfile_flagmap = sortmap_target.output.flagmapfile.mapfile - -# I want to run demixing, for that I need a MS with names for the instrument tables -demix-parmdbs_target.control.kind = plugin -demix-parmdbs_target.control.type = createMapfile -demix-parmdbs_target.control.method = add_suffix_to_file -demix-parmdbs_target.control.mapfile_in = do_sortmap_target_maps.output.groupmap -demix-parmdbs_target.control.add_suffix_to_file = -demix-inst -demix-parmdbs_target.control.mapfile_dir = input.output.mapfile_dir -demix-parmdbs_target.control.filename = demix-parmdbs_target.mapfile - -# Get the sources to be demixed for each epoch -dumb_smart_demixing_target.control.type = pythonplugin -dumb_smart_demixing_target.control.executable = {{ dumb_smart_script }} -dumb_smart_demixing_target.control.error_tolerance = {{ error_tolerance }} -dumb_smart_demixing_target.argument.flags = do_sortmap_target_maps.output.datamap -dumb_smart_demixing_target.argument.sourcelist = "CasA CygA VirA TauA" -dumb_smart_demixing_target.argument.elevlimit = 0deg - -# run NDPPP on the calibrator data -# My data is raw, non pre-processed data, so the parset looks different! 
-ndppp_prep_target.control.type = dppp -ndppp_prep_target.control.max_per_node = 2 # \ feel free to adjust these two values to match your system -ndppp_prep_target.control.environment = {OMP_NUM_THREADS: 12} # / they should be reasonable for JURECA -ndppp_prep_target.control.error_tolerance = {{ error_tolerance }} -ndppp_prep_target.control.mapfile_out = do_sortmap_target_maps.output.groupmap # specify the output filenames -ndppp_prep_target.control.mapfiles_in = [do_sortmap_target_maps.output.datamap, demix-parmdbs_target.output.mapfile, dumb_smart_demixing_target.output.demixsources.mapfile, do_sortmap_target_maps.output.flagmap] -ndppp_prep_target.control.inputkeys = [msin,demix.instrumentmodel,demix.subtractsources,flagedge.chan] -#ndppp_prep_target.argument.msin = # see ndppp_prep_target.control.inputkeys -ndppp_prep_target.argument.msin.datacolumn = DATA -ndppp_prep_target.argument.msin.missingdata = True #\ these two lines will make NDPPP generate dummy data when -ndppp_prep_target.argument.msin.orderms = False #/ concatenating data -ndppp_prep_target.argument.msin.baseline = CS*&; RS*&; CS*&RS* -ndppp_prep_target.argument.msout.datacolumn = DATA -ndppp_prep_target.argument.msin.autoweight = True # recomended for processing raw input data -ndppp_prep_target.argument.steps = [autoflag,flagedge,flag,filter,aoflag,demix,avg,flagamp] -ndppp_prep_target.argument.autoflag.type = preflagger -ndppp_prep_target.argument.autoflag.corrtype = auto -ndppp_prep_target.argument.flagedge.type = preflagger -#ndppp_prep_target.argument.flagedge.chan = # see ndppp_prep_target.control.inputkeys -ndppp_prep_target.argument.flag.type = preflagger -ndppp_prep_target.argument.flag.baseline = [ DE* , FR* , UK* , SE*, CS013HBA*] -ndppp_prep_target.argument.filter.type = filter -ndppp_prep_target.argument.filter.baseline = CS*, RS*&& -ndppp_prep_target.argument.filter.remove = true -ndppp_prep_target.argument.aoflag.type = aoflagger -ndppp_prep_target.argument.aoflag.memoryperc = 10 -ndppp_prep_target.argument.aoflag.keepstatistics = false -ndppp_prep_target.argument.demix.type = demix -ndppp_prep_target.argument.demix.demixtimestep = 29 # 1 min obs with 2 s int.-time are 29 time-steps long. -ndppp_prep_target.argument.demix.demixfreqstep = {{ num_ch_per_SB }} -ndppp_prep_target.argument.demix.timestep = 1 # 29 is a prime number... 
-ndppp_prep_target.argument.demix.freqstep = {{ avg_freqstep_pre }} # first averaging step -ndppp_prep_target.argument.demix.skymodel = {{ demixing_sourceDB }} -#ndppp_prep_target.argument.demix.instrumentmodel = # see ndppp_prep_target.control.inputkeys -#ndppp_prep_target.argument.demix.subtractsources = # see ndppp_prep_target.control.inputkeys -ndppp_prep_target.argument.avg.type = average -ndppp_prep_target.argument.avg.timestep = {{ avg_timestep_pre }} # first averaging step -ndppp_prep_target.argument.flagamp.type = preflagger -ndppp_prep_target.argument.flagamp.amplmin = 1e-30 - -# generate parmDB with the interpolated calibrator data to apply to the traget -trans.control.type = pythonplugin -trans.control.executable = {{ transfer_script }} -trans.control.error_tolerance = {{ error_tolerance }} -# order for flags is: [,,] -trans.argument.flags = [ndppp_prep_target.output.mapfile,caldata_transfer,/instrument_amp_clock_offset] - -# run BBS to apply the calibrator values to the target data -calibtarget.control.type = python-calibrate-stand-alone -calibtarget.control.max_per_node = 12 # feel free to adjust this value to match your system -calibtarget.control.error_tolerance = {{ error_tolerance }} -# specify the observation and the input-parmDB name via "mapfiles_in" and "inputkeys" -calibtarget.control.mapfiles_in = [ndppp_prep_target.output.mapfile, trans.output.transfer_parmDB.mapfile] -calibtarget.control.inputkeys = [observation,parmdb] -calibtarget.argument.parset = {{ cal_transfer_parset }} -calibtarget.argument.replace-parmdb = True - -# run NDPPP again to flag and average some more. -dpppaverage.control.type = dppp -dpppaverage.control.max_per_node = 8 # \ feel free to adjust these two values to match your system -dpppaverage.control.environment = {OMP_NUM_THREADS: 5} # / they should be reasonable for CEP3 -dpppaverage.control.error_tolerance = {{ error_tolerance }} -dpppaverage.argument.msin = ndppp_prep_target.output.mapfile # The input data. 
-dpppaverage.argument.msin.datacolumn = CORRECTED_DATA -dpppaverage.argument.msout.datacolumn = DATA -dpppaverage.argument.steps = [count,flag,count,avg] -dpppaverage.argument.flag.type = aoflagger -dpppaverage.argument.flag.keepstatistics = false -dpppaverage.argument.flag.memoryperc = 10 -dpppaverage.argument.flag.strategy = {{ flagging_strategy }} -dpppaverage.argument.avg.type = average -dpppaverage.argument.avg.freqstep = {{ avg_freqstep_2nd }} # average to 4 ch/SB -dpppaverage.argument.avg.timestep = {{ avg_timestep_2nd }} # average to 4 second intervals - -# phase calibration on the gsm skymodel -gsmcalibtarget.control.type = python-calibrate-stand-alone -gsmcalibtarget.control.max_per_node = 12 # feel free to adjust this to match your system -gsmcalibtarget.control.error_tolerance = {{ error_tolerance }} -gsmcalibtarget.argument.force = True -gsmcalibtarget.argument.observation = dpppaverage.output.mapfile -gsmcalibtarget.argument.parmdb-name = instrument_directionindependent -gsmcalibtarget.argument.parset = {{ gsm_cal_parset }} -gsmcalibtarget.argument.catalog = {{ target_skymodel }} -gsmcalibtarget.argument.Step.solve.Solve.CellSize.Freq = 4 # default: solution-cell spans one subband -gsmcalibtarget.argument.Step.solve.Solve.CellSize.Time = 1 # default: one solution per time-step - -# generate mapfile with the parmDBs that were created by BBS in the gsmcalibtarget step -gsmcal_parmmap.control.kind = plugin -gsmcal_parmmap.control.type = createMapfile -gsmcal_parmmap.control.method = add_suffix_to_file -gsmcal_parmmap.control.mapfile_in = dpppaverage.output.mapfile -gsmcal_parmmap.control.add_suffix_to_file = /instrument_directionindependent -gsmcal_parmmap.control.mapfile_dir = input.output.mapfile_dir -gsmcal_parmmap.control.filename = gsmcal_parmdbs.mapfile - -# plot the phase solutions from the phase-only calibration of the target -plot_gsm_phases.control.kind = recipe -plot_gsm_phases.control.type = executable_args -plot_gsm_phases.control.executable = {{ plotphases_script }} -plot_gsm_phases.control.max_per_node = 12 -plot_gsm_phases.control.mapfiles_in = [gsmcal_parmmap.output.mapfile,dpppaverage.output.mapfile] -plot_gsm_phases.control.inputkeys = [infile,outbase] -plot_gsm_phases.control.arguments = [-p,infile,outbase] - -# make mapfile with the filenames of the results that we want -make_results_mapfile.control.kind = plugin -make_results_mapfile.control.type = makeResultsMapfile -make_results_mapfile.control.mapfile_dir = input.output.mapfile_dir -make_results_mapfile.control.filename = make_results_mapfile.mapfile -make_results_mapfile.control.mapfile_in = dpppaverage.output.mapfile -make_results_mapfile.control.target_dir = {{ results_directory }} -make_results_mapfile.control.make_target_dir = True -make_results_mapfile.control.new_suffix = .pre-cal.ms - -# move the results to where we want them -move_results.control.kind = recipe -move_results.control.type = executable_args -move_results.control.executable = /bin/mv -move_results.control.mapfiles_in = [dpppaverage.output.mapfile, make_results_mapfile.output.mapfile] -move_results.control.inputkeys = [source,destination] -move_results.control.arguments = [source,destination] - -# generate a mapfile of all the diagnostic plots -createmap_plots.control.kind = plugin -createmap_plots.control.type = createMapfile -createmap_plots.control.method = mapfile_from_folder -createmap_plots.control.mapfile_dir = input.output.mapfile_dir -createmap_plots.control.filename = diagnostic_plots.mapfile -createmap_plots.control.folder 
= input.output.working_directory/input.output.job_name -createmap_plots.control.pattern = *.png - -# copy the diagnostic plots to the results_directory -copy_plots.control.kind = recipe -copy_plots.control.type = executable_args -copy_plots.control.executable = /bin/cp -copy_plots.control.mapfile_in = createmap_plots.output.mapfile -copy_plots.control.inputkey = source -copy_plots.control.arguments = [source,{{ results_directory }}] diff --git a/Pre-Facet-Cal-RawData-Single.parset b/Pre-Facet-Cal-RawData-Single.parset deleted file mode 100644 index 77fc19c7..00000000 --- a/Pre-Facet-Cal-RawData-Single.parset +++ /dev/null @@ -1,378 +0,0 @@ -# Pre-Facet Calibration Pipeline for raw data -# -# Pre-Facet Calibration Pipeline: -# - first NDPPP run tailored to processing raw data -# - does "dumb-smart-demixing": all A-team sources that are above a certain elevation get demixed -# - figures out frequencies from SPECTRAL_WINDOW table in MSs -# - expects shared filesystem, that all nodes can reach all files! -# (E.g. a single workstation or compute cluster with shared filesystem -# doesn't work on multiple nodes on CEP-2 or CEP3.) - -### First run this and check the generated plots! Modify and re-run if neccessary. -pipeline.steps=[createmap_cal, demix-parmdbs_cal, dumb_smart_demixing_cal, ndppp_prep_cal, calib_cal, h5_imp_map, h5imp_cal, fitclock, ampl, plots, phase] -### When calibrator processing is done, you can do the processing of the target data. -### Either by using the same pipeline and running this: -# pipeline.steps=[ createmap_cal, demix-parmdbs_cal, dumb_smart_demixing_cal, ndppp_prep_cal, calib_cal, h5_imp_map, h5imp_cal, fitclock, ampl, plots, phase, createmap_target, demix-parmdbs_target, dumb_smart_demixing_target, ndppp_prep_target, trans, calibtarget, combine_target_map, sortmap_target, do_sortmap_maps, dpppcombine, gsmcalibtarget, gsmcal_parmmap, plot_gsm_phases, make_results_mapfile, move_results, createmap_plots, copy_plots] -### Or by copying the *.npy files from the calibrator and running this: -# pipeline.steps=[createmap_target, demix-parmdbs_target, dumb_smart_demixing_target, ndppp_prep_target, trans, calibtarget, combine_target_map, sortmap_target, do_sortmap_maps, dpppcombine, gsmcalibtarget, gsmcal_parmmap, plot_gsm_phases, make_results_mapfile, move_results, createmap_plots, copy_plots] - -# parameters you will need to adjust. -! avg_timestep_pre = 2 # averaging in 1st NDPPP run \ these two together should average -! avg_timestep_2nd = 2 # averaging in 2nd NDPPP run / the data to 4 seconds time resolution -! avg_freqstep_pre = 4 # averaging in 1st NDPPP run \ these two together should average -! avg_freqstep_2nd = 2 # averaging in 2nd NDPPP run / the data to 4 ch/SB frequency resolution -! num_ch_per_SB = 64 # how many channels are there in one SB (A bit redundant, but I cannot do math here!) -! cal_input_path = /work/htb00/htb001/M31-HBAlow-prefacet/calibrator/raw -! cal_input_pattern = L*MS -! calibrator_skymodel = /homea/htb00/htb001/Pre-Facet-Pipeline/skymodels/3c48-SH.skymodel -! target_input_path = /work/htb00/htb001/M31-HBAlow-prefacet/SB000-059 -! target_input_pattern = L*.MS -! target_skymodel = /homea/htb00/htb001/M31-processing/gsm-M31-10deg-1Jy-20arcsec.skymodel -! num_SBs_per_group = 12 # make concatenated measurement-sets with that many subbands -! results_directory = /work/htb00/htb001/M31-HBAlow-prefacet/Outdata - -# pathes to the scripts etc. -# #### ToDo: get the scripts onto CEP3 and adjust the pathes here! -! 
calib_cal_parset = /homea/htb00/htb001/prefactor/parsets/calibcal.parset -! cal_transfer_parset = /homea/htb00/htb001/prefactor/parsets/applyparmdb.parset -! demixing_sourceDB = /homea/htb00/htb001/MSSS-processing/skymodels/Ateamhighresdemix.sourcedb -! gsm_cal_parset = /homea/htb00/htb001/prefactor/parsets/gsmcal.parset -! losoto_importer = /homea/htb00/htb001/prefactor/bin/losotoImporter.py -! dumb_smart_script = /homea/htb00/htb001/prefactor/bin/gen_demixing_sources.py -! fitclock_script = /homea/htb00/htb001/prefactor/bin/fit_clocktec_initialguess_losoto.py -! fitamps_script = /homea/htb00/htb001/prefactor/bin/amplitudes_losoto_3.py -! plotsols_script = /homea/htb00/htb001/prefactor/bin/examine_npys.py -! fit_XYoffset_script = /homea/htb00/htb001/prefactor/bin/find_cal_global_phaseoffset_losoto.py -! transfer_script = /homea/htb00/htb001/prefactor/bin/transfer_amplitudes+clock+offset_toMS.py -! sortmap_script = /homea/htb00/htb001/Pre-Facet-Pipeline/bin/sort_times_into_freqGroups.py -! plotphases_script = /homea/htb00/htb001/prefactor/bin/plot_solutions_all_stations.py -! flagging_strategy = /homea/htb00/htb003/lofar_jureca_2-12/share/rfistrategies/HBAdefault - -# set this to True if you want the pipeline run to continue if single bands fail -! error_tolerance = False - -# generate a mapfile of all the calibrator data -createmap_cal.control.kind = plugin -createmap_cal.control.type = createMapfile -createmap_cal.control.method = mapfile_from_folder -createmap_cal.control.mapfile_dir = input.output.mapfile_dir -createmap_cal.control.filename = createmap_cal.mapfile -createmap_cal.control.folder = {{ cal_input_path }} -createmap_cal.control.pattern = {{ cal_input_pattern }} - -# I want to run demixing, for that I need a MS with names for the instrument tables -demix-parmdbs_cal.control.kind = plugin -demix-parmdbs_cal.control.type = createMapfile -demix-parmdbs_cal.control.method = add_suffix_to_file -demix-parmdbs_cal.control.mapfile_in = createmap_cal.output.groupmap -demix-parmdbs_cal.control.add_suffix_to_file = -demix-inst -demix-parmdbs_cal.control.mapfile_dir = input.output.mapfile_dir -demix-parmdbs_cal.control.filename = demix-parmdbs_cal.mapfile - -# Get the sources to be demixed for each epoch -dumb_smart_demixing_cal.control.type = pythonplugin -dumb_smart_demixing_cal.control.executable = {{ dumb_smart_script }} -dumb_smart_demixing_cal.control.error_tolerance = {{ error_tolerance }} -dumb_smart_demixing_cal.argument.flags = createmap_cal.output.groupmap -dumb_smart_demixing_cal.argument.sourcelist = "CasA CygA VirA TauA" -dumb_smart_demixing_cal.argument.elevlimit = 0deg - -# run NDPPP on the calibrator data -# My data is raw, non pre-processed data, so the parset looks different! 
-ndppp_prep_cal.control.type = dppp -ndppp_prep_cal.control.max_per_node = 8 # \ feel free to adjust these two values to match your system -ndppp_prep_cal.control.environment = {OMP_NUM_THREADS: 5} # / they should be reasonable for JURECA -ndppp_prep_cal.control.error_tolerance = {{ error_tolerance }} -ndppp_prep_cal.control.mapfiles_in = [createmap_cal.output.groupmap, demix-parmdbs_cal.output.mapfile, dumb_smart_demixing_cal.output.demixsources.mapfile] -ndppp_prep_cal.control.inputkeys = [msin,demix.instrumentmodel,demix.subtractsources] -#ndppp_prep_cal.argument.msin = # see ndppp_prep_cal.control.inputkeys -ndppp_prep_cal.argument.msin.datacolumn = DATA -ndppp_prep_cal.argument.msin.missingdata = True #\ these two lines will make NDPPP generate dummy data when -ndppp_prep_cal.argument.msin.orderms = False #/ concatenating data -ndppp_prep_cal.argument.msin.baseline = CS*&; RS*&; CS*&RS* -ndppp_prep_cal.argument.msout.datacolumn = DATA -ndppp_prep_cal.argument.msin.autoweight = True # recomended for processing raw input data -ndppp_prep_cal.argument.steps = [autoflag,flagedge,flag,filter,aoflag,demix,avg,flagamp] -ndppp_prep_cal.argument.autoflag.type = preflagger -ndppp_prep_cal.argument.autoflag.corrtype = auto -ndppp_prep_cal.argument.flagedge.type = preflagger -ndppp_prep_cal.argument.flagedge.chan = [0..nchan/32-1, 31*nchan/32..nchan] -ndppp_prep_cal.argument.flag.type = preflagger -ndppp_prep_cal.argument.flag.baseline = [ DE* , FR* , UK* , SE*, CS013HBA*] -ndppp_prep_cal.argument.filter.type = filter -ndppp_prep_cal.argument.filter.baseline = CS*, RS*&& -ndppp_prep_cal.argument.filter.remove = true -ndppp_prep_cal.argument.aoflag.type = aoflagger -ndppp_prep_cal.argument.aoflag.memoryperc = 10 -ndppp_prep_cal.argument.aoflag.keepstatistics = false -ndppp_prep_cal.argument.demix.type = demix -ndppp_prep_cal.argument.demix.demixtimestep = 29 # 1 min obs with 2 s int.-time are 29 time-steps long. -ndppp_prep_cal.argument.demix.demixfreqstep = {{ num_ch_per_SB }} -ndppp_prep_cal.argument.demix.timestep = 1 # 29 is a prime number... -ndppp_prep_cal.argument.demix.freqstep = {{ avg_freqstep_pre }} # first averaging step -ndppp_prep_cal.argument.demix.skymodel = {{ demixing_sourceDB }} -#ndppp_prep_cal.argument.demix.instrumentmodel = # see ndppp_prep_cal.control.inputkeys -#ndppp_prep_cal.argument.demix.subtractsources = # see ndppp_prep_cal.control.inputkeys -ndppp_prep_cal.argument.avg.type = average -ndppp_prep_cal.argument.avg.timestep = {{ avg_timestep_pre }} # first averaging step -ndppp_prep_cal.argument.avg.freqstep = {{ avg_freqstep_2nd }} # second averaging step in frequency -ndppp_prep_cal.argument.flagamp.type = preflagger -ndppp_prep_cal.argument.flagamp.amplmin = 1e-30 - -#now run BBS on the NDPPP-ed calibrator data. 
-calib_cal.control.type = python-calibrate-stand-alone -calib_cal.control.max_per_node = 12 -calib_cal.control.error_tolerance = {{ error_tolerance }} -calib_cal.argument.force = True -calib_cal.argument.observation = ndppp_prep_cal.output.mapfile -calib_cal.argument.parset = {{ calib_cal_parset }} -calib_cal.argument.catalog = {{ calibrator_skymodel }} -calib_cal.argument.Step.solve.Model.Beam.UseChannelFreq = T # needed becase I use data that has already been concatenated - -# generate a mapfile with all files in a single entry -h5_imp_map.control.kind = plugin -h5_imp_map.control.type = createMapfile -h5_imp_map.control.method = mapfile_all_to_one -h5_imp_map.control.mapfile_in = ndppp_prep_cal.output.mapfile -h5_imp_map.control.mapfile_dir = input.output.mapfile_dir -h5_imp_map.control.filename = h5_imp_map.mapfile - -# import all instrument tables into one LoSoTo file -h5imp_cal.control.type = pythonplugin -h5imp_cal.control.executable = {{ losoto_importer }} -h5imp_cal.argument.flags = [h5_imp_map.output.mapfile,h5imp_cal_losoto.h5] -h5imp_cal.argument.instrument = /instrument -h5imp_cal.argument.solsetName = sol000 -h5imp_cal.argument.compression = 7 - -# now run the script that does the clock-TEC fitting -fitclock.control.kind = recipe -fitclock.control.type = executable_args -fitclock.control.mapfile_in = h5imp_cal.output.h5parm.mapfile -fitclock.control.executable = {{ fitclock_script }} -# order for arguments is: [,,] -fitclock.control.arguments = [h5gvds,caldata_transfer,24] -fitclock.control.inputkey = h5gvds - -# now run the script that filters the amplitudes -ampl.control.kind = recipe -ampl.control.type = executable_args -ampl.control.mapfile_in = h5imp_cal.output.h5parm.mapfile -ampl.control.executable = {{ fitamps_script }} -# order for arguments is: [,,,] -# subbans-to-flag = semicolon-sperated list of integers in double-quotes -# e.g.: ampl.control.arguments = [h5gvds,caldata_transfer,4,"205;206;207"] -ampl.control.arguments = [h5gvds,caldata_transfer,4,""] -ampl.control.inputkey = h5gvds - -# and generate some output plots -plots.control.kind = recipe -plots.control.type = executable_args -plots.control.mapfile_in = h5imp_cal.output.h5parm.mapfile -plots.control.executable = {{ plotsols_script }} -plots.control.skip_infile = True -plots.control.arguments = [caldata_transfer] # Needs "" from the fitclock and ampl steps - -# fit the phase difference between X and Y -phase.control.type = pythonplugin -phase.control.executable = {{ fit_XYoffset_script }} -# order for flags is: [,] -phase.argument.flags = [h5imp_cal.output.h5parm.mapfile,caldata_transfer] - - -# ############################################################################################################ -# # Stop processing here and check the plots generated by the previous steps! Only then continue processing. 
# -# ############################################################################################################ - -# generate a mapfile of all the target data -createmap_target.control.kind = plugin -createmap_target.control.type = createMapfile -createmap_target.control.method = mapfile_from_folder -createmap_target.control.mapfile_dir = input.output.mapfile_dir -createmap_target.control.filename = createmap_target.mapfile -createmap_target.control.folder = {{ target_input_path }} -createmap_target.control.pattern = {{ target_input_pattern }} - -# I want to run demixing, for that I need a MS with names for the instrument tables -demix-parmdbs_target.control.kind = plugin -demix-parmdbs_target.control.type = createMapfile -demix-parmdbs_target.control.method = add_suffix_to_file -demix-parmdbs_target.control.mapfile_in = createmap_target.output.mapfile -demix-parmdbs_target.control.add_suffix_to_file = -demix-inst -demix-parmdbs_target.control.mapfile_dir = input.output.mapfile_dir -demix-parmdbs_target.control.filename = demix-parmdbs_target.mapfile - -# Get the sources to be demixed for each epoch -dumb_smart_demixing_target.control.type = pythonplugin -dumb_smart_demixing_target.control.executable = {{ dumb_smart_script }} -dumb_smart_demixing_target.control.error_tolerance = {{ error_tolerance }} -dumb_smart_demixing_target.argument.flags = createmap_target.output.mapfile -dumb_smart_demixing_target.argument.sourcelist = "CasA CygA VirA TauA" -dumb_smart_demixing_target.argument.elevlimit = 0deg - -# run NDPPP on the calibrator data -# My data is raw, non pre-processed data, so the parset looks different! -ndppp_prep_target.control.type = dppp -ndppp_prep_target.control.max_per_node = 2 # \ feel free to adjust these two values to match your system -ndppp_prep_target.control.environment = {OMP_NUM_THREADS: 12} # / these should be reasonable for JURECA -ndppp_prep_target.control.error_tolerance = {{ error_tolerance }} -ndppp_prep_target.control.mapfiles_in = [createmap_target.output.mapfile, demix-parmdbs_target.output.mapfile, dumb_smart_demixing_target.output.demixsources.mapfile] -ndppp_prep_target.control.inputkeys = [msin,demix.instrumentmodel,demix.subtractsources] -#ndppp_prep_target.argument.msin = # see ndppp_prep_target.control.inputkeys -ndppp_prep_target.argument.msin.datacolumn = DATA -ndppp_prep_target.argument.msin.missingdata = True #\ these two lines will make NDPPP generate dummy data when -ndppp_prep_target.argument.msin.orderms = False #/ concatenating data -ndppp_prep_target.argument.msin.baseline = CS*&; RS*&; CS*&RS* -ndppp_prep_target.argument.msout.datacolumn = DATA -ndppp_prep_target.argument.msin.autoweight = True # recomended for processing raw input data -ndppp_prep_target.argument.steps = [autoflag,flagedge,flag,filter,aoflag,demix,avg,flagamp] -ndppp_prep_target.argument.autoflag.type = preflagger -ndppp_prep_target.argument.autoflag.corrtype = auto -ndppp_prep_target.argument.flagedge.type = preflagger -ndppp_prep_target.argument.flagedge.chan = [0..nchan/32-1, 31*nchan/32..nchan] -ndppp_prep_target.argument.flag.type = preflagger -ndppp_prep_target.argument.flag.baseline = [ DE* , FR* , UK* , SE*, CS013HBA*] -ndppp_prep_target.argument.filter.type = filter -ndppp_prep_target.argument.filter.baseline = CS*, RS*&& -ndppp_prep_target.argument.filter.remove = true -ndppp_prep_target.argument.aoflag.type = aoflagger -ndppp_prep_target.argument.aoflag.memoryperc = 10 -ndppp_prep_target.argument.aoflag.keepstatistics = false 
-ndppp_prep_target.argument.demix.type = demix -ndppp_prep_target.argument.demix.demixtimestep = 29 # 1 min obs with 2 s int.-time are 29 time-steps long. -ndppp_prep_target.argument.demix.demixfreqstep = {{ num_ch_per_SB }} -ndppp_prep_target.argument.demix.timestep = 1 # 29 is a prime number... -ndppp_prep_target.argument.demix.freqstep = {{ avg_freqstep_pre }} # first averaging step -ndppp_prep_target.argument.demix.skymodel = {{ demixing_sourceDB }} -#ndppp_prep_target.argument.demix.instrumentmodel = # see ndppp_prep_target.control.inputkeys -#ndppp_prep_target.argument.demix.subtractsources = # see ndppp_prep_target.control.inputkeys -ndppp_prep_target.argument.avg.type = average -ndppp_prep_target.argument.avg.timestep = {{ avg_timestep_pre }} # first averaging step -ndppp_prep_target.argument.flagamp.type = preflagger -ndppp_prep_target.argument.flagamp.amplmin = 1e-30 - -# generate parmDB with the interpolated calibrator data to apply to the traget -trans.control.type = pythonplugin -trans.control.executable = {{ transfer_script }} -trans.control.error_tolerance = {{ error_tolerance }} -# order for flags is: [,,] -trans.argument.flags = [ndppp_prep_target.output.mapfile,caldata_transfer,/instrument_amp_clock_offset] - -# run BBS to apply the calibrator values to the target data -calibtarget.control.type = python-calibrate-stand-alone -calibtarget.control.max_per_node = 12 # feel free to adjust this value to match your system -calibtarget.control.error_tolerance = {{ error_tolerance }} -# specify the observation and the input-parmDB name via "mapfiles_in" and "inputkeys" -calibtarget.control.mapfiles_in = [ndppp_prep_target.output.mapfile, trans.output.transfer_parmDB.mapfile] -calibtarget.control.inputkeys = [observation,parmdb] -calibtarget.argument.parset = {{ cal_transfer_parset }} -calibtarget.argument.replace-parmdb = True - -# combine all entries into one mapfile, for the sortmap script -combine_target_map.control.kind = plugin -combine_target_map.control.type = createMapfile -combine_target_map.control.method = mapfile_all_to_one -combine_target_map.control.mapfile_dir = input.output.mapfile_dir -combine_target_map.control.filename = combine_tar_map.mapfile -combine_target_map.control.mapfile_in = ndppp_prep_target.output.mapfile - -# sort the calibrator data into groups so that NDPPP can concatenate them -sortmap_target.control.type = pythonplugin -sortmap_target.control.executable = {{ sortmap_script }} -sortmap_target.argument.flags = [combine_target_map.output.mapfile] -sortmap_target.argument.filename = sortmap_target -sortmap_target.argument.mapfile_dir = input.output.mapfile_dir -sortmap_target.argument.numSB = {{ num_SBs_per_group }} -sortmap_target.argument.NDPPPfill = True -sortmap_target.argument.stepname = dpppcombine -sortmap_target.argument.truncateLastSBs = True - -# convert the output of sortmap_target into usable mapfiles -do_sortmap_maps.control.kind = plugin -do_sortmap_maps.control.type = mapfilenamesFromMapfiles -do_sortmap_maps.control.mapfile_groupmap = sortmap_target.output.groupmapfile.mapfile -do_sortmap_maps.control.mapfile_datamap = sortmap_target.output.mapfile.mapfile - -# run NDPPP again to flag and average some more. 
-dpppcombine.control.type = dppp -dpppcombine.control.max_per_node = 8 # \ feel free to adjust these two values to match your system -dpppcombine.control.environment = {OMP_NUM_THREADS: 5} # / these should be reasonable for CEP3 -dpppcombine.control.error_tolerance = {{ error_tolerance }} -dpppcombine.control.mapfile_out = do_sortmap_maps.output.groupmap # Mapfile with the output names -dpppcombine.control.mapfiles_in = do_sortmap_maps.output.datamap # The input data -dpppcombine.control.inputkey = msin -dpppcombine.argument.msin.datacolumn = CORRECTED_DATA -dpppcombine.argument.msout.datacolumn = DATA -dpppcombine.argument.steps = [count,flag,count,avg] -dpppcombine.argument.flag.type = aoflagger -dpppcombine.argument.flag.keepstatistics = false -dpppcombine.argument.flag.memoryperc = 10 -dpppcombine.argument.flag.strategy = {{ flagging_strategy }} -dpppcombine.argument.avg.type = average -dpppcombine.argument.avg.freqstep = {{ avg_freqstep_2nd }} # average to 4 ch/SB -dpppcombine.argument.avg.timestep = {{ avg_timestep_2nd }} # average to 4 second intervals - -# phase calibration on the gsm skymodel -gsmcalibtarget.control.type = python-calibrate-stand-alone -gsmcalibtarget.control.max_per_node = 12 # feel free to adjust this to match your system -gsmcalibtarget.control.error_tolerance = {{ error_tolerance }} -gsmcalibtarget.argument.force = True -gsmcalibtarget.argument.observation = dpppcombine.output.mapfile -gsmcalibtarget.argument.parmdb-name = instrument_directionindependent -gsmcalibtarget.argument.parset = {{ gsm_cal_parset }} -gsmcalibtarget.argument.catalog = {{ target_skymodel }} -gsmcalibtarget.argument.Step.solve.Solve.CellSize.Freq = 4 # default: solution-cell spans one subband -gsmcalibtarget.argument.Step.solve.Solve.CellSize.Time = 1 # default: one solution per time-step - -# generate mapfile with the parmDBs that were created by BBS in the gsmcalibtarget step -gsmcal_parmmap.control.kind = plugin -gsmcal_parmmap.control.type = createMapfile -gsmcal_parmmap.control.method = add_suffix_to_file -gsmcal_parmmap.control.mapfile_in = dpppcombine.output.mapfile -gsmcal_parmmap.control.add_suffix_to_file = /instrument_directionindependent -gsmcal_parmmap.control.mapfile_dir = input.output.mapfile_dir -gsmcal_parmmap.control.filename = gsmcal_parmdbs.mapfile - -# plot the phase solutions from the phase-only calibration of the target -plot_gsm_phases.control.kind = recipe -plot_gsm_phases.control.type = executable_args -plot_gsm_phases.control.executable = {{ plotphases_script }} -plot_gsm_phases.control.max_per_node = 12 -plot_gsm_phases.control.mapfiles_in = [gsmcal_parmmap.output.mapfile,dpppcombine.output.mapfile] -plot_gsm_phases.control.inputkeys = [infile,outbase] -plot_gsm_phases.control.arguments = [-p,infile,outbase] - -# make mapfile with the filenames of the results that we want -make_results_mapfile.control.kind = plugin -make_results_mapfile.control.type = makeResultsMapfile -make_results_mapfile.control.mapfile_dir = input.output.mapfile_dir -make_results_mapfile.control.filename = make_results_mapfile.mapfile -make_results_mapfile.control.mapfile_in = dpppcombine.output.mapfile -make_results_mapfile.control.target_dir = {{ results_directory }} -make_results_mapfile.control.make_target_dir = True -make_results_mapfile.control.new_suffix = .pre-cal.ms - -# move the results to where we want them -move_results.control.kind = recipe -move_results.control.type = executable_args -move_results.control.executable = /bin/mv -move_results.control.mapfiles_in = 
[dpppcombine.output.mapfile, make_results_mapfile.output.mapfile] -move_results.control.inputkeys = [source,destination] -move_results.control.arguments = [source,destination] - -# generate a mapfile of all the diagnostic plots -createmap_plots.control.kind = plugin -createmap_plots.control.type = createMapfile -createmap_plots.control.method = mapfile_from_folder -createmap_plots.control.mapfile_dir = input.output.mapfile_dir -createmap_plots.control.filename = diagnostic_plots.mapfile -createmap_plots.control.folder = input.output.working_directory/input.output.job_name -createmap_plots.control.pattern = *.png - -# copy the diagnostic plots to the results_directory -copy_plots.control.kind = recipe -copy_plots.control.type = executable_args -copy_plots.control.executable = /bin/cp -copy_plots.control.mapfile_in = createmap_plots.output.mapfile -copy_plots.control.inputkey = source -copy_plots.control.arguments = [source,{{ results_directory }}] diff --git a/Pre-Facet-Cal.parset b/Pre-Facet-Cal.parset index 5bd063e2..42606056 100644 --- a/Pre-Facet-Cal.parset +++ b/Pre-Facet-Cal.parset @@ -63,8 +63,6 @@ # #### ToDo: get the scripts onto CEP3 and adjust the pathes here! ! calib_cal_parset = /homea/htb00/htb001/prefactor/parsets/calibcal.parset ! find_skymodel_cal_auto = /homea/htb00/htb001/prefactor/scripts/find_skymodel_cal.py -! ATeam_predict_parset = /homea/htb00/htb001/prefactor/parsets/ateamclip.parset -! gsm_cal_parset = /homea/htb00/htb001/prefactor/parsets/gsmcal.parset ! ATeam_skymodel = /homea/htb00/htb001/prefactor/skymodels/Ateam_LBA_CC.skymodel ! losoto_importer = /homea/htb00/htb001/prefactor/scripts/losotoImporter.py ! fitclock_script = /homea/htb00/htb001/prefactor/scripts/fit_clocktec_initialguess_losoto.py @@ -140,8 +138,6 @@ target_pipeline.argument.ionex_path = {{ ionex_path }} target_pipeline.argument.flag_baselines = {{ flag_baselines }} target_pipeline.argument.min_unflagged_fraction = {{ min_unflagged_fraction }} target_pipeline.argument.reference_station = {{ reference_station }} -target_pipeline.argument.ATeam_predict_parset = {{ ATeam_predict_parset }} -target_pipeline.argument.gsm_cal_parset = {{ gsm_cal_parset }} target_pipeline.argument.ATeam_skymodel = {{ ATeam_skymodel }} target_pipeline.argument.losoto_importer = {{ losoto_importer }} target_pipeline.argument.transfer_script = {{ transfer_script }} diff --git a/parsets/applyparmdb.parset b/parsets/applyparmdb.parset deleted file mode 100644 index 9953a5d1..00000000 --- a/parsets/applyparmdb.parset +++ /dev/null @@ -1,14 +0,0 @@ -Strategy.InputColumn = DATA -Strategy.ChunkSize = 1000 -Strategy.UseSolver = F -Strategy.Steps = [ correct] - -Step.correct.Operation = CORRECT -Step.correct.Model.Sources = [] -Step.correct.Model.Cache.Enable = T -Step.correct.Model.Clock.Enable = T -Step.correct.Model.Gain.Enable = T -Step.correct.Model.CommonRotation.Enable = F -Step.correct.Model.Beam.Enable = T -Step.correct.Model.Beam.UseChannelFreq = T -Step.correct.Output.Column = CORRECTED_DATA diff --git a/parsets/ateamclip.parset b/parsets/ateamclip.parset deleted file mode 100644 index 92229756..00000000 --- a/parsets/ateamclip.parset +++ /dev/null @@ -1,12 +0,0 @@ -Strategy.InputColumn = DATA -Strategy.ChunkSize = 100 -Strategy.UseSolver = F -Strategy.Steps = [predict4] -Step.predict4.Model.Sources = [VirA_4_patch,CygAGG,CasA_4_patch,TauAGG] -Step.predict4.Model.Cache.Enable = T -Step.predict4.Model.Gain.Enable = F -Step.predict4.Operation = PREDICT -Step.predict4.Output.Column = MODEL_DATA 
-Step.predict4.Model.Beam.Enable = T -Step.predict4.Model.Beam.UseChannelFreq = T - diff --git a/parsets/gsmcal.parset b/parsets/gsmcal.parset deleted file mode 100644 index 24da5a39..00000000 --- a/parsets/gsmcal.parset +++ /dev/null @@ -1,37 +0,0 @@ -Strategy.InputColumn = DATA -Strategy.ChunkSize = 50 -Strategy.UseSolver = F -Strategy.Steps = [solve, correct] -Step.solve.Operation = SOLVE -Step.solve.Model.Sources = [] -Step.solve.Model.Beam.Enable = T -Step.solve.Model.Beam.UseChannelFreq = T -Step.solve.Model.Beam.Mode = ARRAY_FACTOR -Step.solve.Model.Cache.Enable = T -Step.solve.Model.Bandpass.Enable = F -Step.solve.Model.Phasors.Enable = T -Step.solve.Model.Gain.Enable = T -Step.solve.Model.DirectionalGain.Enable = F -Step.solve.Solve.Mode = COMPLEX -Step.solve.Solve.Parms = ["Gain:0:0:Phase:*","Gain:1:1:Phase:*"] -Step.solve.Solve.UVRange = [150, 999999] -Step.solve.Solve.ExclParms = [] -Step.solve.Solve.CalibrationGroups = [] -Step.solve.Solve.CellSize.Freq = 0 -Step.solve.Solve.CellSize.Time = 1 -Step.solve.Solve.CellChunkSize = 25 -Step.solve.Solve.PropagateSolutions = F -Step.solve.Solve.Options.MaxIter = 500 -Step.solve.Solve.Options.EpsValue = 1e-8 -Step.solve.Solve.Options.EpsDerivative = 1e-8 -Step.solve.Solve.Options.ColFactor = 1e-9 -Step.solve.Solve.Options.LMFactor = 1.0 -Step.solve.Solve.Options.BalancedEqs = F -Step.solve.Solve.Options.UseSVD = T -Step.correct.Operation = CORRECT -Step.correct.Model.Sources = [] -Step.correct.Model.Phasors.Enable = T -Step.correct.Model.Gain.Enable = T -Step.correct.Model.Beam.Enable = F -Step.correct.Model.Beam.UseChannelFreq = F -Step.correct.Output.Column = CORRECTED_DATA From 1ad734686752d6b05635ab2d02a06185411cfdcb Mon Sep 17 00:00:00 2001 From: Andreas Horneffer Date: Mon, 17 Oct 2016 16:47:43 +0200 Subject: [PATCH 19/19] added pathes to the sub-pipelines --- Pre-Facet-Cal.parset | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/Pre-Facet-Cal.parset b/Pre-Facet-Cal.parset index 42606056..c7e8d2dc 100644 --- a/Pre-Facet-Cal.parset +++ b/Pre-Facet-Cal.parset @@ -61,6 +61,8 @@ ## pathes to the scripts etc. # #### ToDo: get the scripts onto CEP3 and adjust the pathes here! +! calibrator_pipeline = /homea/htb00/htb001/prefactor/Pre-Facet-Calibrator.parset +! target_pipeline = /homea/htb00/htb001/prefactor/Pre-Facet-Target.parset ! calib_cal_parset = /homea/htb00/htb001/prefactor/parsets/calibcal.parset ! find_skymodel_cal_auto = /homea/htb00/htb001/prefactor/scripts/find_skymodel_cal.py ! ATeam_skymodel = /homea/htb00/htb001/prefactor/skymodels/Ateam_LBA_CC.skymodel @@ -74,10 +76,12 @@ ! get_tgss_skymodel_script = /homea/htb00/htb001/prefactor/scripts/download_tgss_skymodel_target.py ! sortmap_script = /homea/htb00/htb001/prefactor/scripts/sort_times_into_freqGroups.py ! check_flagged_script = /homea/htb00/htb001/prefactor/scripts/check_unflagged_fraction.py -! structurefunction_script = /cep3home/horneffer/Pre-Facet-Cal/bin/getStructure_from_phases.py +! structurefunction_script = /homea/htb00/htb001/prefactor/scripts/getStructure_from_phases.py ! plotphases_script = /homea/htb00/htb001/prefactor/scripts/plot_solutions_all_stations.py ! losoto_executable = /opt/cep/losoto/current/bin/losoto -! flagging_strategy = /opt/cep/lofar/lofar_versions/LOFAR-Release-2_15_2/lofar_build/install/gnu_opt/share/rfistrategies/HBAdefault +! flagging_strategy = /homea/htb00/htb003/lofar_jureca_2.17_stack2016a/share/rfistrategies/HBAdefault +! 
makesourcedb = /homea/htb00/htb003/lofar_jureca_2.17_stack2016a/bin/makesourcedb + # number of processes to use per step per node ! num_proc_per_node = 10 @@ -95,7 +99,7 @@ pipeline.steps = [calibrator_pipeline, target_pipeline] # run the calibrator pipeline calibrator_pipeline.control.kind = pipeline -calibrator_pipeline.control.type = Pre-Facet-Calibrator.parset +calibrator_pipeline.control.type = {{ calibrator_pipeline }} calibrator_pipeline.argument.avg_timestep = {{ avg_timestep }} calibrator_pipeline.argument.avg_freqstep = {{ avg_freqstep }} calibrator_pipeline.argument.cal_input_path = {{ cal_input_path }} @@ -121,7 +125,7 @@ calibrator_pipeline.argument.error_tolerance = {{ error_tolerance }} # run the target pipeline target_pipeline.control.kind = pipeline -target_pipeline.control.type = Pre-Facet-Target.parset +target_pipeline.control.type = {{ target_pipeline }} target_pipeline.argument.avg_timestep = {{ avg_timestep }} target_pipeline.argument.avg_freqstep = {{ avg_freqstep }} target_pipeline.argument.target_input_path = {{ target_input_path }} @@ -149,6 +153,7 @@ target_pipeline.argument.structurefunction_script = {{ structurefunction_script target_pipeline.argument.plotphases_script = {{ plotphases_script }} target_pipeline.argument.losoto_executable = {{ losoto_executable }} target_pipeline.argument.flagging_strategy = {{ flagging_strategy }} +target_pipeline.argument.makesourcedb = {{ makesourcedb }} target_pipeline.argument.num_proc_per_node = {{ num_proc_per_node }} target_pipeline.argument.num_proc_per_node_limit = {{ num_proc_per_node_limit }} target_pipeline.argument.max_dppp_threads = {{ max_dppp_threads }}