diff --git a/magicctapipe/scripts/lst1_magic/README.md b/magicctapipe/scripts/lst1_magic/README.md index c494d0d72..0c4ac03db 100644 --- a/magicctapipe/scripts/lst1_magic/README.md +++ b/magicctapipe/scripts/lst1_magic/README.md @@ -154,11 +154,11 @@ Eventually, to merge DL1 stereo (LST) subruns into runs, we run the `merge_stere > $ merge_stereo (-c config_auto_MCP.yaml) -### Random forest and DL1 to DL2 +### DL1 to DL2 TBD. -### Instrument response function and DL3 +### DL2 to DL3 TBD. diff --git a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/coincident_events.py b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/coincident_events.py index 542c5ba67..5736f570e 100644 --- a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/coincident_events.py +++ b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/coincident_events.py @@ -66,14 +66,19 @@ def configfile_coincidence(target_dir, source_name, config_file): "event_coincidence": config_dict["event_coincidence"], } - file_name = f"{target_dir}/v{__version__}/{source_name}/config_coincidence.yaml" + conf_dir = f"{target_dir}/v{__version__}/{source_name}" + os.makedirs(conf_dir, exist_ok=True) + + file_name = f"{conf_dir}/config_coincidence.yaml" with open(file_name, "w") as f: yaml.dump(conf, f, default_flow_style=False) -def linking_bash_lst(target_dir, LST_runs, source_name, LST_version, env_name, cluster): +def linking_bash_lst( + target_dir, LST_runs, source_name, LST_version, env_name, cluster, version +): """ This function links the LST data paths to the working directory and creates bash scripts. 
@@ -92,11 +97,13 @@ def linking_bash_lst(target_dir, LST_runs, source_name, LST_version, env_name, c Name of the conda environment cluster : str Cluster system + version : str + Version of the input (DL1 MAGIC runs) data """ coincidence_DL1_dir = f"{target_dir}/v{__version__}/{source_name}" - MAGIC_DL1_dir = f"{target_dir}/v{__version__}/{source_name}/DL1" + MAGIC_DL1_dir = f"{target_dir}/v{version}/{source_name}/DL1" dates = [os.path.basename(x) for x in glob.glob(f"{MAGIC_DL1_dir}/Merged/[0-9]*")] if cluster != "SLURM": @@ -199,6 +206,9 @@ def main(): source = config["data_selection"]["source_name_output"] cluster = config["general"]["cluster"] + in_version = config["directories"]["real_input_version"] + if in_version == "": + in_version = __version__ if source_in is None: source_list = joblib.load("list_sources.dat") @@ -227,6 +237,7 @@ def main(): LST_version, env_name, cluster, + in_version, ) # linking the data paths to current working directory print("***** Submitting processess to the cluster...") diff --git a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/config_auto_MCP.yaml b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/config_auto_MCP.yaml index 1ce9c418a..769ef9090 100644 --- a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/config_auto_MCP.yaml +++ b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/config_auto_MCP.yaml @@ -1,6 +1,7 @@ directories: workspace_dir: "/fefs/aswg/workspace/elisa.visentin/auto_MCP_PR/" # Output directory where all the data products will be saved. 
- + MC_version: "" #MCP version used to process MC; if not provided, current MCP version will be used + real_input_version: "" #MCP version used to process input real data (to be used in case you want to run only the last steps of the analysis, using lower-level data previously processed); if not provided, current MCP version will be used data_selection: source_name_database: "CrabNebula" # MUST BE THE SAME AS IN THE DATABASE; Set to null to process all sources in the given time range. diff --git a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/database_production/create_LST_table.py b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/database_production/create_LST_table.py index 9fd1c0696..01edf387c 100644 --- a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/database_production/create_LST_table.py +++ b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/database_production/create_LST_table.py @@ -88,6 +88,16 @@ def main(): out_h5, key=out_key, ) + if "ra" in df_old: + df_cut["ra"] = np.nan + if "dec" in df_old: + df_cut["dec"] = np.nan + if "MC_dec" in df_old: + df_cut["MC_dec"] = np.nan + if "point_source" in df_old: + df_cut["point_source"] = np.nan + if "wobble_offset" in df_old: + df_cut["wobble_offset"] = np.nan df_cut = pd.concat([df_old, df_cut]).drop_duplicates( subset="LST1_run", keep="first" ) diff --git a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/database_production/set_ra_dec.py b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/database_production/set_ra_dec.py new file mode 100644 index 000000000..ac2595687 --- /dev/null +++ b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/database_production/set_ra_dec.py @@ -0,0 +1,164 @@ +""" +Add, to LST database, infos about coordinates and extension of the sources and MC declination to be used to process the source + +Usage: +$ set_ra_dec (-b YYYYMMDD -e YYYYMMDD -s source_dict -m mc_dec) +""" + +import argparse +import json + +import numpy as np +import 
pandas as pd +import yaml +from astropy.coordinates import SkyCoord +from astropy.coordinates.name_resolve import NameResolveError + +from magicctapipe.io import resource_file + + +def main(): + + """ + Main function + """ + + parser = argparse.ArgumentParser() + + parser.add_argument( + "--begin-date", + "-b", + dest="begin", + type=int, + default=0, + help="First date to update database (YYYYMMDD)", + ) + parser.add_argument( + "--end-date", + "-e", + dest="end", + type=int, + default=0, + help="End date to update database (YYYYMMDD)", + ) + parser.add_argument( + "--dict-source", + "-s", + dest="source", + type=str, + default="./source_dict.json", + help="File with dictionary of info (RA/Dec/point-source) for sources", + ) + parser.add_argument( + "--mc-dec", + "-m", + dest="dec_mc", + type=str, + default="./dec_mc.json", + help="File with list of MC declinations", + ) + + args = parser.parse_args() + config_file = resource_file("database_config.yaml") + + with open( + config_file, "rb" + ) as fc: # "rb" mode opens the file in binary format for reading + config_dict = yaml.safe_load(fc) + + LST_h5 = config_dict["database_paths"]["LST"] + LST_key = config_dict["database_keys"]["LST"] + + df_LST = pd.read_hdf( + LST_h5, + key=LST_key, + ) + if "ra" not in df_LST: + df_LST["ra"] = np.nan + if "dec" not in df_LST: + df_LST["dec"] = np.nan + if "MC_dec" not in df_LST: + df_LST["MC_dec"] = np.nan + if "point_source" not in df_LST: + df_LST["point_source"] = np.nan + df_LST_full = df_LST.copy(deep=True) + if args.begin != 0: + df_LST = df_LST[df_LST["DATE"].astype(int) >= args.begin] + if args.end != 0: + df_LST = df_LST[df_LST["DATE"].astype(int) <= args.end] + + sources = np.unique(df_LST["source"]) + with open(args.source) as f: + dict_source = f.read() + with open(args.dec_mc) as f: + mc_dec = f.read() + source_dict = json.loads(dict_source) + dec_mc = np.asarray(json.loads(mc_dec)).astype(np.float64) + print("MC declinations: \t", dec_mc) + print("\n\nChecking 
RA/Dec...\n\n") + i = 0 + for src in sources: + + try: + coord = SkyCoord.from_name(src) + if src == "Crab": + coord = SkyCoord.from_name("CrabNebula") + src_dec = coord.dec.degree + src_ra = coord.ra.degree + + except NameResolveError: + print(f"{i}: {src} not found in astropy. Looking to the dictionaries...") + if ( + (src in source_dict.keys()) + and (source_dict.get(src)[0] != "NaN") + and (source_dict.get(src)[1] != "NaN") + ): + src_ra = float(source_dict.get(src)[0]) + src_dec = float(source_dict.get(src)[1]) + + else: + print( + f"\t {i}: {src} RA and/or Dec not in the dictionary. Please update the dictionary" + ) + src_ra = np.nan + src_dec = np.nan + + i += 1 + df_LST["ra"] = np.where(df_LST["source"] == src, src_ra, df_LST["ra"]) + df_LST["dec"] = np.where(df_LST["source"] == src, src_dec, df_LST["dec"]) + if not (np.isnan(src_dec)): + df_LST["MC_dec"] = np.where( + df_LST["source"] == src, + float(dec_mc[np.argmin(np.abs(src_dec - dec_mc))]), + df_LST["MC_dec"], + ) + print("\n\nChecking if point source...\n\n") + i = 0 + for src in sources: + if (src in source_dict.keys()) and (source_dict.get(src)[2] != "NaN"): + src_point = str(source_dict.get(src)[2]) + df_LST["point_source"] = np.where( + df_LST["source"] == src, src_point, df_LST["point_source"] + ) + else: + print( + f"\t {i}: {src} extension information not in the dictionaries. 
Please add it to the dictionaries" + ) + i += 1 + df_LST = pd.concat([df_LST, df_LST_full]).drop_duplicates( + subset="LST1_run", keep="first" + ) + df_LST.to_hdf( + LST_h5, + key=LST_key, + mode="w", + min_itemsize={ + "lstchain_versions": 20, + "last_lstchain_file": 90, + "processed_lstchain_file": 90, + }, + ) + + +if __name__ == "__main__": + main() diff --git a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/database_production/wobble_db.py b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/database_production/wobble_db.py new file mode 100644 index 000000000..a26277393 --- /dev/null +++ b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/database_production/wobble_db.py @@ -0,0 +1,140 @@ +""" +Add wobble offset info to the LST database (by checking MAGIC runs). + +Usage: +$ wobble_db (-b YYYYMMDD -e YYYYMMDD) +""" + +import argparse +import glob + +import numpy as np +import pandas as pd +import yaml + +from magicctapipe.io import resource_file + + +def main(): + + """ + Main function + """ + + parser = argparse.ArgumentParser() + + parser.add_argument( + "--begin-date", + "-b", + dest="begin", + type=int, + default=0, + help="First date to update database (YYYYMMDD)", + ) + parser.add_argument( + "--end-date", + "-e", + dest="end", + type=int, + default=0, + help="End date to update database (YYYYMMDD)", + ) + + args = parser.parse_args() + config_file = resource_file("database_config.yaml") + + with open( + config_file, "rb" + ) as fc: # "rb" mode opens the file in binary format for reading + config_dict = yaml.safe_load(fc) + + out_h5 = config_dict["database_paths"]["LST"] + out_key = config_dict["database_keys"]["LST"] + + df_LST = pd.read_hdf( + out_h5, + key=out_key, + ) + df = pd.read_hdf( + config_dict["database_paths"]["MAGIC+LST1"], + key=config_dict["database_keys"]["MAGIC+LST1"], + ) # TODO: put this file in a shared folder + """ + df2 = pd.read_hdf( + config_dict["database_paths"]["MAGIC+LST1_bis"], + 
key=config_dict["database_keys"]["MAGIC+LST1_bis"], + ) # TODO: put this file in a shared folder + df = pd.concat([df, df2]).drop_duplicates(subset="LST1_run", keep="first") + df = df.sort_values(by=["DATE", "source"]) + + df = df.reset_index(drop=True) + """ + if args.begin != 0: + df = df[df["DATE"].astype(int) >= args.begin] + if args.end != 0: + df = df[df["DATE"].astype(int) <= args.end] + if "wobble_offset" not in df_LST: + df_LST["wobble_offset"] = np.nan + + date_lst = pd.to_datetime(df["DATE"], format="%Y%m%d") + + delta = pd.Timedelta("1 day") + date_magic = date_lst + delta + + date_magic = date_magic.dt.strftime("%Y/%m/%d").to_list() + for i in range(len(df)): + magic_runs = ( + (df["MAGIC_runs"].to_list())[i] + .rstrip("]") + .lstrip("[") + .replace(" ", "") + .split(",") + ) + lst_run = (df["LST1_run"].to_list())[i] + wobble = [] + source = (df["source"].to_list())[i] + for j in range(len(magic_runs)): + print("MAGIC run:", magic_runs[j]) + runs = glob.glob( + f"/fefs/onsite/common/MAGIC/data/M[12]/event/Calibrated/{date_magic[i]}/*{magic_runs[j]}*{source}*.root" + ) + + if len(runs) < 1: + print( + f"Neither M1 nor M2 files could be found for {date_magic[i]}, run {magic_runs[j]}, {source}. Check database and stored data!" + ) + continue + wobble_run_info = runs[0].split("/")[-1].split(source)[1] + if "-W" in wobble_run_info: + wobble_run = (wobble_run_info.split("W")[1])[0:4] + else: + print( + f"No string matching for wobble offset found in the name of MAGIC files for {date_magic[i]}, run {magic_runs[j]}, {source}. Check it manually!" + ) + continue + print("wobble offset:", wobble_run) + wobble.append(wobble_run) + wobble = np.unique(wobble) + if len(wobble) > 1: + print( + f"More than one wobble offset value for LST run {lst_run}: check data!" 
+ ) + wobble_str = "[" + ", ".join(str(x) for x in wobble) + "]" + print(f"Wobble offset for LST run {lst_run}:", wobble_str) + df_LST["wobble_offset"] = np.where( + df_LST["LST1_run"] == lst_run, wobble_str, df_LST["wobble_offset"] + ) + df_LST.to_hdf( + out_h5, + key=out_key, + mode="w", + min_itemsize={ + "lstchain_versions": 20, + "last_lstchain_file": 90, + "processed_lstchain_file": 90, + }, + ) + + +if __name__ == "__main__": + main() diff --git a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/dl1_production.py b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/dl1_production.py index bfd17707e..ef1e9d36c 100644 --- a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/dl1_production.py +++ b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/dl1_production.py @@ -41,7 +41,7 @@ logger.setLevel(logging.INFO) -def config_file_gen(target_dir, source_name, config_file): +def config_file_gen(target_dir, source_name, config_dict): """ Here we create the configuration file needed for transforming DL0 into DL1 @@ -52,17 +52,10 @@ def config_file_gen(target_dir, source_name, config_file): Directory to store the results source_name : str Name of the target source - config_file : str - Path to MCP configuration file (e.g., resources/config.yaml) + config_dict : dict + Dictionary of a configuration file (e.g., resources/config.yaml) """ - if config_file == "": - config_file = resource_file("config.yaml") - with open( - config_file, "rb" - ) as fc: # "rb" mode opens the file in binary format for reading - config_dict = yaml.safe_load(fc) - conf = { "mc_tel_ids": config_dict["mc_tel_ids"], "LST": config_dict["LST"], @@ -213,7 +206,6 @@ def main(): ) as f: # "rb" mode opens the file in binary format for reading config = yaml.safe_load(f) - telescope_ids = list(config["mc_tel_ids"].values()) env_name = config["general"]["env_name"] config_file = config["general"]["base_config_file"] source_in = config["data_selection"]["source_name_database"] @@ -221,6 
+213,14 @@ def main(): cluster = config["general"]["cluster"] target_dir = Path(config["directories"]["workspace_dir"]) + if config_file == "": + config_file = resource_file("config.yaml") + with open( + config_file, "rb" + ) as fc: # "rb" mode opens the file in binary format for reading + config_dict = yaml.safe_load(fc) + telescope_ids = list(config_dict["mc_tel_ids"].values()) + if source_in is None: source_list = joblib.load("list_sources.dat") @@ -244,7 +244,7 @@ def main(): directories_generator_real( str(target_dir), telescope_ids, MAGIC_runs, source_name ) # Here we create all the necessary directories in the given workspace and collect the main directory of the target - config_file_gen(target_dir, source_name, config_file) + config_file_gen(target_dir, source_name, config_dict) # Below we run the analysis on the MAGIC data diff --git a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/merge_stereo.py b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/merge_stereo.py index 1833498fc..cf8b90736 100644 --- a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/merge_stereo.py +++ b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/merge_stereo.py @@ -27,7 +27,7 @@ logger.setLevel(logging.INFO) -def MergeStereo(target_dir, env_name, source, cluster): +def MergeStereo(target_dir, env_name, source, cluster, version): """ This function creates the bash scripts to run merge_hdf_files.py in all DL1Stereo subruns. 
@@ -41,10 +41,12 @@ def MergeStereo(target_dir, env_name, source, cluster): Name of the target cluster : str Cluster system + version : str + Version of the input (stereo subruns) data """ process_name = source - stereo_DL1_dir = f"{target_dir}/v{__version__}/{source}" + stereo_DL1_dir = f"{target_dir}/v{version}/{source}" listOfNightsLST = np.sort(glob.glob(f"{stereo_DL1_dir}/DL1Stereo/*")) if cluster != "SLURM": logger.warning( @@ -53,7 +55,9 @@ return for nightLST in listOfNightsLST: night = nightLST.split("/")[-1] - stereoMergeDir = f"{stereo_DL1_dir}/DL1Stereo/Merged/{night}" + stereoMergeDir = ( + f"{target_dir}/v{__version__}/{source}/DL1Stereo/Merged/{night}" + ) os.makedirs(f"{stereoMergeDir}/logs", exist_ok=True) if len(glob.glob(f"{nightLST}/dl1_stereo*.h5")) < 1: @@ -109,6 +113,9 @@ def main(): source_in = config["data_selection"]["source_name_database"] source = config["data_selection"]["source_name_output"] cluster = config["general"]["cluster"] + in_version = config["directories"]["real_input_version"] + if in_version == "": + in_version = __version__ if source_in is None: source_list = joblib.load("list_sources.dat") @@ -121,7 +128,7 @@ for source_name in source_list: print("***** Merging DL1Stereo files run-wise...") - MergeStereo(target_dir, env_name, source_name, cluster) + MergeStereo(target_dir, env_name, source_name, cluster, in_version) list_of_merge = glob.glob(f"{source_name}_StereoMerge_*.sh") if len(list_of_merge) < 1: diff --git a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/merging_runs.py b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/merging_runs.py index ca80d7d56..27c1cb143 100644 --- a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/merging_runs.py +++ b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/merging_runs.py @@ -33,7 +33,7 @@ logger.setLevel(logging.INFO) -def merge(target_dir, MAGIC_runs, env_name, source, cluster): +def 
merge(target_dir, MAGIC_runs, env_name, source, cluster, version): """ This function creates the bash scripts to run merge_hdf_files.py for real data @@ -50,11 +50,14 @@ def merge(target_dir, MAGIC_runs, env_name, source, cluster): Target name cluster : str Cluster system + version : str + Version of the input (DL1 MAGIC subruns) data """ process_name = f"merging_{source}" - MAGIC_DL1_dir = f"{target_dir}/v{__version__}/{source}/DL1/" + MAGIC_in_dir = f"{target_dir}/v{version}/{source}/DL1/" + MAGIC_out_dir = f"{target_dir}/v{__version__}/{source}/DL1/" if cluster != "SLURM": logger.warning( @@ -65,18 +68,18 @@ def merge(target_dir, MAGIC_runs, env_name, source, cluster): queue="short", job_name=process_name, mem="2g", - out_name=f"{MAGIC_DL1_dir}/Merged/logs/slurm-%x.%j", + out_name=f"{MAGIC_out_dir}/Merged/logs/slurm-%x.%j", ) - os.makedirs(f"{MAGIC_DL1_dir}/Merged/logs", exist_ok=True) + os.makedirs(f"{MAGIC_out_dir}/Merged/logs", exist_ok=True) with open(f"{source}_Merge_MAGIC.sh", "w") as f: f.writelines(lines) for magic in [1, 2]: for i in MAGIC_runs: # Here is a difference w.r.t. original code. 
If only one telescope data are available they will be merged now for this telescope - indir = f"{MAGIC_DL1_dir}/M{magic}/{i[0]}/{i[1]}" + indir = f"{MAGIC_in_dir}/M{magic}/{i[0]}/{i[1]}" if os.path.exists(f"{indir}"): - outdir = f"{MAGIC_DL1_dir}/Merged/{i[0]}" + outdir = f"{MAGIC_out_dir}/Merged/{i[0]}" os.makedirs(f"{outdir}/logs", exist_ok=True) f.write( @@ -120,6 +123,9 @@ def main(): source_in = config["data_selection"]["source_name_database"] source = config["data_selection"]["source_name_output"] cluster = config["general"]["cluster"] + in_version = config["directories"]["real_input_version"] + if in_version == "": + in_version = __version__ if source_in is None: source_list = joblib.load("list_sources.dat") @@ -144,6 +150,7 @@ env_name, source_name, cluster, + in_version, ) # generating the bash script to merge the subruns print("***** Running merge_hdf_files.py on the MAGIC data files...") diff --git a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/stereo_events.py b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/stereo_events.py index a2f8a4eea..4e80a3a8d 100644 --- a/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/stereo_events.py +++ b/magicctapipe/scripts/lst1_magic/semi_automatic_scripts/stereo_events.py @@ -57,13 +57,18 @@ def configfile_stereo(target_dir, source_name, config_file): "mc_tel_ids": config_dict["mc_tel_ids"], "stereo_reco": config_dict["stereo_reco"], } - file_name = f"{target_dir}/v{__version__}/{source_name}/config_stereo.yaml" + + conf_dir = f"{target_dir}/v{__version__}/{source_name}" + os.makedirs(conf_dir, exist_ok=True) + + file_name = f"{conf_dir}/config_stereo.yaml" + with open(file_name, "w") as f: yaml.dump(conf, f, default_flow_style=False) -def bash_stereo(target_dir, source, env_name, cluster): +def bash_stereo(target_dir, source, env_name, cluster, version): """ This function generates the bashscripts for running the stereo analysis. 
@@ -78,11 +83,13 @@ def bash_stereo(target_dir, source, env_name, cluster): Name of the environment cluster : str Cluster system + version : str + Version of the input (coincident) data """ process_name = source - coincidence_DL1_dir = f"{target_dir}/v{__version__}/{source}" + coincidence_DL1_dir = f"{target_dir}/v{version}/{source}" listOfNightsLST = np.sort(glob.glob(f"{coincidence_DL1_dir}/DL1Coincident/*")) if cluster != "SLURM": @@ -92,7 +99,7 @@ return for nightLST in listOfNightsLST: night = nightLST.split("/")[-1] - stereoDir = f"{coincidence_DL1_dir}/DL1Stereo/{night}" + stereoDir = f"{target_dir}/v{__version__}/{source}/DL1Stereo/{night}" os.makedirs(f"{stereoDir}/logs", exist_ok=True) if not os.listdir(f"{nightLST}"): continue @@ -166,6 +173,9 @@ def main(): source = config["data_selection"]["source_name_output"] cluster = config["general"]["cluster"] + in_version = config["directories"]["real_input_version"] + if in_version == "": + in_version = __version__ if source_in is None: source_list = joblib.load("list_sources.dat") @@ -182,7 +192,7 @@ # Below we run the analysis on the real data print("***** Generating the bashscript...") - bash_stereo(target_dir, source_name, env_name, cluster) + bash_stereo(target_dir, source_name, env_name, cluster, in_version) print("***** Submitting processess to the cluster...") print(f"Process name: {source_name}_stereo") diff --git a/setup.cfg b/setup.cfg index ecd18a554..35cfa835a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -108,8 +108,10 @@ console_scripts = merging_runs = magicctapipe.scripts.lst1_magic.semi_automatic_scripts.merging_runs:main nsb_level = magicctapipe.scripts.lst1_magic.semi_automatic_scripts.database_production.nsb_level:main nsb_to_h5 = magicctapipe.scripts.lst1_magic.semi_automatic_scripts.database_production.nsb_to_h5:main + set_ra_dec = magicctapipe.scripts.lst1_magic.semi_automatic_scripts.database_production.set_ra_dec:main 
stereo_events = magicctapipe.scripts.lst1_magic.semi_automatic_scripts.stereo_events:main update_MAGIC_database = magicctapipe.scripts.lst1_magic.semi_automatic_scripts.database_production.update_MAGIC_database:main + wobble_db = magicctapipe.scripts.lst1_magic.semi_automatic_scripts.database_production.wobble_db:main [tool:pytest] minversion=3.0