diff --git a/matsim/scenariogen/network/ref_model/individual.py b/matsim/scenariogen/network/ref_model/individual.py
new file mode 100644
index 0000000..5a5d0b8
--- /dev/null
+++ b/matsim/scenariogen/network/ref_model/individual.py
@@ -0,0 +1,18 @@
+""" Reference model to fine-tune individual links. This model does not use features and is not transferable to other scenarios. """
+
+# Each link has its own target speed
+
+
+params = None
+
+def score(params, inputs):
+    return params[inputs[0]]
+
+
+def batch_loss(params, inputs, targets):
+    error = 0
+    inputs = inputs.astype(int)
+    for x, y in zip(inputs, targets):
+        preds = score(params, x)
+        error += (preds - y) ** 2
+    return error
diff --git a/matsim/scenariogen/network/runOPT.sh b/matsim/scenariogen/network/runOPT.sh
index 3a70a2e..a29c1ec 100755
--- a/matsim/scenariogen/network/runOPT.sh
+++ b/matsim/scenariogen/network/runOPT.sh
@@ -1,19 +1,20 @@
 #!/bin/bash --login
-#$ -l h_rt=700000
-#$ -j y
-#$ -m a
-#$ -o ./logfile/logfile_$JOB_NAME.log
-#$ -cwd
-#$ -pe mp 6
-#$ -l mem_free=4G
-#$ -l cpuflag_avx2=1
-#$ -N network-opt
+#SBATCH --time=200:00:00
+#SBATCH --partition=smp
+#SBATCH --output=./logfile/logfile_%x-%j.log
+#SBATCH --nodes=1                   # How many computing nodes do you need (for MATSim usually 1)
+#SBATCH --ntasks=1                  # How many tasks should be run (for MATSim usually 1)
+#SBATCH --cpus-per-task=6           # Number of CPUs per task (for MATSim usually 8 - 12)
+#SBATCH --mem=12G                   # RAM for the job
+#SBATCH --job-name=network-opt      # Name of your run, displayed in the job list
+#SBATCH --constraint=cpuflag_avx2:1
+#SBATCH --mail-type=END,FAIL
 
 date
 hostname
 
 source venv/bin/activate
-module add java/17
+module add java/21
 
 jar="matsim-[name]-SNAPSHOT.jar"
 input="input/*"
diff --git a/matsim/scenariogen/network/run_opt_freespeed.py b/matsim/scenariogen/network/run_opt_freespeed.py
index 3ab33a5..410cbb9 100644
--- a/matsim/scenariogen/network/run_opt_freespeed.py
+++ b/matsim/scenariogen/network/run_opt_freespeed.py
@@ -61,7 +61,8 @@ def setup(parser: argparse.ArgumentParser):
     parser.add_argument("--resume", help="File with parameters to to resume", default=None)
     parser.add_argument("--port", type=int, nargs="+", help="Port to connect on", default=[9090])
     parser.add_argument("--ref-model", required=False, default=None,
-                        help="Use an integrated model instead of importing", choices=["tree", "germany"])
+                        help="Use an integrated model instead of importing", choices=["tree", "individual", "germany"])
+    parser.add_argument("--ref-size", type=int, help="Number of links (needed for individual model)", default=0)
     parser.add_argument("--learning-rate", type=float, help="Start learning rate", default=1e-4)
     parser.add_argument("--batch-size", type=int, help="Batch size", default=128)
     parser.add_argument("--output", help="Output folder for params", default="output-params")
@@ -77,6 +78,15 @@ def main(args):
     if args.ref_model == "tree":
         from .ref_model import tree as p
         rbl = tl = p
+    elif args.ref_model == "individual":
+        # Enables non-relative import as well
+        try:
+            from .ref_model import individual as p
+            rbl = tl = p
+        except ImportError:
+            from ref_model import individual as p
+            rbl = tl = p
+
     elif args.ref_model == "germany":
         from .ref_model.germany import speedRelative_priority as p
         from .ref_model.germany import speedRelative_right_before_left as rbl
@@ -100,8 +110,13 @@ def main(args):
             staircase=False
         )
 
+        # Need to load default params statically
+        if module.params is None:
+            params = jnp.array(resume[name]) if name in resume else jnp.ones(args.ref_size) * 0.85
+        else:
+            params = jnp.array(resume[name] if name in resume else module.params)
+
         optimizer = optax.adam(schedule)
-        params = jnp.array(resume[name] if name in resume else module.params)
         opt_state = optimizer.init(params)
 
         models[name] = Model(
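Note: the following is a minimal, self-contained sketch, not part of the patch and not the actual training loop in run_opt_freespeed.py. It shows how the new "individual" reference model could be driven with optax under the assumptions visible in the diff: per-link parameters of length --ref-size start at 0.85 and are updated from the gradient of batch_loss, where column 0 of each input row is the link index. The link count and batch values below are hypothetical placeholders.

# Usage sketch for the "individual" reference model (hypothetical values).
import jax
import jax.numpy as jnp
import optax

from matsim.scenariogen.network.ref_model import individual as module

ref_size = 100                        # hypothetical --ref-size (number of links)
params = jnp.ones(ref_size) * 0.85    # default start value, as in the patch

optimizer = optax.adam(1e-4)
opt_state = optimizer.init(params)

# Dummy batch: column 0 of each input row is the link index, targets are target speeds.
inputs = jnp.array([[0.0], [1.0], [2.0], [1.0]])
targets = jnp.array([0.9, 0.8, 0.95, 0.82])

# One Adam step on the squared-error loss defined in individual.py.
grads = jax.grad(module.batch_loss)(params, inputs, targets)
updates, opt_state = optimizer.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)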