Commit afa8544

Merge branch 'main' of https://github.com/mackelab/labproject into main

zinaStef committed Feb 7, 2024
2 parents 3e5cfb5 + e4540b4
Showing 8 changed files with 4,135 additions and 27 deletions.
9 changes: 9 additions & 0 deletions configs/conf_embedding_samplesize.yaml
@@ -0,0 +1,9 @@
+exp_log_name: "embed_samplesize" # optional but recommended
+data: ["imagenet_real_embeddings"]
+experiments: ["ScaleSampleSizeSW", "ScaleSampleSizeC2ST", "ScaleSampleSizeMMD", "ScaleSampleSizeFID",]
+n: [10000]
+d: [2048]
+augmentation: ['imagenet_uncond_embeddings']
+
+seed: 0
+runs: 5
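
For orientation, a minimal sketch of how a config like this could be loaded and iterated with OmegaConf (which get_cfg in labproject/utils.py builds on); the path and the loop below are illustrative, not the repository's actual runner:

from omegaconf import OmegaConf

# Load the new experiment grid; every field name matches the YAML above.
cfg = OmegaConf.load("configs/conf_embedding_samplesize.yaml")

for exp_name in cfg.experiments:
    for n, d in zip(cfg.n, cfg.d):
        print(
            f"would run {exp_name} on {cfg.data[0]} "
            f"with n={n}, d={d}, seed={cfg.seed}, runs={cfg.runs}"
        )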
415 changes: 392 additions & 23 deletions docs/notebooks/MMD_intro.ipynb

Large diffs are not rendered by default.

308 changes: 308 additions & 0 deletions docs/notebooks/embedding_sample_size.ipynb

Large diffs are not rendered by default.

Binary file added docs/notebooks/gamma.pdf
Binary file not shown.
903 changes: 903 additions & 0 deletions docs/notebooks/mmd_sbi_vs_our.ipynb

Large diffs are not rendered by default.

2,507 changes: 2,507 additions & 0 deletions docs/notebooks/wasser_misspec_experiment.ipynb

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions labproject/experiments.py
@@ -132,7 +132,7 @@ def __init__(
         self.sample_sizes = list(range(min_samples, max_samples, step))
         super().__init__()
 
-    def run_experiment(self, dataset1, dataset2, nb_runs=5, sample_sizes=None):
+    def run_experiment(self, dataset1, dataset2, nb_runs=5, sample_sizes=None, **kwargs):
         """
         Computes for each subset 5 different random subsets and averages performance across the subsets.
         """
@@ -145,7 +145,7 @@ def run_experiment(self, dataset1, dataset2, nb_runs=5, sample_sizes=None):
             for n in sample_sizes:
                 data1 = dataset1[torch.randperm(dataset1.size(0))[:n], :]
                 data2 = dataset2[torch.randperm(dataset2.size(0))[:n], :]
-                distances.append(self.metric_fn(data1, data2))
+                distances.append(self.metric_fn(data1, data2, **kwargs))
             final_distances.append(distances)
         final_distances = torch.transpose(torch.tensor(final_distances), 0, 1)
         final_errors = (
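
A self-contained toy sketch (not the repository's classes; the metric and all names below are invented) of what the **kwargs change enables: keyword arguments handed to run_experiment are forwarded unchanged to the metric function.

import torch

def mean_diff(x, y, p=2):
    # stand-in metric; `p` plays the role of a metric-specific option such as a kernel bandwidth
    return torch.norm(x.mean(0) - y.mean(0), p=p)

def run_experiment(metric_fn, dataset1, dataset2, sample_sizes, nb_runs=5, **kwargs):
    final_distances = []
    for _ in range(nb_runs):
        distances = []
        for n in sample_sizes:
            data1 = dataset1[torch.randperm(dataset1.size(0))[:n], :]
            data2 = dataset2[torch.randperm(dataset2.size(0))[:n], :]
            # the extra keyword arguments reach the metric unchanged
            distances.append(metric_fn(data1, data2, **kwargs).item())
        final_distances.append(distances)
    return torch.tensor(final_distances)

x, y = torch.randn(1000, 8), torch.randn(1000, 8) + 0.1
out = run_experiment(mean_diff, x, y, sample_sizes=[100, 500, 1000], p=1)
print(out.shape)  # torch.Size([5, 3]): nb_runs x len(sample_sizes)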
16 changes: 14 additions & 2 deletions labproject/utils.py
@@ -49,7 +49,7 @@ def get_cfg() -> OmegaConf:
     return config
 
 
-def get_log_path(cfg):
+def get_log_path(cfg, tag="", timestamp=True):
     """
     Get the log path for the current experiment run.
     This log path is then used to save the numerical results of the experiment.
@@ -63,6 +63,18 @@ def get_log_path(cfg):
     else:
         exp_log_name = cfg.exp_log_name
     # add datetime to the name
-    exp_log_name = exp_log_name + "_" + now.strftime("%Y-%m-%d_%H-%M-%S")
+    add_date = now.strftime("%Y-%m-%d_%H-%M-%S") if timestamp else ""
+    exp_log_name = exp_log_name + tag + "_" + add_date
     log_path = os.path.join(f"results/{cfg.running_user}/{exp_log_name}.pkl")
     return log_path
+
+
+def load_experiments(cfg, tag="", now=""):
+    """
+    load the experiments to run
+    """
+    exp_log_name = cfg.exp_log_name
+    # add datetime to the name
+    exp_log_name = exp_log_name + tag + "_" + now
+    log_path = os.path.join(f"results/{cfg.running_user}/{exp_log_name}")
+    return log_path
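
A hedged usage sketch for the reworked logging helpers (the cfg values, the "_sw" tag, and the timestamp string are illustrative; the two functions themselves come from labproject/utils.py). Note that get_log_path appends ".pkl" while load_experiments does not:

from omegaconf import OmegaConf
from labproject.utils import get_log_path, load_experiments

cfg = OmegaConf.create({"exp_log_name": "embed_samplesize", "running_user": "zina"})

# timestamped save path, e.g. results/zina/embed_samplesize_sw_2024-02-07_12-00-00.pkl
save_path = get_log_path(cfg, tag="_sw")

# with timestamp=False the name stays stable across runs (and keeps a trailing "_")
stable_path = get_log_path(cfg, tag="_sw", timestamp=False)

# to locate an earlier run, the timestamp string is passed back in explicitly
reload_path = load_experiments(cfg, tag="_sw", now="2024-02-07_12-00-00")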
