diff --git a/configs/core/tests/offline/embeddings.yaml b/configs/core/tests/offline/embeddings.yaml
index f845fe00..5c11cd2a 100644
--- a/configs/core/tests/offline/embeddings.yaml
+++ b/configs/core/tests/offline/embeddings.yaml
@@ -26,11 +26,9 @@ model:
         out_features: &NUM_CLASSES 2
     criterion: torch.nn.CrossEntropyLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: ${oc.env:LR_VALUE, 0.1}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
diff --git a/configs/vision/pathology/offline/classification/bach.yaml b/configs/vision/pathology/offline/classification/bach.yaml
index d4b17ece..bf9494ba 100644
--- a/configs/vision/pathology/offline/classification/bach.yaml
+++ b/configs/vision/pathology/offline/classification/bach.yaml
@@ -53,11 +53,9 @@ model:
         out_features: &NUM_CLASSES 4
     criterion: torch.nn.CrossEntropyLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: &LR_VALUE ${oc.env:LR_VALUE, 0.000625}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
diff --git a/configs/vision/pathology/offline/classification/camelyon16.yaml b/configs/vision/pathology/offline/classification/camelyon16.yaml
index 6d898517..5a5f9a5c 100644
--- a/configs/vision/pathology/offline/classification/camelyon16.yaml
+++ b/configs/vision/pathology/offline/classification/camelyon16.yaml
@@ -18,7 +18,7 @@ trainer:
           filename: best
           save_last: true
           save_top_k: 1
-          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy}
+          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryBalancedAccuracy}
           mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max}
       - class_path: lightning.pytorch.callbacks.EarlyStopping
         init_args:
diff --git a/configs/vision/pathology/offline/classification/camelyon16_small.yaml b/configs/vision/pathology/offline/classification/camelyon16_small.yaml
index 133350b5..dd4be2af 100644
--- a/configs/vision/pathology/offline/classification/camelyon16_small.yaml
+++ b/configs/vision/pathology/offline/classification/camelyon16_small.yaml
@@ -18,7 +18,7 @@ trainer:
           filename: best
           save_last: true
           save_top_k: 1
-          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy}
+          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryBalancedAccuracy}
           mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max}
       - class_path: lightning.pytorch.callbacks.EarlyStopping
         init_args:
diff --git a/configs/vision/pathology/offline/classification/crc.yaml b/configs/vision/pathology/offline/classification/crc.yaml
index bc458a70..feca261e 100644
--- a/configs/vision/pathology/offline/classification/crc.yaml
+++ b/configs/vision/pathology/offline/classification/crc.yaml
@@ -53,11 +53,9 @@ model:
         out_features: &NUM_CLASSES 9
     criterion: torch.nn.CrossEntropyLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: &LR_VALUE ${oc.env:LR_VALUE, 0.01}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
@@ -104,7 +102,7 @@ data:
           split: val
     dataloaders:
       train:
-        batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 4096}
+        batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 256}
         num_workers: &N_DATA_WORKERS ${oc.env:N_DATA_WORKERS, 4}
         shuffle: true
       val:
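The same optimizer swap recurs in every classification config touched by this patch. As a consolidated illustration (not copied from any single file; indentation and surrounding keys are abbreviated), the before/after pattern is:

```yaml
# Before: SGD with momentum and a per-task tuned default learning rate.
optimizer:
  class_path: torch.optim.SGD
  init_args:
    # ${oc.env:VAR, default} is OmegaConf's env resolver:
    # use $LR_VALUE if it is set, otherwise fall back to the default.
    lr: ${oc.env:LR_VALUE, 0.01}
    momentum: 0.9
    weight_decay: 0.0

# After: AdamW with a single task-independent default (0.0003),
# still overridable at runtime via the LR_VALUE environment variable.
optimizer:
  class_path: torch.optim.AdamW
  init_args:
    lr: ${oc.env:LR_VALUE, 0.0003}
```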
diff --git a/configs/vision/pathology/offline/classification/mhist.yaml b/configs/vision/pathology/offline/classification/mhist.yaml
index 8f423c47..f96c1f15 100644
--- a/configs/vision/pathology/offline/classification/mhist.yaml
+++ b/configs/vision/pathology/offline/classification/mhist.yaml
@@ -18,12 +18,12 @@ trainer:
           filename: best
           save_last: true
           save_top_k: 1
-          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy}
+          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryBalancedAccuracy}
           mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max}
       - class_path: lightning.pytorch.callbacks.EarlyStopping
         init_args:
           min_delta: 0
-          patience: 51
+          patience: 70
           monitor: *MONITOR_METRIC
           mode: *MONITOR_METRIC_MODE
       - class_path: eva.callbacks.ClassificationEmbeddingsWriter
@@ -53,11 +53,9 @@ model:
         out_features: 1
     criterion: torch.nn.BCEWithLogitsLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: &LR_VALUE ${oc.env:LR_VALUE, 0.000625}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
diff --git a/configs/vision/pathology/offline/classification/patch_camelyon.yaml b/configs/vision/pathology/offline/classification/patch_camelyon.yaml
index 4bce666d..fc8450e7 100644
--- a/configs/vision/pathology/offline/classification/patch_camelyon.yaml
+++ b/configs/vision/pathology/offline/classification/patch_camelyon.yaml
@@ -18,7 +18,7 @@ trainer:
           filename: best
           save_last: true
           save_top_k: 1
-          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy}
+          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryBalancedAccuracy}
           mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max}
       - class_path: lightning.pytorch.callbacks.EarlyStopping
         init_args:
@@ -54,11 +54,9 @@ model:
         out_features: 1
     criterion: torch.nn.BCEWithLogitsLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: &LR_VALUE ${oc.env:LR_VALUE, 0.01}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
@@ -118,7 +116,7 @@ data:
           split: test
     dataloaders:
       train:
-        batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 4096}
+        batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 256}
         num_workers: &N_DATA_WORKERS ${oc.env:N_DATA_WORKERS, 4}
         shuffle: true
       val:
diff --git a/configs/vision/pathology/online/classification/bach.yaml b/configs/vision/pathology/online/classification/bach.yaml
index 60e8b6ae..1719d821 100644
--- a/configs/vision/pathology/online/classification/bach.yaml
+++ b/configs/vision/pathology/online/classification/bach.yaml
@@ -45,11 +45,9 @@ model:
         out_features: &NUM_CLASSES 4
     criterion: torch.nn.CrossEntropyLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: &LR_VALUE ${oc.env:LR_VALUE, 0.000625}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
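Several of these files also retarget checkpointing and early stopping from `val/BinaryAccuracy` to `val/BinaryBalancedAccuracy`. The `&MONITOR_METRIC`/`*MONITOR_METRIC` pair is plain YAML anchor/alias syntax, so both callbacks are guaranteed to track the same metric. A minimal sketch of the post-patch callback section (class paths and values taken from the diff; list nesting abbreviated):

```yaml
callbacks:
  - class_path: lightning.pytorch.callbacks.ModelCheckpoint
    init_args:
      filename: best
      save_last: true
      save_top_k: 1
      # &NAME defines a YAML anchor; the env resolver supplies the default value.
      monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryBalancedAccuracy}
      mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max}
  - class_path: lightning.pytorch.callbacks.EarlyStopping
    init_args:
      min_delta: 0
      patience: 70
      # *NAME reuses the anchored value, keeping both callbacks in sync.
      monitor: *MONITOR_METRIC
      mode: *MONITOR_METRIC_MODE
```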
diff --git a/configs/vision/pathology/online/classification/crc.yaml b/configs/vision/pathology/online/classification/crc.yaml
index cdf743d5..5abe659e 100644
--- a/configs/vision/pathology/online/classification/crc.yaml
+++ b/configs/vision/pathology/online/classification/crc.yaml
@@ -45,11 +45,9 @@ model:
         out_features: &NUM_CLASSES 9
     criterion: torch.nn.CrossEntropyLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: &LR_VALUE ${oc.env:LR_VALUE, 0.01}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
@@ -86,7 +84,7 @@ data:
           split: val
     dataloaders:
       train:
-        batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 4096}
+        batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 256}
         num_workers: &N_DATA_WORKERS ${oc.env:N_DATA_WORKERS, 4}
         shuffle: true
       val:
diff --git a/configs/vision/pathology/online/classification/mhist.yaml b/configs/vision/pathology/online/classification/mhist.yaml
index 4c87db98..25dcbc50 100644
--- a/configs/vision/pathology/online/classification/mhist.yaml
+++ b/configs/vision/pathology/online/classification/mhist.yaml
@@ -17,12 +17,12 @@ trainer:
           filename: best
           save_last: true
           save_top_k: 1
-          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy}
+          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryBalancedAccuracy}
           mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max}
       - class_path: lightning.pytorch.callbacks.EarlyStopping
         init_args:
           min_delta: 0
-          patience: 51
+          patience: 70
           monitor: *MONITOR_METRIC
           mode: *MONITOR_METRIC_MODE
     logger:
@@ -45,11 +45,9 @@ model:
         out_features: 1
     criterion: torch.nn.BCEWithLogitsLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: &LR_VALUE ${oc.env:LR_VALUE, 0.000625}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
diff --git a/configs/vision/pathology/online/classification/patch_camelyon.yaml b/configs/vision/pathology/online/classification/patch_camelyon.yaml
index 95c3bb43..13817a71 100644
--- a/configs/vision/pathology/online/classification/patch_camelyon.yaml
+++ b/configs/vision/pathology/online/classification/patch_camelyon.yaml
@@ -17,7 +17,7 @@ trainer:
           filename: best
           save_last: true
           save_top_k: 1
-          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy}
+          monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryBalancedAccuracy}
           mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max}
       - class_path: lightning.pytorch.callbacks.EarlyStopping
         init_args:
@@ -45,11 +45,9 @@ model:
         out_features: 1
     criterion: torch.nn.BCEWithLogitsLoss
    optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: &LR_VALUE ${oc.env:LR_VALUE, 0.01}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
@@ -91,7 +89,7 @@ data:
           split: test
     dataloaders:
       train:
-        batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 4096}
+        batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 256}
         num_workers: &N_DATA_WORKERS ${oc.env:N_DATA_WORKERS, 4}
         shuffle: true
       val:
diff --git a/configs/vision/tests/offline/panda.yaml b/configs/vision/tests/offline/panda.yaml
index 28844dd1..4051b4ed 100644
--- a/configs/vision/tests/offline/panda.yaml
+++ b/configs/vision/tests/offline/panda.yaml
@@ -34,11 +34,9 @@ model:
         output_size: &NUM_CLASSES 6
     criterion: torch.nn.CrossEntropyLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: &LR_VALUE ${oc.env:LR_VALUE, 0.00004}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
diff --git a/configs/vision/tests/offline/patch_camelyon.yaml b/configs/vision/tests/offline/patch_camelyon.yaml
index 64c503bf..e09a44c6 100644
--- a/configs/vision/tests/offline/patch_camelyon.yaml
+++ b/configs/vision/tests/offline/patch_camelyon.yaml
@@ -47,11 +47,9 @@ model:
         out_features: 1
     criterion: torch.nn.BCEWithLogitsLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: ${oc.env:LR_VALUE, 0.1}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
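The train batch-size default drops from 4096 to 256 alongside the optimizer change. For context, the removed SGD defaults followed a linear batch-size scaling rule (0.01 at batch size 4096 corresponds exactly to 0.000625 at 256), whereas the new AdamW defaults pin a single learning rate and batch size across patch-level tasks. A sketch of the resulting dataloader block, with values from the diff (surrounding keys abbreviated):

```yaml
dataloaders:
  train:
    # New default is 256 (was 4096); still overridable at runtime,
    # e.g. by exporting BATCH_SIZE=512 before launching a run.
    batch_size: &BATCH_SIZE ${oc.env:BATCH_SIZE, 256}
    num_workers: &N_DATA_WORKERS ${oc.env:N_DATA_WORKERS, 4}
    shuffle: true
```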
diff --git a/configs/vision/tests/online/patch_camelyon.yaml b/configs/vision/tests/online/patch_camelyon.yaml
index 14420dc5..4b870941 100644
--- a/configs/vision/tests/online/patch_camelyon.yaml
+++ b/configs/vision/tests/online/patch_camelyon.yaml
@@ -18,11 +18,9 @@ model:
         out_features: 1
     criterion: torch.nn.BCEWithLogitsLoss
     optimizer:
-      class_path: torch.optim.SGD
+      class_path: torch.optim.AdamW
       init_args:
-        lr: ${oc.env:LR_VALUE, 0.1}
-        momentum: 0.9
-        weight_decay: 0.0
+        lr: ${oc.env:LR_VALUE, 0.0003}
     lr_scheduler:
       class_path: torch.optim.lr_scheduler.CosineAnnealingLR
       init_args:
diff --git a/docs/images/leaderboard.svg b/docs/images/leaderboard.svg
index 46da272c..2031c979 100644
--- a/docs/images/leaderboard.svg
+++ b/docs/images/leaderboard.svg
@@ -6,7 +6,7 @@
-       2024-10-11T17:12:02.777458
+       2024-10-18T15:48:36.884888
[regenerated leaderboard heatmap: the remaining hunks update the creation timestamp above, the plot's clip-path id (pbddcf3c616 -> p854563dae4), and the cell fill colors and tick/label positions to match the new scores; raw SVG path data omitted]
diff --git a/docs/images/starplot.png b/docs/images/starplot.png
index 97e2cebd..5f600e9c 100644
Binary files a/docs/images/starplot.png and b/docs/images/starplot.png differ
diff --git a/docs/leaderboards.md b/docs/leaderboards.md
index e2aaba2a..66e53f4e 100644
--- a/docs/leaderboards.md
+++ b/docs/leaderboards.md
@@ -27,7 +27,7 @@ For details on the FM-backbones and instructions to replicate the results, check
 
 ## Evaluation protocol
 
-*eva* uses a task- & model-independent and fixed default set up which closely follows the standard evaluation protocol proposed by [1] (with adjustments for slide-level tasks to ensure convergence and computational efficiency).
+*eva* uses a fixed protocol customized to each category of tasks. The setup has proven performant and robust independently of task, model size, and architecture, and it generally prioritizes fairness and comparability over state-of-the-art performance.
 
 We selected this approach to prioritize reliable, robust and fair FM-evaluation while being in line with common literature.
 
@@ -38,26 +38,21 @@
 | **Dropout** | 0.0 | 0.0 | 0.0 |
 | **Hidden activation function** | n/a | ReLU | n/a |
 | **Output activation function** | none | none | none |
-| **Number of steps** | 12,500 | 12,500 (2) | 2,000 |
-| **Base batch size** | 4,096 (1) | 32 | 64 |
-| **Base learning rate** | 0.01 (1) | 0.001 | 0.0001 |
-| **Early stopping** | 5% * [Max epochs] | 10% * [Max epochs] (3) | 10% * [Max epochs] (3) |
+| **Number of steps** | 12,500 | 12,500 (1) | 2,000 |
+| **Base batch size** | 256 | 32 | 64 |
+| **Base learning rate** | 0.0003 | 0.001 | 0.0001 |
+| **Early stopping** | 5% * [Max epochs] | 10% * [Max epochs] (2) | 10% * [Max epochs] (2) |
 | **Optimizer** | SGD | AdamW | AdamW |
 | **Momentum** | 0.9 | n/a | n/a |
 | **Weight Decay** | 0.0 | n/a | n/a |
 | **betas** | n/a | [0.9, 0.999] | [0.9, 0.999] |
 | **LR Schedule** | Cosine without warmup | Cosine without warmup | PolynomialLR |
 | **Loss** | Cross entropy | Cross entropy | Dice |
-| **number of patches per slide**| 1 | dataset specific (4) | dataset specific (4) |
+| **number of patches per slide**| 1 | dataset specific (3) | dataset specific (3) |
 
-(1) For smaller datasets (e.g. BACH with 400 samples) we reduce the batch size to 256 and scale the learning rate accordingly.
+(1) Upper cap at a maximum of 100 epochs.
 
-(2) Upper cap at a maximum of 100 epochs.
+(2) Lower cap at a minimum of 8 epochs.
 
-(3) Lower cap at a minimum of 8 epochs.
-
-(4) Number of patches per slide depends on task and slide size. E.g. for PANDA and Camelyon16 we use a max of 1,000 and 10,000 random patches per slide respectively.
-
-
-- [1]: [Virchow: A Million-Slide Digital Pathology Foundation Model, 2024](https://arxiv.org/pdf/2309.07778.pdf)
+(3) Number of patches per slide depends on task and slide size. E.g. for `PANDASmall` and `Camelyon16Small` we use a max of 200 and 1,000 random patches per slide respectively.
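For readers mapping the docs back onto the configs: below is a hypothetical fragment consolidating the updated patch-level defaults. Values come from the table and the config diffs above; the trainer/model/data layout is abbreviated, and `T_max` is inferred from the step count rather than copied from a file.

```yaml
trainer:
  max_steps: 12500                           # "Number of steps" in the table
model:
  optimizer:
    class_path: torch.optim.AdamW            # per the config diffs above
    init_args:
      lr: ${oc.env:LR_VALUE, 0.0003}         # base learning rate
  lr_scheduler:
    class_path: torch.optim.lr_scheduler.CosineAnnealingLR
    init_args:
      T_max: 12500                           # cosine schedule without warmup
data:
  dataloaders:
    train:
      batch_size: ${oc.env:BATCH_SIZE, 256}  # base batch size
```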
diff --git a/tools/data/leaderboard.csv b/tools/data/leaderboard.csv
index c78416df..f30a3a33 100644
--- a/tools/data/leaderboard.csv
+++ b/tools/data/leaderboard.csv
@@ -1,12 +1,12 @@
 bach,crc,mhist,patch_camelyon,camelyon16_small,panda_small,consep,monusac,model
-0.77,0.936,0.751,0.905,0.767,0.625,0.63,0.537,dino_vits16_lunit
-0.715,0.942,0.766,0.925,0.797,0.64,0.68,0.54,owkin_phikon
-0.797,0.95,0.835,0.939,0.834,0.656,0.662,0.554,dino_vitl16_uni
-0.767,0.951,0.836,0.942,0.82,0.645,0.69,0.588,bioptimus_h_optimus_0
-0.758,0.953,0.814,0.948,0.814,0.664,0.661,0.558,prov_gigapath
-0.81,0.934,0.823,0.949,0.832,0.633,0.69,0.586,histai_hibou_l
-0.8,0.949,0.831,0.902,0.789,0.618,0.611,0.549,dino_vits16_kaiko
-0.825,0.948,0.826,0.887,0.814,0.654,0.688,0.599,dino_vits8_kaiko
-0.846,0.959,0.839,0.906,0.816,0.621,0.636,0.551,dino_vitb16_kaiko
-0.867,0.952,0.814,0.921,0.818,0.638,0.703,0.641,dino_vitb8_kaiko
-0.862,0.935,0.822,0.907,0.812,0.65,0.679,0.59,dino_vitl14_kaiko
+0.783,0.94,0.773,0.901,0.767,0.625,0.63,0.537,dino_vits16_lunit
+0.722,0.936,0.799,0.922,0.797,0.64,0.68,0.54,owkin_phikon
+0.797,0.947,0.844,0.936,0.834,0.656,0.662,0.554,dino_vitl16_uni
+0.758,0.958,0.839,0.942,0.82,0.645,0.69,0.588,bioptimus_h_optimus_0
+0.761,0.952,0.829,0.945,0.814,0.664,0.661,0.558,prov_gigapath
+0.816,0.931,0.826,0.951,0.832,0.633,0.69,0.586,histai_hibou_l
+0.802,0.938,0.829,0.904,0.789,0.618,0.611,0.549,dino_vits16_kaiko
+0.829,0.952,0.814,0.885,0.814,0.654,0.688,0.599,dino_vits8_kaiko
+0.835,0.958,0.835,0.907,0.816,0.621,0.636,0.551,dino_vitb16_kaiko
+0.858,0.957,0.823,0.918,0.818,0.638,0.703,0.641,dino_vitb8_kaiko
+0.864,0.936,0.828,0.908,0.812,0.65,0.679,0.59,dino_vitl14_kaiko