Skip to content

Commit

Permalink
updating dryrun
Browse files Browse the repository at this point in the history
  • Loading branch information
august-knox committed Nov 25, 2024
2 parents f3cd851 + f359d5c commit 29fe3a9
Show file tree
Hide file tree
Showing 16 changed files with 193 additions and 102 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/requirements/docs.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# docs
sphinx==8.1.3
sphinx-rtd-theme==3.0.1
sphinx-rtd-theme==3.0.2
codespell==2.3.0
pandas==2.2.3
pyyaml==6.0.2
Expand Down
33 changes: 22 additions & 11 deletions .github/workflows/run.yml
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ jobs:
- name: Dry run dynamic amg2023-openmp on Dane with allocation modifier
run: |
./bin/benchpark experiment init --dest=amg2023-openmp amg2023 openmp=oui
./bin/benchpark experiment init --dest=amg2023-openmp amg2023+openmp
./bin/benchpark setup ./amg2023-openmp LLNL-Dane-DELL-sapphirerapids-OmniPath workspace/
. workspace/setup.sh
ramble \
Expand Down Expand Up @@ -143,7 +143,7 @@ jobs:
- name: Dry run dynamic kripke-openmp on nosite-x86_64 with allocation modifier
run: |
./bin/benchpark experiment init --dest=kripke-openmp kripke openmp=oui
./bin/benchpark experiment init --dest=kripke-openmp kripke+openmp
./bin/benchpark setup ./kripke-openmp nosite-x86_64 workspace/
. workspace/setup.sh
ramble \
Expand All @@ -154,7 +154,7 @@ jobs:
- name: Dry run dynamic kripke-rocm on LLNL-Tioga-HPECray-zen3-MI250X-Slingshot with allocation modifier
run: |
./bin/benchpark experiment init --dest=kripke-rocm kripke rocm=oui
./bin/benchpark experiment init --dest=kripke-rocm kripke+rocm
./bin/benchpark setup ./kripke-openmp LLNL-Tioga-HPECray-zen3-MI250X-Slingshot workspace/
. workspace/setup.sh
ramble \
Expand Down Expand Up @@ -186,7 +186,7 @@ jobs:
- name: Dry run dynamic saxpy/rocm with static Tioga
run: |
./bin/benchpark experiment init --dest=saxpy-rocm saxpy rocm=oui
./bin/benchpark experiment init --dest=saxpy-rocm saxpy+rocm
./bin/benchpark setup ./saxpy-rocm LLNL-Tioga-HPECray-zen3-MI250X-Slingshot workspace/
. workspace/setup.sh
ramble \
Expand All @@ -198,7 +198,7 @@ jobs:
- name: Dry run dynamic saxpy/rocm with dynamic Tioga
run: |
./bin/benchpark system init --dest=tioga-system2 tioga rocm=551 compiler=cce ~gtl
./bin/benchpark experiment init --dest=saxpy-rocm2 saxpy rocm=oui
./bin/benchpark experiment init --dest=saxpy-rocm2 saxpy+rocm
./bin/benchpark setup ./saxpy-rocm2 ./tioga-system2 workspace/
. workspace/setup.sh
ramble \
Expand All @@ -210,7 +210,7 @@ jobs:
- name: Dry run dynamic saxpy/cuda with dynamic Sierra
run: |
./bin/benchpark system init --dest=sierra-system sierra cuda=10-1-243 compiler=xl
./bin/benchpark experiment init --dest=saxpy-cuda saxpy cuda=oui
./bin/benchpark experiment init --dest=saxpy-cuda saxpy+cuda
./bin/benchpark setup ./saxpy-cuda ./sierra-system workspace/
. workspace/setup.sh
ramble \
Expand Down Expand Up @@ -262,7 +262,7 @@ jobs:
- name: Dry run dynamic quicksilver-openmp on nosite-x86_64 with allocation modifier
run: |
./bin/benchpark experiment init --dest=quicksilver-openmp quicksilver openmp=oui experiment=weak
./bin/benchpark experiment init --dest=quicksilver-openmp quicksilver+openmp +weak~single_node
./bin/benchpark setup ./quicksilver-openmp nosite-x86_64 workspace/
. workspace/setup.sh
ramble \
Expand Down Expand Up @@ -334,7 +334,7 @@ jobs:
- name: Dry run dynamic saxpy/openmp with dynamic CTS ruby
run: |
./bin/benchpark system init --dest=ruby-system cts cluster=ruby
./bin/benchpark experiment init --dest=saxpy-openmp saxpy openmp=oui
./bin/benchpark experiment init --dest=saxpy-openmp saxpy+openmp
./bin/benchpark setup ./saxpy-openmp ./ruby-system workspace/
. workspace/setup.sh
ramble \
Expand All @@ -346,7 +346,7 @@ jobs:
- name: Dry run dynamic saxpy/openmp with dynamic CTS dane
run: |
./bin/benchpark system init --dest=dane-system cts cluster=dane
./bin/benchpark experiment init --dest=saxpy-openmp2 saxpy openmp=oui
./bin/benchpark experiment init --dest=saxpy-openmp2 saxpy+openmp
./bin/benchpark setup ./saxpy-openmp2 ./dane-system workspace/
. workspace/setup.sh
ramble \
Expand All @@ -358,7 +358,7 @@ jobs:
- name: Dry run dynamic saxpy/openmp with dynamic CTS magma
run: |
./bin/benchpark system init --dest=magma-system cts cluster=magma
./bin/benchpark experiment init --dest=saxpy-openmp3 saxpy openmp=oui
./bin/benchpark experiment init --dest=saxpy-openmp3 saxpy+openmp
./bin/benchpark setup ./saxpy-openmp3 ./magma-system workspace/
. workspace/setup.sh
ramble \
Expand All @@ -370,7 +370,7 @@ jobs:
- name: Dry run dynamic saxpy/openmp with dynamic generic x86
run: |
./bin/benchpark system init --dest=x86-system genericx86
./bin/benchpark experiment init --dest=saxpy-omp-generic saxpy openmp=oui
./bin/benchpark experiment init --dest=saxpy-omp-generic saxpy+openmp
./bin/benchpark setup ./saxpy-omp-generic ./x86-system workspace/
. workspace/setup.sh
ramble \
Expand Down Expand Up @@ -400,3 +400,14 @@ jobs:
--disable-progress-bar \
--disable-logger \
workspace setup --dry-run
- name: Dry run dynamic ior/mpi with dynamic CTS ruby
run: |
./bin/benchpark experiment init --dest=ior-mpi ior
./bin/benchpark setup ./ior-mpi ./ruby-system workspace/
. workspace/setup.sh
ramble \
--workspace-dir workspace/ior-mpi/Cts-6d48f81/workspace \
--disable-progress-bar \
--disable-logger \
workspace setup --dry-run
7 changes: 7 additions & 0 deletions docs/llnl-tutorial.rst
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,13 @@ Running on an LLNL System
This tutorial will guide you through the process of using Benchpark on LLNL
systems.

To run Benchpark, you first need to install its requirements. From the
Benchpark root directory, run::

python -m venv my-env
. my-env/bin/activate
pip install -r requirements.txt

------------------------
CTS (Ruby, Dane, Magma)
------------------------
Expand Down
26 changes: 13 additions & 13 deletions experiments/amg2023/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,10 +55,10 @@ class Amg2023(
def compute_applications_section(self):
# TODO: Replace with conflicts clause
scaling_modes = {
"strong": self.spec.satisfies("strong=oui"),
"weak": self.spec.satisfies("weak=oui"),
"throughput": self.spec.satisfies("throughput=oui"),
"single_node": self.spec.satisfies("single_node=oui"),
"strong": self.spec.satisfies("+strong"),
"weak": self.spec.satisfies("+weak"),
"throughput": self.spec.satisfies("+throughput"),
"single_node": self.spec.satisfies("+single_node"),
}

scaling_mode_enabled = [key for key, value in scaling_modes.items() if value]
Expand All @@ -73,15 +73,15 @@ def compute_applications_section(self):
# Per-process size (in zones) in each dimension
problem_sizes = {"nx": 80, "ny": 80, "nz": 80}

if self.spec.satisfies("single_node=oui"):
if self.spec.satisfies("+single_node"):
n_resources = 1
# TODO: Check if n_ranks / n_resources_per_node <= 1
for pk, pv in num_procs.items():
self.add_experiment_variable(pk, pv, True)
n_resources *= pv
for nk, nv in problem_sizes.items():
self.add_experiment_variable(nk, nv, True)
elif self.spec.satisfies("throughput=oui"):
elif self.spec.satisfies("+throughput"):
n_resources = 1
for pk, pv in num_procs.items():
self.add_experiment_variable(pk, pv, True)
Expand All @@ -93,7 +93,7 @@ def compute_applications_section(self):
)
for nk, nv in scaled_variables.items():
self.add_experiment_variable(nk, nv, True)
elif self.spec.satisfies("strong=oui"):
elif self.spec.satisfies("+strong"):
scaled_variables = self.generate_strong_scaling_params(
{tuple(num_procs.keys()): list(num_procs.values())},
int(self.spec.variants["scaling-factor"][0]),
Expand All @@ -109,7 +109,7 @@ def compute_applications_section(self):
]
for nk, nv in problem_sizes.items():
self.add_experiment_variable(nk, nv, True)
elif self.spec.satisfies("weak=oui"):
elif self.spec.satisfies("+weak"):
scaled_variables = self.generate_weak_scaling_params(
{tuple(num_procs.keys()): list(num_procs.values())},
{tuple(problem_sizes.keys()): list(problem_sizes.values())},
Expand All @@ -125,10 +125,10 @@ def compute_applications_section(self):
for k, v in scaled_variables.items():
self.add_experiment_variable(k, v, True)

if self.spec.satisfies("openmp=oui"):
if self.spec.satisfies("+openmp"):
self.add_experiment_variable("n_ranks", n_resources, True)
self.add_experiment_variable("n_threads_per_proc", 1, True)
elif self.spec.satisfies("cuda=oui") or self.spec.satisfies("rocm=oui"):
elif self.spec.satisfies("+cuda") or self.spec.satisfies("+rocm"):
self.add_experiment_variable("n_gpus", n_resources, True)

def compute_spack_section(self):
Expand All @@ -141,16 +141,16 @@ def compute_spack_section(self):
system_specs["compiler"] = "default-compiler"
system_specs["mpi"] = "default-mpi"
system_specs["lapack"] = "lapack"
if self.spec.satisfies("cuda=oui"):
if self.spec.satisfies("+cuda"):
system_specs["cuda_version"] = "{default_cuda_version}"
system_specs["cuda_arch"] = "{cuda_arch}"
system_specs["blas"] = "cublas-cuda"
if self.spec.satisfies("rocm=oui"):
if self.spec.satisfies("+rocm"):
system_specs["rocm_arch"] = "{rocm_arch}"
system_specs["blas"] = "blas-rocm"

# set package spack specs
if self.spec.satisfies("cuda=oui") or self.spec.satisfies("rocm=oui"):
if self.spec.satisfies("+cuda") or self.spec.satisfies("+rocm"):
# empty package_specs value implies external package
self.add_spack_spec(system_specs["blas"])
# empty package_specs value implies external package
Expand Down
92 changes: 92 additions & 0 deletions experiments/ior/experiment.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
# Copyright 2023 Lawrence Livermore National Security, LLC and other
# Benchpark Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: Apache-2.0

from benchpark.error import BenchparkError
from benchpark.directives import variant
from benchpark.experiment import Experiment
from benchpark.scaling import StrongScaling
from benchpark.scaling import WeakScaling


class Ior(
    Experiment,
    StrongScaling,
    WeakScaling,
):
    """Benchpark experiment definition for the IOR parallel I/O benchmark.

    Exposes ``workload`` and ``version`` variants and supports exactly one
    scaling mode per experiment: ``+single_node``, ``+strong``, or ``+weak``.
    """

    variant(
        "workload",
        default="ior",
        description="base IOR or other problem",
    )

    variant(
        "version",
        default="3.3.0",
        description="app version",
    )

    def compute_applications_section(self):
        """Register the Ramble experiment variables for the selected scaling mode.

        Sets the IOR block size ``b``, transfer size ``t``, node count(s)
        ``n_nodes``, and the derived ``n_ranks``.

        Raises:
            BenchparkError: if zero or more than one scaling mode is enabled.
        """
        # TODO: Replace with conflicts clause
        scaling_modes = {
            "strong": self.spec.satisfies("+strong"),
            "weak": self.spec.satisfies("+weak"),
            "single_node": self.spec.satisfies("+single_node"),
        }

        scaling_mode_enabled = [key for key, value in scaling_modes.items() if value]
        if len(scaling_mode_enabled) != 1:
            raise BenchparkError(
                f"Only one type of scaling per experiment is allowed for application package {self.name}"
            )

        num_nodes = {"n_nodes": 1}
        # IOR transfer size: 1/256th of the block size "b" (Ramble expands
        # the "{b}" template at setup time).
        # NOTE: registered once here; the per-branch code below only sets "b".
        t = "{b}/256"
        self.add_experiment_variable("t", t, True)

        if self.spec.satisfies("+single_node"):
            for pk, pv in num_nodes.items():
                self.add_experiment_variable(pk, pv, True)
            # 256 MiB block size (268435456 bytes)
            self.add_experiment_variable("b", "268435456", True)
        elif self.spec.satisfies("+strong"):
            scaled_variables = self.generate_strong_scaling_params(
                {tuple(num_nodes.keys()): list(num_nodes.values())},
                int(self.spec.variants["scaling-factor"][0]),
                int(self.spec.variants["scaling-iterations"][0]),
            )
            for k, v in scaled_variables.items():
                self.add_experiment_variable(k, v, True)
            # Strong scaling: fixed 256 MiB total, split evenly across nodes.
            self.add_experiment_variable("b", "268435456 / {n_nodes}", True)
        elif self.spec.satisfies("+weak"):
            scaled_variables = self.generate_weak_scaling_params(
                {tuple(num_nodes.keys()): list(num_nodes.values())},
                {tuple(num_nodes.keys()): list(num_nodes.values())},
                int(self.spec.variants["scaling-factor"][0]),
                int(self.spec.variants["scaling-iterations"][0]),
            )
            for k, v in scaled_variables.items():
                self.add_experiment_variable(k, v, True)
            # Weak scaling: per-node block size stays 256 MiB.
            self.add_experiment_variable("b", "268435456", True)

        # One MPI rank per core on every node.
        self.add_experiment_variable(
            "n_ranks", "{sys_cores_per_node} * {n_nodes}", True
        )

    def compute_spack_section(self):
        """Register the Spack specs (MPI provider and ior@<version>) to build."""
        # get package version
        app_version = self.spec.variants["version"][0]

        # get system config options
        # TODO: Get compiler/mpi/package handles directly from system.py
        system_specs = {}
        system_specs["compiler"] = "default-compiler"
        system_specs["mpi"] = "default-mpi"

        # set package spack specs
        # empty package_specs value implies an externally provided package
        self.add_spack_spec(system_specs["mpi"])

        self.add_spack_spec(self.name, [f"ior@{app_version}", system_specs["compiler"]])
30 changes: 15 additions & 15 deletions experiments/kripke/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,10 +38,10 @@ class Kripke(
def compute_applications_section(self):
# TODO: Replace with conflicts clause
scaling_modes = {
"strong": self.spec.satisfies("strong=oui"),
"weak": self.spec.satisfies("weak=oui"),
"throughput": self.spec.satisfies("throughput=oui"),
"single_node": self.spec.satisfies("single_node=oui"),
"strong": self.spec.satisfies("+strong"),
"weak": self.spec.satisfies("+weak"),
"throughput": self.spec.satisfies("+throughput"),
"single_node": self.spec.satisfies("+single_node"),
}

scaling_mode_enabled = [key for key, value in scaling_modes.items() if value]
Expand All @@ -67,15 +67,15 @@ def compute_applications_section(self):
for k, v in input_variables.items():
self.add_experiment_variable(k, v, True)

if self.spec.satisfies("single_node=oui"):
if self.spec.satisfies("+single_node"):
n_resources = 1
# TODO: Check if n_ranks / n_resources_per_node <= 1
for pk, pv in num_procs.items():
self.add_experiment_variable(pk, pv, True)
n_resources *= pv
for nk, nv in problem_sizes.items():
self.add_experiment_variable(nk, nv, True)
elif self.spec.satisfies("throughput=oui"):
elif self.spec.satisfies("+throughput"):
n_resources = 1
for pk, pv in num_procs.items():
self.add_experiment_variable(pk, pv, True)
Expand All @@ -87,7 +87,7 @@ def compute_applications_section(self):
)
for nk, nv in scaled_variables.items():
self.add_experiment_variable(nk, nv, True)
elif self.spec.satisfies("strong=oui"):
elif self.spec.satisfies("+strong"):
scaled_variables = self.generate_strong_scaling_params(
{tuple(num_procs.keys()): list(num_procs.values())},
int(self.spec.variants["scaling-factor"][0]),
Expand All @@ -103,7 +103,7 @@ def compute_applications_section(self):
]
for nk, nv in problem_sizes.items():
self.add_experiment_variable(nk, nv, True)
elif self.spec.satisfies("weak=oui"):
elif self.spec.satisfies("+weak"):
scaled_variables = self.generate_weak_scaling_params(
{tuple(num_procs.keys()): list(num_procs.values())},
{tuple(problem_sizes.keys()): list(problem_sizes.values())},
Expand All @@ -119,17 +119,17 @@ def compute_applications_section(self):
for k, v in scaled_variables.items():
self.add_experiment_variable(k, v, True)

if self.spec.satisfies("openmp=oui"):
if self.spec.satisfies("+openmp"):
self.add_experiment_variable("n_ranks", n_resources, True)
self.add_experiment_variable("n_threads_per_proc", 1, True)
elif self.spec.satisfies("cuda=oui") or self.spec.satisfies("rocm=oui"):
elif self.spec.satisfies("+cuda") or self.spec.satisfies("+rocm"):
self.add_experiment_variable("n_gpus", n_resources, True)

if self.spec.satisfies("openmp=oui"):
if self.spec.satisfies("+openmp"):
self.add_experiment_variable("arch", "OpenMP")
elif self.spec.satisfies("cuda=oui"):
elif self.spec.satisfies("+cuda"):
self.add_experiment_variable("arch", "CUDA")
elif self.spec.satisfies("rocm=oui"):
elif self.spec.satisfies("+rocm"):
self.add_experiment_variable("arch", "HIP")

def compute_spack_section(self):
Expand All @@ -141,10 +141,10 @@ def compute_spack_section(self):
system_specs = {}
system_specs["compiler"] = "default-compiler"
system_specs["mpi"] = "default-mpi"
if self.spec.satisfies("cuda=oui"):
if self.spec.satisfies("+cuda"):
system_specs["cuda_version"] = "{default_cuda_version}"
system_specs["cuda_arch"] = "{cuda_arch}"
if self.spec.satisfies("rocm=oui"):
if self.spec.satisfies("+rocm"):
system_specs["rocm_arch"] = "{rocm_arch}"

# set package spack specs
Expand Down
Loading

0 comments on commit 29fe3a9

Please sign in to comment.