address review feedback
tianleiwu committed Oct 4, 2023
1 parent 8c37fc8 commit 9d829ba
Showing 4 changed files with 129 additions and 247 deletions.
20 changes: 16 additions & 4 deletions onnxruntime/python/tools/transformers/benchmark_helper.py
@@ -542,7 +542,7 @@ def measure_gpu_usage(self):
         while True:
             for i in range(device_count):
                 max_gpu_usage[i] = max(max_gpu_usage[i], self.get_used_memory(i))
-            time.sleep(0.005)  # 2ms
+            time.sleep(0.005)  # 5ms
             if not self.keep_measuring:
                 break
         return [
@@ -555,7 +555,7 @@ def measure_gpu_usage(self):
         ]


-def measure_memory(is_gpu, func, monitor_type="cuda"):
+def measure_memory(is_gpu, func, monitor_type="cuda", start_memory=None):
     memory_monitor_type = None
     if monitor_type == "rocm":
         memory_monitor_type = RocmMemoryMonitor
@@ -565,10 +565,16 @@ def measure_memory(is_gpu, func, monitor_type="cuda"):
     monitor = memory_monitor_type(False)

     if is_gpu:
-        memory_before_test = monitor.measure_gpu_usage()
+        if start_memory is not None:
+            memory_before_test = start_memory
+        else:
+            memory_before_test = monitor.measure_gpu_usage()
         if memory_before_test is None:
             return None

+        if func is None:
+            return memory_before_test
+
         with ThreadPoolExecutor() as executor:
             monitor = memory_monitor_type()
             mem_thread = executor.submit(monitor.measure_gpu_usage)
@@ -595,7 +601,13 @@ def measure_memory(is_gpu, func, monitor_type="cuda"):
             return None

     # CPU memory
-    memory_before_test = monitor.measure_cpu_usage()
+    if start_memory is not None:
+        memory_before_test = start_memory
+    else:
+        memory_before_test = monitor.measure_cpu_usage()
+
+    if func is None:
+        return memory_before_test

     with ThreadPoolExecutor() as executor:
         monitor = MemoryMonitor()
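Taken together, this change lets a caller capture the baseline memory once (by calling with `func=None`) and reuse it across runs via `start_memory`. A minimal usage sketch, assuming `benchmark_helper` is importable from an installed onnxruntime and using a hypothetical `run_workload` function (neither is part of this commit):

```python
# Hypothetical usage sketch; adjust the import to wherever benchmark_helper
# lives in your setup (e.g. run from the transformers tools directory).
from onnxruntime.transformers.benchmark_helper import measure_memory


def run_workload():
    # Placeholder for the inference call being profiled.
    pass


# With func=None, measure_memory now returns only the "before" memory reading.
baseline = measure_memory(is_gpu=True, func=None, monitor_type="cuda")

# Later calls can pass that reading back via start_memory, so the baseline
# is not re-measured for every benchmarked configuration.
result = measure_memory(
    is_gpu=True,
    func=run_workload,
    monitor_type="cuda",
    start_memory=baseline,
)
print(result)
```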
@@ -79,7 +79,7 @@ If you use CUDA 12.*, you will need to build onnxruntime-gpu from source.
 ```
 conda create -n py38 python=3.8
 conda activate py38
-pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu118
+pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu118
 pip install --upgrade polygraphy onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
 pip install -r requirements-cuda.txt
 ```
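As a quick, optional follow-up (not part of the README), one might confirm that the nightly cu118 PyTorch wheel installed above is visible to Python and can see the GPU before running the demo scripts:

```python
# Hypothetical sanity check, assuming the nightly cu118 torch wheel from the
# commands above is installed in the active environment.
import torch

print(torch.__version__)          # expect a version string ending in "+cu118"
print(torch.cuda.is_available())  # True when the CUDA driver and GPU are usable
```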
