From c686c018ab1f9a905a768bd25e065b566c9f2db4 Mon Sep 17 00:00:00 2001
From: Tianlei Wu
Date: Wed, 4 Oct 2023 22:25:48 +0000
Subject: [PATCH] update logging

---
 .../transformers/models/stable_diffusion/demo_txt2img.py        | 4 ----
 .../models/stable_diffusion/engine_builder_ort_cuda.py          | 4 ++--
 .../models/stable_diffusion/engine_builder_ort_trt.py           | 4 ++--
 3 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img.py
index 0eeab19e8328a..f6e00063a6391 100644
--- a/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img.py
+++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/demo_txt2img.py
@@ -20,8 +20,6 @@
 # limitations under the License.
 # --------------------------------------------------------------------------
 
-import logging
-
 import coloredlogs
 from cuda import cudart
 from demo_utils import init_pipeline, parse_arguments, repeat_prompt
@@ -29,8 +27,6 @@
 from engine_builder import EngineType, get_engine_type
 from pipeline_txt2img import Txt2ImgPipeline
 
-logger = logging.getLogger(__name__)
-
 if __name__ == "__main__":
     coloredlogs.install(fmt="%(funcName)20s: %(message)s")
diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_cuda.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_cuda.py
index e6e8e9c040881..936d04e8a1c43 100644
--- a/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_cuda.py
+++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_cuda.py
@@ -29,7 +29,7 @@ def __init__(self, onnx_path, device_id: int = 0, enable_cuda_graph=False, disab
         if disable_optimization:
             session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
 
-        print("creating CUDA EP session for ", onnx_path)
+        logger.info("creating CUDA EP session for %s", onnx_path)
         ort_session = ort.InferenceSession(
             onnx_path,
             session_options,
@@ -38,7 +38,7 @@ def __init__(self, onnx_path, device_id: int = 0, enable_cuda_graph=False, disab
                 "CPUExecutionProvider",
             ],
         )
-        print("created CUDA EP session for ", onnx_path)
+        logger.info("created CUDA EP session for %s", onnx_path)
 
         device = torch.device("cuda", device_id)
         super().__init__(ort_session, device, enable_cuda_graph)
diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_trt.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_trt.py
index 253cdcc45bf2e..a6bbd4ee7eeb7 100644
--- a/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_trt.py
+++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_ort_trt.py
@@ -32,7 +32,7 @@ def __init__(self, engine_path, device_id, onnx_path, fp16, input_profile, works
         session_options = ort.SessionOptions()
         session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
 
-        print("creating TRT EP session for ", onnx_path)
+        logger.info("creating TRT EP session for %s", onnx_path)
         ort_session = ort.InferenceSession(
             onnx_path,
             session_options,
@@ -40,7 +40,7 @@
                 ("TensorrtExecutionProvider", self.ort_trt_provider_options),
             ],
         )
-        print("created TRT EP session for ", onnx_path)
+        logger.info("created TRT EP session for %s", onnx_path)
 
         device = torch.device("cuda", device_id)
         super().__init__(ort_session, device, enable_cuda_graph)