Skip to content

Commit

Permalink
refactor: from error to warning
Browse files Browse the repository at this point in the history
  • Loading branch information
Data-Iab committed Nov 17, 2022
1 parent 418775a commit 5eee797
Showing 1 changed file with 7 additions and 2 deletions.
9 changes: 7 additions & 2 deletions alonet/torch2trt/TRTEngineBuilder.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@


from typing import Dict, List, Tuple
from time import sleep


def GiB(val):
Expand Down Expand Up @@ -160,16 +161,20 @@ def get_engine(self):
# FP16
if self.FP16_allowed:
if not builder.platform_has_fast_fp16:
raise RuntimeError("FP16 is not optimized in this platform. Check " +
print("FP16 is not optimized in this platform. Check " +
"https://docs.nvidia.com/deeplearning/tensorrt/support-matrix/index.html#hardware-precision-matrix"
)
# Fast logs, sleep for logs readability
sleep(0.5)
config.set_flag(trt.BuilderFlag.FP16)
# INT8
if self.INT8_allowed:
if not builder.platform_has_fast_int8:
raise RuntimeError("FP16 is not optimized in this platform. Check " +
print("FP16 is not optimized in this platform. Check " +
"https://docs.nvidia.com/deeplearning/tensorrt/support-matrix/index.html#hardware-precision-matrix"
)
# Fast logs, sleep for logs readability
sleep(0.5)
config.set_quantization_flag(trt.QuantizationFlag.CALIBRATE_BEFORE_FUSION)
config.set_flag(trt.BuilderFlag.INT8)
config.int8_calibrator = self.calibrator
Expand Down

0 comments on commit 5eee797

Please sign in to comment.