diff --git a/rl/llm/engines/local.py b/rl/llm/engines/local.py
index 04b9136..f18dc69 100644
--- a/rl/llm/engines/local.py
+++ b/rl/llm/engines/local.py
@@ -392,7 +392,6 @@ def __exit__(self, exc_type, exc_value, traceback):
         del self.vllm
         gc.collect()
         torch.cuda.empty_cache()
-        torch.distributed.destroy_process_group()
         LOGGER.info("VLLM model unloaded.")

     def generate(self, prompt: InferenceInput) -> InferenceOutput: