Adding option to disable GPU (#1016)
raivisdejus authored Dec 1, 2024
1 parent f96bb7b commit ea57282
Showing 6 changed files with 23 additions and 6 deletions.
2 changes: 1 addition & 1 deletion buzz/__version__.py
@@ -1 +1 @@
VERSION = "1.2.0"
VERSION = "1.2.1"
8 changes: 7 additions & 1 deletion buzz/transcriber/recording_transcriber.py
@@ -70,11 +70,14 @@ def start(self):
         model_path = self.model_path
         keep_samples = int(self.keep_sample_seconds * self.sample_rate)
 
+        force_cpu = os.getenv("BUZZ_FORCE_CPU", "false")
+        use_cuda = torch.cuda.is_available() and force_cpu == "false"
+
         if torch.cuda.is_available():
             logging.debug(f"CUDA version detected: {torch.version.cuda}")
 
         if self.transcription_options.model.model_type == ModelType.WHISPER:
-            device = "cuda" if torch.cuda.is_available() else "cpu"
+            device = "cuda" if use_cuda else "cpu"
             model = whisper.load_model(model_path, device=device)
         elif self.transcription_options.model.model_type == ModelType.WHISPER_CPP:
             model = WhisperCpp(model_path)
@@ -92,6 +95,9 @@ def start(self):
logging.debug("Unsupported CUDA version (<12), using CPU")
device = "cpu"

if force_cpu != "false":
device = "cpu"

model = faster_whisper.WhisperModel(
model_size_or_path=model_path,
download_root=model_root_dir,
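The new checks boil down to one small pattern: read the environment variable, then gate CUDA on it. As a standalone sketch (the helper name `resolve_device` is illustrative, not part of the commit):

import os

import torch


def resolve_device() -> str:
    # BUZZ_FORCE_CPU is compared as a plain string: anything other than
    # the default "false" disables CUDA even when it is available.
    force_cpu = os.getenv("BUZZ_FORCE_CPU", "false")
    use_cuda = torch.cuda.is_available() and force_cpu == "false"
    return "cuda" if use_cuda else "cpu"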
9 changes: 8 additions & 1 deletion buzz/transcriber/whisper_file_transcriber.py
@@ -148,6 +148,7 @@ def transcribe_faster_whisper(cls, task: FileTranscriptionTask) -> List[Segment]
         model_root_dir = user_cache_dir("Buzz")
         model_root_dir = os.path.join(model_root_dir, "models")
         model_root_dir = os.getenv("BUZZ_MODEL_ROOT", model_root_dir)
+        force_cpu = os.getenv("BUZZ_FORCE_CPU", "false")
 
         device = "auto"
         if platform.system() == "Windows":
@@ -158,6 +159,9 @@ def transcribe_faster_whisper(cls, task: FileTranscriptionTask) -> List[Segment]
logging.debug("Unsupported CUDA version (<12), using CPU")
device = "cpu"

if force_cpu != "false":
device = "cpu"

model = faster_whisper.WhisperModel(
model_size_or_path=model_size_or_path,
download_root=model_root_dir,
@@ -200,7 +204,10 @@ def transcribe_faster_whisper(cls, task: FileTranscriptionTask) -> List[Segment]

     @classmethod
     def transcribe_openai_whisper(cls, task: FileTranscriptionTask) -> List[Segment]:
-        device = "cuda" if torch.cuda.is_available() else "cpu"
+        force_cpu = os.getenv("BUZZ_FORCE_CPU", "false")
+        use_cuda = torch.cuda.is_available() and force_cpu == "false"
+
+        device = "cuda" if use_cuda else "cpu"
         model = whisper.load_model(task.model_path, device=device)
 
         if task.transcription_options.word_level_timings:
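For the faster-whisper path, the override simply collapses the "auto" device choice to "cpu". A minimal, self-contained sketch of the resulting behavior ("tiny" and "audio.wav" are placeholders for illustration):

import os

import faster_whisper

force_cpu = os.getenv("BUZZ_FORCE_CPU", "false")

device = "auto"  # faster-whisper picks CUDA itself when available
if force_cpu != "false":
    device = "cpu"  # the env override wins over auto-detection

model = faster_whisper.WhisperModel("tiny", device=device)
segments, info = model.transcribe("audio.wav")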
6 changes: 4 additions & 2 deletions buzz/transformers_whisper.py
@@ -162,8 +162,10 @@ def transcribe(
         task: str,
         word_timestamps: bool = False,
     ):
-        device = "cuda" if torch.cuda.is_available() else "cpu"
-        torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+        force_cpu = os.getenv("BUZZ_FORCE_CPU", "false")
+        use_cuda = torch.cuda.is_available() and force_cpu == "false"
+        device = "cuda" if use_cuda else "cpu"
+        torch_dtype = torch.float16 if use_cuda else torch.float32
 
         use_safetensors = True
         if os.path.exists(self.model_id):
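Tying the dtype to the same flag matters because float16 only pays off on GPU; CPU inference generally needs float32. A hedged sketch of how the pair would be used with a Hugging Face Whisper checkpoint ("openai/whisper-tiny" stands in for whatever model Buzz actually loads):

import os

import torch
from transformers import WhisperForConditionalGeneration

force_cpu = os.getenv("BUZZ_FORCE_CPU", "false")
use_cuda = torch.cuda.is_available() and force_cpu == "false"
device = "cuda" if use_cuda else "cpu"
# float16 halves GPU memory use, but CPU kernels expect float32,
# so the dtype must follow the device choice.
torch_dtype = torch.float16 if use_cuda else torch.float32

model = WhisperForConditionalGeneration.from_pretrained(
    "openai/whisper-tiny", torch_dtype=torch_dtype
).to(device)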
2 changes: 2 additions & 0 deletions docs/docs/preferences.md
@@ -94,3 +94,5 @@ Defaults to [user_cache_dir](https://pypi.org/project/platformdirs/).
 **BUZZ_LOCALE** - Buzz UI locale to use. Defaults to one of the supported system locales.
 
 **BUZZ_DOWNLOAD_COOKIEFILE** - Location of a [cookiefile](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp) to use for downloading private videos or as a workaround for anti-bot protection.
+
+**BUZZ_FORCE_CPU** - Forces Buzz to run on the CPU instead of the GPU. Useful on setups where an older GPU is slower than the CPU or the GPU has issues. Example usage: `BUZZ_FORCE_CPU=true`.
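In practice the variable is set in the environment before Buzz starts; the programmatic equivalent (assuming it runs before any model is loaded) would be:

import os

# Must be set before a model is loaded; the value is compared as a
# string, so anything other than "false" forces CPU.
os.environ["BUZZ_FORCE_CPU"] = "true"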
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "buzz-captions"
-version = "1.2.0"
+version = "1.2.1"
 description = ""
 authors = ["Chidi Williams <[email protected]>"]
 license = "MIT"
