Skip to content

Commit

Permalink
Give an informative log for whisper on exceptions. (#473)
Browse files Browse the repository at this point in the history
  • Loading branch information
csukuangfj authored Dec 8, 2023
1 parent 868c339 commit 0e23f82
Show file tree
Hide file tree
Showing 7 changed files with 76 additions and 14 deletions.
12 changes: 12 additions & 0 deletions python-api-examples/generate-subtitles.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,17 @@ def get_args():
""",
)

parser.add_argument(
"--whisper-tail-paddings",
default=-1,
type=int,
help="""Number of tail padding frames.
We have removed the 30-second constraint from whisper, so you need to
choose the amount of tail padding frames by yourself.
Use -1 to use a default value for tail padding.
""",
)

parser.add_argument(
"--decoding-method",
type=str,
Expand Down Expand Up @@ -294,6 +305,7 @@ def create_recognizer(args) -> sherpa_onnx.OfflineRecognizer:
debug=args.debug,
language=args.whisper_language,
task=args.whisper_task,
tail_paddings=args.whisper_tail_paddings,
)
else:
raise ValueError("Please specify at least one model")
Expand Down
12 changes: 12 additions & 0 deletions python-api-examples/non_streaming_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,17 @@ def add_whisper_model_args(parser: argparse.ArgumentParser):
""",
)

parser.add_argument(
"--whisper-tail-paddings",
default=-1,
type=int,
help="""Number of tail padding frames.
We have removed the 30-second constraint from whisper, so you need to
choose the amount of tail padding frames by yourself.
Use -1 to use a default value for tail padding.
""",
)


def add_model_args(parser: argparse.ArgumentParser):
add_transducer_model_args(parser)
Expand Down Expand Up @@ -913,6 +924,7 @@ def create_recognizer(args) -> sherpa_onnx.OfflineRecognizer:
decoding_method=args.decoding_method,
language=args.whisper_language,
task=args.whisper_task,
tail_paddings=args.whisper_tail_paddings,
)
elif args.tdnn_model:
assert_file_exists(args.tdnn_model)
Expand Down
12 changes: 12 additions & 0 deletions python-api-examples/offline-decode-files.py
Original file line number Diff line number Diff line change
Expand Up @@ -220,6 +220,17 @@ def get_args():
""",
)

parser.add_argument(
"--whisper-tail-paddings",
default=-1,
type=int,
help="""Number of tail padding frames.
We have removed the 30-second constraint from whisper, so you need to
choose the amount of tail padding frames by yourself.
Use -1 to use a default value for tail padding.
""",
)

parser.add_argument(
"--decoding-method",
type=str,
Expand Down Expand Up @@ -391,6 +402,7 @@ def main():
debug=args.debug,
language=args.whisper_language,
task=args.whisper_task,
tail_paddings=args.whisper_tail_paddings,
)
elif args.tdnn_model:
assert_file_exists(args.tdnn_model)
Expand Down
12 changes: 12 additions & 0 deletions python-api-examples/two-pass-speech-recognition-from-microphone.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,17 @@ def add_second_pass_whisper_model_args(parser: argparse.ArgumentParser):
""",
)

parser.add_argument(
"--second-whisper-tail-paddings",
default=-1,
type=int,
help="""Number of tail padding frames.
We have removed the 30-second constraint from whisper, so you need to
choose the amount of tail padding frames by yourself.
Use -1 to use a default value for tail padding.
""",
)


def add_second_pass_non_streaming_model_args(parser: argparse.ArgumentParser):
add_second_pass_transducer_model_args(parser)
Expand Down Expand Up @@ -314,6 +325,7 @@ def create_second_pass_recognizer(args) -> sherpa_onnx.OfflineRecognizer:
decoding_method="greedy_search",
language=args.second_whisper_language,
task=args.second_whisper_task,
tail_paddings=args.second_whisper_tail_paddings,
)
else:
raise ValueError("Please specify at least one model for the second pass")
Expand Down
12 changes: 12 additions & 0 deletions python-api-examples/vad-with-non-streaming-asr.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,17 @@ def get_args():
""",
)

parser.add_argument(
"--whisper-tail-paddings",
default=-1,
type=int,
help="""Number of tail padding frames.
We have removed the 30-second constraint from whisper, so you need to
choose the amount of tail padding frames by yourself.
Use -1 to use a default value for tail padding.
""",
)

parser.add_argument(
"--decoding-method",
type=str,
Expand Down Expand Up @@ -256,6 +267,7 @@ def create_recognizer(args) -> sherpa_onnx.OfflineRecognizer:
debug=args.debug,
language=args.whisper_language,
task=args.whisper_task,
tail_paddings=args.whisper_tail_paddings,
)
else:
raise ValueError("Please specify at least one model")
Expand Down
28 changes: 14 additions & 14 deletions sherpa-onnx/csrc/offline-recognizer-whisper-impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -116,18 +116,12 @@ class OfflineRecognizerWhisperImpl : public OfflineRecognizerImpl {

NormalizeFeatures(f.data(), num_frames, feat_dim);

// note that 50 is an empirical value.
// see also ../../scripts/whisper/test.py
//
// You can replace 50 by other values, say, 100.
// note that 1000 is an empirical value.
// You can replace 1000 by other values, say, 100.
//
// Since we have removed the 30 seconds constraint, we need
// tail_padding_frames so that whisper is able to detect the eot token.
int32_t tail_padding_frames = 50;
if (model_->IsMultiLingual()) {
// 300 is an empirical value. If it throws, please use a larger value.
tail_padding_frames = 300;
}
int32_t tail_padding_frames = 1000;

if (config_.model_config.whisper.tail_paddings > 0) {
tail_padding_frames = config_.model_config.whisper.tail_paddings;
Expand All @@ -140,11 +134,13 @@ class OfflineRecognizerWhisperImpl : public OfflineRecognizerImpl {

Ort::Value mel = Ort::Value::CreateTensor<float>(
model_->Allocator(), shape.data(), shape.size());

float *p_mel = mel.GetTensorMutableData<float>();
std::copy(f.data(), f.data() + actual_frames * feat_dim, p_mel);
std::copy(f.data(), f.data() + num_frames * feat_dim, p_mel);

std::fill_n(p_mel + num_frames * feat_dim,
(actual_frames - num_frames) * feat_dim, 0);

memset(p_mel + f.size(), 0,
(actual_frames - num_frames) * feat_dim * sizeof(float));
mel = Transpose12(model_->Allocator(), &mel);

try {
Expand All @@ -156,8 +152,12 @@ class OfflineRecognizerWhisperImpl : public OfflineRecognizerImpl {
auto r = Convert(results[0], symbol_table_);
s->SetResult(r);
} catch (const Ort::Exception &ex) {
SHERPA_ONNX_LOGE("\n\nCaught exception:\n\n%s\n\nReturn an empty result",
ex.what());
SHERPA_ONNX_LOGE(
"\n\nCaught exception:\n\n%s\n\nReturn an empty result. Number of "
"input frames: %d, Current tail "
"paddings: %d. If you see a lot of such exceptions, please consider "
"using a larger --whisper-tail-paddings",
ex.what(), num_frames, tail_padding_frames);
return;
}
}
Expand Down
2 changes: 2 additions & 0 deletions sherpa-onnx/python/sherpa_onnx/offline_recognizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -261,6 +261,7 @@ def from_whisper(
decoding_method: str = "greedy_search",
debug: bool = False,
provider: str = "cpu",
tail_paddings: int = -1,
):
"""
Please refer to
Expand Down Expand Up @@ -305,6 +306,7 @@ def from_whisper(
decoder=decoder,
language=language,
task=task,
tail_paddings=tail_paddings,
),
tokens=tokens,
num_threads=num_threads,
Expand Down

0 comments on commit 0e23f82

Please sign in to comment.