Fix code style issues (k2-fsa#774)
csukuangfj authored Apr 16, 2024
1 parent 9eab337 commit e3e919c
Showing 20 changed files with 45 additions and 57 deletions.
5 changes: 2 additions & 3 deletions sherpa-onnx/csrc/features.cc
@@ -26,8 +26,7 @@ void FeatureExtractorConfig::Register(ParseOptions *po) {
   po->Register("feat-dim", &feature_dim,
                "Feature dimension. Must match the one expected by the model.");
 
-  po->Register("low-freq", &low_freq,
-               "Low cutoff frequency for mel bins");
+  po->Register("low-freq", &low_freq, "Low cutoff frequency for mel bins");
 
   po->Register("high-freq", &high_freq,
                "High cutoff frequency for mel bins "
@@ -67,7 +66,7 @@ class FeatureExtractor::Impl {
     opts_.mel_opts.num_bins = config.feature_dim;
 
     opts_.mel_opts.high_freq = config.high_freq;
-      opts_.mel_opts.low_freq = config.low_freq;
+    opts_.mel_opts.low_freq = config.low_freq;
 
     opts_.mel_opts.is_librosa = config.is_librosa;
 
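For context on the Register calls being reflowed in this file: ParseOptions::Register binds a command-line flag to a config member, and the string becomes the flag's help text. A minimal usage sketch with a hypothetical DemoFeatureConfig, assuming sherpa-onnx's ParseOptions keeps its kaldi-style interface (Register plus Read); the defaults mirror the Python binding defaults that appear later in this commit:

#include "sherpa-onnx/csrc/parse-options.h"

// Hypothetical config for illustration only; not a class from this commit.
struct DemoFeatureConfig {
  float low_freq = 20.0f;
  float high_freq = -400.0f;

  void Register(sherpa_onnx::ParseOptions *po) {
    po->Register("low-freq", &low_freq, "Low cutoff frequency for mel bins");
    po->Register("high-freq", &high_freq, "High cutoff frequency for mel bins");
  }
};

int main(int argc, char *argv[]) {
  sherpa_onnx::ParseOptions po("Usage: ./demo [options]");
  DemoFeatureConfig config;
  config.Register(&po);
  po.Read(argc, argv);  // e.g. --low-freq=30 now sets config.low_freq
  return 0;
}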
2 changes: 1 addition & 1 deletion sherpa-onnx/csrc/offline-lm-config.cc
@@ -15,7 +15,7 @@ void OfflineLMConfig::Register(ParseOptions *po) {
   po->Register("lm", &model, "Path to LM model.");
   po->Register("lm-scale", &scale, "LM scale.");
   po->Register("lm-num-threads", &lm_num_threads,
-                 "Number of threads to run the neural network of LM model");
+               "Number of threads to run the neural network of LM model");
   po->Register("lm-provider", &lm_provider,
                "Specify a provider to LM model use: cpu, cuda, coreml");
 }
10 changes: 4 additions & 6 deletions sherpa-onnx/csrc/offline-recognizer-transducer-impl.h
@@ -80,9 +80,8 @@ class OfflineRecognizerTransducerImpl : public OfflineRecognizerImpl {
       InitHotwords();
     }
     if (config_.decoding_method == "greedy_search") {
-      decoder_ =
-          std::make_unique<OfflineTransducerGreedySearchDecoder>(
-              model_.get(), config_.blank_penalty);
+      decoder_ = std::make_unique<OfflineTransducerGreedySearchDecoder>(
+          model_.get(), config_.blank_penalty);
     } else if (config_.decoding_method == "modified_beam_search") {
       if (!config_.lm_config.model.empty()) {
         lm_ = OfflineLM::Create(config.lm_config);
@@ -106,9 +105,8 @@ class OfflineRecognizerTransducerImpl : public OfflineRecognizerImpl {
         model_(std::make_unique<OfflineTransducerModel>(mgr,
                                                         config_.model_config)) {
     if (config_.decoding_method == "greedy_search") {
-      decoder_ =
-          std::make_unique<OfflineTransducerGreedySearchDecoder>(
-              model_.get(), config_.blank_penalty);
+      decoder_ = std::make_unique<OfflineTransducerGreedySearchDecoder>(
+          model_.get(), config_.blank_penalty);
     } else if (config_.decoding_method == "modified_beam_search") {
       if (!config_.lm_config.model.empty()) {
         lm_ = OfflineLM::Create(mgr, config.lm_config);
3 changes: 1 addition & 2 deletions sherpa-onnx/csrc/offline-transducer-greedy-search-decoder.h
@@ -16,8 +16,7 @@ class OfflineTransducerGreedySearchDecoder : public OfflineTransducerDecoder {
  public:
   explicit OfflineTransducerGreedySearchDecoder(OfflineTransducerModel *model,
                                                 float blank_penalty)
-      : model_(model),
-        blank_penalty_(blank_penalty) {}
+      : model_(model), blank_penalty_(blank_penalty) {}
 
   std::vector<OfflineTransducerDecoderResult> Decode(
       Ort::Value encoder_out, Ort::Value encoder_out_length,
6 changes: 3 additions & 3 deletions sherpa-onnx/csrc/offline-websocket-server-impl.cc
@@ -102,9 +102,9 @@ void OfflineWebsocketDecoder::Decode() {
     asio::post(server_->GetConnectionContext(),
                [this, hdl, result = ss[i]->GetResult()]() {
                  websocketpp::lib::error_code ec;
-                 server_->GetServer().send(
-                     hdl, result.AsJsonString(),
-                     websocketpp::frame::opcode::text, ec);
+                 server_->GetServer().send(hdl, result.AsJsonString(),
+                                           websocketpp::frame::opcode::text,
+                                           ec);
                  if (ec) {
                    server_->GetServer().get_alog().write(
                        websocketpp::log::alevel::app, ec.message());
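For context on the lambda being re-wrapped here: the decoder uses asio::post to hand a finished result to the connection's io_context, so the actual websocket send() runs on the connection thread rather than the decoder thread. A stripped-down sketch of that hand-off, using standalone Asio and placeholder names in place of the server classes:

#include <iostream>
#include <string>

#include "asio.hpp"

int main() {
  asio::io_context connection_ctx;

  std::string result = R"({"text": "hello"})";  // stands in for GetResult()

  // The handler runs on whichever thread calls connection_ctx.run(),
  // not on the thread that posts it.
  asio::post(connection_ctx, [result]() {
    std::cout << "send " << result << "\n";  // stands in for send()
  });

  connection_ctx.run();
  return 0;
}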
2 changes: 1 addition & 1 deletion sherpa-onnx/csrc/online-lm-config.cc
@@ -15,7 +15,7 @@ void OnlineLMConfig::Register(ParseOptions *po) {
   po->Register("lm", &model, "Path to LM model.");
   po->Register("lm-scale", &scale, "LM scale.");
   po->Register("lm-num-threads", &lm_num_threads,
-                 "Number of threads to run the neural network of LM model");
+               "Number of threads to run the neural network of LM model");
   po->Register("lm-provider", &lm_provider,
                "Specify a provider to LM model use: cpu, cuda, coreml");
 }
2 changes: 1 addition & 1 deletion sherpa-onnx/csrc/online-lm-config.h
@@ -22,7 +22,7 @@ struct OnlineLMConfig {
   OnlineLMConfig() = default;
 
   OnlineLMConfig(const std::string &model, float scale, int32_t lm_num_threads,
-                  const std::string &lm_provider)
+                 const std::string &lm_provider)
       : model(model),
         scale(scale),
         lm_num_threads(lm_num_threads),
3 changes: 1 addition & 2 deletions sherpa-onnx/csrc/online-model-config.h
@@ -40,8 +40,7 @@ struct OnlineModelConfig {
                     const OnlineWenetCtcModelConfig &wenet_ctc,
                     const OnlineZipformer2CtcModelConfig &zipformer2_ctc,
                     const std::string &tokens, int32_t num_threads,
-                    int32_t warm_up, bool debug,
-                    const std::string &provider,
+                    int32_t warm_up, bool debug, const std::string &provider,
                     const std::string &model_type)
       : transducer(transducer),
         paraformer(paraformer),
8 changes: 4 additions & 4 deletions sherpa-onnx/csrc/online-recognizer-transducer-impl.h
@@ -30,9 +30,9 @@
 #include "sherpa-onnx/csrc/online-transducer-greedy-search-decoder.h"
 #include "sherpa-onnx/csrc/online-transducer-model.h"
 #include "sherpa-onnx/csrc/online-transducer-modified-beam-search-decoder.h"
+#include "sherpa-onnx/csrc/onnx-utils.h"
 #include "sherpa-onnx/csrc/symbol-table.h"
 #include "sherpa-onnx/csrc/utils.h"
-#include "sherpa-onnx/csrc/onnx-utils.h"
 
 namespace sherpa_onnx {
 
@@ -185,7 +185,7 @@ class OnlineRecognizerTransducerImpl : public OnlineRecognizerImpl {
   }
 
   // Warmping up engine with wp: warm_up count and max-batch-size
-  void WarmpUpRecognizer(int32_t warmup, int32_t mbs) const {
+  void WarmpUpRecognizer(int32_t warmup, int32_t mbs) const override {
     auto max_batch_size = mbs;
     if (warmup <= 0 || warmup > 100) {
       return;
@@ -210,8 +210,8 @@ class OnlineRecognizerTransducerImpl : public OnlineRecognizerImpl {
     for (int32_t i = 0; i != warmup; ++i) {
       auto states = model_->StackStates(states_vec);
       Ort::Value x = Ort::Value::CreateTensor(memory_info, features_vec.data(),
-                                              features_vec.size(), x_shape.data(),
-                                              x_shape.size());
+                                              features_vec.size(),
+                                              x_shape.data(), x_shape.size());
       auto x_copy = Clone(model_->Allocator(), &x);
       auto pair = model_->RunEncoder(std::move(x), std::move(states),
                                      std::move(x_copy));
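The second hunk in this file is more than formatting: it adds a missing override to WarmpUpRecognizer. With override the compiler verifies that the method really overrides a base-class virtual; without it, a small signature mismatch silently declares a brand-new function that virtual dispatch never calls. A toy illustration with hypothetical classes, not this repo's:

#include <cstdint>

struct Base {
  virtual ~Base() = default;
  virtual void WarmUp(int32_t warmup, int32_t mbs) const {}
};

struct Derived : Base {
  // OK: the signature (including const) matches, and the compiler checks it.
  void WarmUp(int32_t warmup, int32_t mbs) const override {}

  // Dropping 'const' without 'override' would compile silently as a new
  // overload; with 'override' the compiler rejects it:
  // void WarmUp(int32_t warmup, int32_t mbs) override {}  // error
};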
2 changes: 1 addition & 1 deletion sherpa-onnx/csrc/online-recognizer.h
@@ -168,7 +168,7 @@ class OnlineRecognizer {
    *
    * @param warmup Number of warmups.
    * @param mbs : max-batch-size Max batch size for the models
-  */
+   */
   void WarmpUpRecognizer(int32_t warmup, int32_t mbs) const;
 
   /** Decode multiple streams in parallel
7 changes: 3 additions & 4 deletions sherpa-onnx/csrc/online-rnn-lm.cc
@@ -12,8 +12,8 @@
 #include "onnxruntime_cxx_api.h"  // NOLINT
 #include "sherpa-onnx/csrc/macros.h"
 #include "sherpa-onnx/csrc/onnx-utils.h"
-#include "sherpa-onnx/csrc/text-utils.h"
 #include "sherpa-onnx/csrc/session.h"
+#include "sherpa-onnx/csrc/text-utils.h"
 
 namespace sherpa_onnx {
 
@@ -42,10 +42,9 @@ class OnlineRnnLM::Impl {
     // nn_lm_scores
     std::array<int64_t, 2> x_shape{1, 1};
     Ort::Value x = Ort::Value::CreateTensor<int64_t>(allocator_, x_shape.data(),
-                                                      x_shape.size());
+                                                     x_shape.size());
     *x.GetTensorMutableData<int64_t>() = hyp->ys.back();
-    auto lm_out =
-        ScoreToken(std::move(x), Convert(hyp->nn_lm_states));
+    auto lm_out = ScoreToken(std::move(x), Convert(hyp->nn_lm_states));
     hyp->nn_lm_scores.value = std::move(lm_out.first);
     hyp->nn_lm_states = Convert(std::move(lm_out.second));
   }
15 changes: 7 additions & 8 deletions sherpa-onnx/csrc/online-transducer-greedy-search-decoder.cc
@@ -71,11 +71,9 @@ void OnlineTransducerGreedySearchDecoder::StripLeadingBlanks(
   r->tokens = std::vector<int64_t>(start, end);
 }
 
-
 void OnlineTransducerGreedySearchDecoder::Decode(
     Ort::Value encoder_out,
     std::vector<OnlineTransducerDecoderResult> *result) {
-
   std::vector<int64_t> encoder_out_shape =
       encoder_out.GetTensorTypeAndShapeInfo().GetShape();
 
@@ -106,7 +104,8 @@ void OnlineTransducerGreedySearchDecoder::Decode(
         r.decoder_out.GetTensorTypeAndShapeInfo().GetShape();
     decoder_out_shape[0] = batch_size;
     decoder_out = Ort::Value::CreateTensor<float>(model_->Allocator(),
-        decoder_out_shape.data(), decoder_out_shape.size());
+                                                  decoder_out_shape.data(),
+                                                  decoder_out_shape.size());
     UseCachedDecoderOut(*result, &decoder_out);
   } else {
     Ort::Value decoder_input = model_->BuildDecoderInput(*result);
@@ -116,8 +115,8 @@ void OnlineTransducerGreedySearchDecoder::Decode(
   for (int32_t t = 0; t != num_frames; ++t) {
     Ort::Value cur_encoder_out =
         GetEncoderOutFrame(model_->Allocator(), &encoder_out, t);
-    Ort::Value logit = model_->RunJoiner(
-        std::move(cur_encoder_out), View(&decoder_out));
+    Ort::Value logit =
+        model_->RunJoiner(std::move(cur_encoder_out), View(&decoder_out));
 
     float *p_logit = logit.GetTensorMutableData<float>();
 
@@ -145,9 +144,9 @@ void OnlineTransducerGreedySearchDecoder::Decode(
 
       // export the per-token log scores
       if (y != 0 && y != unk_id_) {
-        LogSoftmax(p_logit, vocab_size); // renormalize probabilities,
-                                         // save time by doing it only for
-                                         // emitted symbols
+        LogSoftmax(p_logit, vocab_size);  // renormalize probabilities,
+                                          // save time by doing it only for
+                                          // emitted symbols
         const float *p_logprob = p_logit;  // rename p_logit as p_logprob,
                                            // now it contains normalized
                                            // probability
3 changes: 1 addition & 2 deletions sherpa-onnx/csrc/online-transducer-greedy-search-decoder.h
@@ -15,8 +15,7 @@ namespace sherpa_onnx {
 class OnlineTransducerGreedySearchDecoder : public OnlineTransducerDecoder {
  public:
   OnlineTransducerGreedySearchDecoder(OnlineTransducerModel *model,
-                                      int32_t unk_id,
-                                      float blank_penalty)
+                                      int32_t unk_id, float blank_penalty)
       : model_(model), unk_id_(unk_id), blank_penalty_(blank_penalty) {}
 
   OnlineTransducerDecoderResult GetEmptyResult() const override;
2 changes: 1 addition & 1 deletion sherpa-onnx/csrc/online-transducer-model.h
@@ -69,7 +69,7 @@ class OnlineTransducerModel {
    * This has to be called before GetEncoderInitStates(), so the `encoder_embed`
    * init state has the correct `embed_dim` of its output.
    */
-  virtual void SetFeatureDim(int32_t feature_dim) { }
+  virtual void SetFeatureDim(int32_t feature_dim) {}
 
   /** Run the encoder.
    *
2 changes: 1 addition & 1 deletion sherpa-onnx/csrc/online-transducer-modified-beam-search-decoder.cc
@@ -188,7 +188,7 @@ void OnlineTransducerModifiedBeamSearchDecoder::Decode(
         // score of the transducer
         // export the per-token log scores
         if (new_token != 0 && new_token != unk_id_) {
-          const Hypothesis& prev_i = prev[hyp_index];
+          const Hypothesis &prev_i = prev[hyp_index];
           // subtract 'prev[i]' path scores, which were added before
           // getting topk tokens
           float y_prob = p_logprob[k] - prev_i.log_prob - prev_i.lm_log_prob;
12 changes: 6 additions & 6 deletions sherpa-onnx/csrc/stack-test.cc
@@ -16,10 +16,10 @@ TEST(Stack, Test1DTensors) {
   std::array<int64_t, 1> b_shape{3};
 
   Ort::Value a = Ort::Value::CreateTensor<float>(allocator, a_shape.data(),
-                                                  a_shape.size());
+                                                 a_shape.size());
 
   Ort::Value b = Ort::Value::CreateTensor<float>(allocator, b_shape.data(),
-                                                  b_shape.size());
+                                                 b_shape.size());
   float *pa = a.GetTensorMutableData<float>();
   float *pb = b.GetTensorMutableData<float>();
   for (int32_t i = 0; i != static_cast<int32_t>(a_shape[0]); ++i) {
@@ -51,11 +51,11 @@ TEST(Stack, Test2DTensorsDim0) {
   std::array<int64_t, 2> a_shape{2, 3};
   std::array<int64_t, 2> b_shape{2, 3};
 
-  Ort::Value a = Ort::Value::CreateTensor<float>(
-      allocator, a_shape.data(), a_shape.size());
+  Ort::Value a = Ort::Value::CreateTensor<float>(allocator, a_shape.data(),
+                                                 a_shape.size());
 
-  Ort::Value b = Ort::Value::CreateTensor<float>(
-      allocator, b_shape.data(), b_shape.size());
+  Ort::Value b = Ort::Value::CreateTensor<float>(allocator, b_shape.data(),
+                                                 b_shape.size());
 
   float *pa = a.GetTensorMutableData<float>();
   float *pb = b.GetTensorMutableData<float>();
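Several hunks in this commit re-wrap calls to Ort::Value::CreateTensor<T>(allocator, shape, shape_len), the onnxruntime C++ helper that allocates a tensor owned by the given allocator. A self-contained sketch of the pattern these tests use:

#include <array>
#include <cstdint>

#include "onnxruntime_cxx_api.h"  // NOLINT

int main() {
  Ort::AllocatorWithDefaultOptions allocator;

  // Allocate a 2x3 float tensor; its contents are uninitialized until filled.
  std::array<int64_t, 2> shape{2, 3};
  Ort::Value a =
      Ort::Value::CreateTensor<float>(allocator, shape.data(), shape.size());

  float *pa = a.GetTensorMutableData<float>();
  for (int32_t i = 0; i != 6; ++i) {
    pa[i] = static_cast<float>(i);
  }
  return 0;
}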
6 changes: 2 additions & 4 deletions sherpa-onnx/python/csrc/features.cc
@@ -12,10 +12,8 @@ static void PybindFeatureExtractorConfig(py::module *m) {
   using PyClass = FeatureExtractorConfig;
   py::class_<PyClass>(*m, "FeatureExtractorConfig")
       .def(py::init<int32_t, int32_t, float, float, float>(),
-           py::arg("sampling_rate") = 16000,
-           py::arg("feature_dim") = 80,
-           py::arg("low_freq") = 20.0f,
-           py::arg("high_freq") = -400.0f,
+           py::arg("sampling_rate") = 16000, py::arg("feature_dim") = 80,
+           py::arg("low_freq") = 20.0f, py::arg("high_freq") = -400.0f,
            py::arg("dither") = 0.0f)
       .def_readwrite("sampling_rate", &PyClass::sampling_rate)
       .def_readwrite("feature_dim", &PyClass::feature_dim)
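The file above binds FeatureExtractorConfig to Python with pybind11; the reflowed py::arg chain is what gives Python callers keyword arguments with defaults. A minimal sketch of the same pattern with a hypothetical DemoConfig, not the real sherpa-onnx class:

#include <cstdint>

#include <pybind11/pybind11.h>

namespace py = pybind11;

struct DemoConfig {
  int32_t feature_dim;
  float low_freq;
  DemoConfig(int32_t feature_dim, float low_freq)
      : feature_dim(feature_dim), low_freq(low_freq) {}
};

PYBIND11_MODULE(demo, m) {
  py::class_<DemoConfig>(m, "DemoConfig")
      .def(py::init<int32_t, float>(), py::arg("feature_dim") = 80,
           py::arg("low_freq") = 20.0f)
      .def_readwrite("feature_dim", &DemoConfig::feature_dim)
      .def_readwrite("low_freq", &DemoConfig::low_freq);
}

// Python usage: cfg = demo.DemoConfig(low_freq=30.0)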
3 changes: 1 addition & 2 deletions sherpa-onnx/python/csrc/offline-recognizer.cc
@@ -23,8 +23,7 @@ static void PybindOfflineRecognizerConfig(py::module *m) {
           py::arg("ctc_fst_decoder_config") = OfflineCtcFstDecoderConfig(),
           py::arg("decoding_method") = "greedy_search",
           py::arg("max_active_paths") = 4, py::arg("hotwords_file") = "",
-          py::arg("hotwords_score") = 1.5,
-          py::arg("blank_penalty") = 0.0)
+          py::arg("hotwords_score") = 1.5, py::arg("blank_penalty") = 0.0)
       .def_readwrite("feat_config", &PyClass::feat_config)
       .def_readwrite("model_config", &PyClass::model_config)
       .def_readwrite("lm_config", &PyClass::lm_config)
3 changes: 1 addition & 2 deletions sherpa-onnx/python/csrc/offline-transducer-model-config.cc
@@ -4,7 +4,6 @@
 
 #include "sherpa-onnx/python/csrc/offline-transducer-model-config.h"
 
-
 #include <string>
 #include <vector>
 
@@ -16,7 +15,7 @@ void PybindOfflineTransducerModelConfig(py::module *m) {
   using PyClass = OfflineTransducerModelConfig;
   py::class_<PyClass>(*m, "OfflineTransducerModelConfig")
       .def(py::init<const std::string &, const std::string &,
-                     const std::string &>(),
+                    const std::string &>(),
            py::arg("encoder_filename"), py::arg("decoder_filename"),
            py::arg("joiner_filename"))
       .def_readwrite("encoder_filename", &PyClass::encoder_filename)
6 changes: 3 additions & 3 deletions sherpa-onnx/python/csrc/online-model-config.cc
@@ -27,9 +27,9 @@ void PybindOnlineModelConfig(py::module *m) {
       .def(py::init<const OnlineTransducerModelConfig &,
                     const OnlineParaformerModelConfig &,
                     const OnlineWenetCtcModelConfig &,
-                    const OnlineZipformer2CtcModelConfig &,
-                    const std::string &, int32_t, int32_t,
-                    bool, const std::string &, const std::string &>(),
+                    const OnlineZipformer2CtcModelConfig &, const std::string &,
+                    int32_t, int32_t, bool, const std::string &,
+                    const std::string &>(),
            py::arg("transducer") = OnlineTransducerModelConfig(),
            py::arg("paraformer") = OnlineParaformerModelConfig(),
            py::arg("wenet_ctc") = OnlineWenetCtcModelConfig(),
