"driver/vscode:/vscode.git/clone" did not exist on "012b5253775fbbf5a4186e48da683404ac3c4a24"
Commit 688b6eac authored by SWHL's avatar SWHL
Browse files

Update files

parents
*.pth
# Created by .ignore support plugin (hsz.mobi)
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
.pytest_cache
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
# *.manifest
# *.spec
*.res
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
#idea
.vs
.vscode
.idea
#models
*.ttf
*.ttc
*.bin
*.mapping
*.xml
*.pdiparams
*.pdiparams.info
*.pdmodel
.DS_Store
\ No newline at end of file
# RapidASR (base wenet)
- Our vision is to offer an out-of-the-box engineering implementation for ASR.
- A cpp implementation of recognize-onnx.py in [Wenet-asr](https://github.com/wenet-e2e/wenet) in which it implements the inference with ONNXRuntime.
- For a version of pure CPP code, we need to do a bit of work to rewrite some components.
- Special thanks to its original author SlyneD.
- Less is more. Less dependency, more usability.
- Offline mode only; streaming is not supported — i.e., it recognizes complete, separate audio files.
- **QQ Group: 645751008**
## Notice:
- The project is under the protection of GPL V2, Apache license and commercial license.
- For so/dll/c++ interface, it complies with GPL V2.
- For python interface, it belongs to Apache license.
- For a commercial license, please contact us: znsoft@163.com (commercial license only).
## Commercial support
For a commercial user, we offer a library to resample input data including mp3, mp4, mkv and so on.
Please visit: [RapidAI/RapidAudioKit](https://github.com/RapidAI/RapidAudioKit)
cmake_minimum_required(VERSION 3.15)
project(rapidasr LANGUAGES CXX)

# Third-party libraries the ASR engine needs (resolved from SDK_ROOT/lib
# or the system linker path on Linux).
set(RAPIDASR_THIRD_PARTY_LIBS
    fst kenlm kenlm_builder kenlm_filter kenlm_interpolate kenlm_util
    ctc_decoder onnxruntime yaml-cpp bz2 lzma z)

set(SRC_FILE source/librpasrapi.cpp source/rapidasr.cpp)
set(SRC_FEAT feat/feature_pipeline.cpp feat/fft.cpp)

add_library(rapidasr SHARED ${SRC_FILE} ${SRC_FEAT})
# Public: the test executable (and any consumer) compiles against these.
target_include_directories(rapidasr PUBLIC
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${CMAKE_CURRENT_SOURCE_DIR}/include)

if(WIN32)
  # Switch the API macro to dllexport while building the DLL itself.
  target_compile_definitions(rapidasr PRIVATE _RAPIDASR_API_EXPORT)
else()
  set(SDK_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/sdk/linux)
  message(STATUS "Configuring UNIX build, SDK root: ${SDK_ROOT}")
  target_include_directories(rapidasr PUBLIC
      ${SDK_ROOT}/include
      ${SDK_ROOT}/include/kenlm
      ${SDK_ROOT}/include/ctc_decoder
      ${SDK_ROOT}/include/kaldi)
  target_link_directories(rapidasr PUBLIC ${SDK_ROOT}/lib /usr/lib)
  # Fixed: the old directory-scoped link_libraries() call came AFTER
  # add_library(rapidasr ...), so the shared library linked none of its
  # dependencies. Link them to the target explicitly.
  target_link_libraries(rapidasr PUBLIC ${RAPIDASR_THIRD_PARTY_LIBS})
endif()

add_executable(rapidasrtest source/testapp.cpp)
target_link_libraries(rapidasrtest PUBLIC rapidasr
    ctc_decoder kenlm kenlm_builder kenlm_filter kenlm_interpolate kenlm_util)
\ No newline at end of file
## RapidASR CPP
- Our vision is to offer an out-of-the-box engineering implementation for ASR.
- A cpp implementation of recognize-onnx.py in [Wenet-asr](https://github.com/wenet-e2e/wenet) in which it implements the inference with ONNXRuntime.
- For a version of pure CPP code, we need to do a bit of work to rewrite some components.
- Special thanks to its original author SlyneD.
- Less is more. Less dependency, more usability.
- Offline mode only; streaming is not supported — i.e., it recognizes complete, separate audio files.
- **QQ Group: 645751008**
### Supported modes:
- CTC_GREEDY_SEARCH
- CTC_RPEFIX_BEAM_SEARCH
- ATTENSION_RESCORING
### Models
- The model is original from [wenetspeech/s0](https://github.com/wenet-e2e/wenet/tree/main/examples/wenetspeech/s0) and tested with `recognize-onnx.py`.
- Download [Bidirectional model](http://mobvoi-speech-public.ufile.ucloud.cn/public/wenet/wenetspeech/20211025_conformer_bidecoder_exp.tar.gz)
- Download:
- URL:https://pan.baidu.com/s/1BTR-uR_8WWBFpvOisNR_PA
- Extract code:9xjz
- Sample Rate: 16000Hz
- Sample Depth: 16bits
- Channel: single
### Build
- Windows
```
Visual studio 2019 & cmake 3.20
cd thirdpart
build_win.cmd x86|x64
```
- Linux
```
cmake
```
\ No newline at end of file
{"key": "mywave", "wav": "test.wav", "txt": ""}
// Copyright (c) 2017 Personal (Binbin Zhang)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef FRONTEND_FBANK_H_
#define FRONTEND_FBANK_H_
#include <cstring>
#include <limits>
#include <random>
#include <utility>
#include <vector>
#include "feat/fft.h"
#include "utils/log.h"
namespace wenet {
// This code is based on kaldi Fbank implentation, please see
// https://github.com/kaldi-asr/kaldi/blob/master/src/feat/feature-fbank.cc
// Computes log-mel filterbank (fbank) features, following Kaldi's
// feature-fbank implementation (povey window, optional dither,
// DC-offset removal, pre-emphasis).
class Fbank {
 public:
  // num_bins:     number of mel filter bins per output frame
  // sample_rate:  input sampling rate in Hz
  // frame_length: window size in samples
  // frame_shift:  hop size in samples
  Fbank(int num_bins, int sample_rate, int frame_length, int frame_shift)
      : num_bins_(num_bins),
        sample_rate_(sample_rate),
        frame_length_(frame_length),
        frame_shift_(frame_shift),
        use_log_(true),
        remove_dc_offset_(true),
        generator_(0),  // fixed seed: dither noise is deterministic across runs
        distribution_(0, 1.0),
        dither_(0.0) {  // dithering disabled by default
    // FFT size is the smallest power of two >= frame_length.
    fft_points_ = UpperPowerOfTwo(frame_length_);
    // generate bit reversal table and trigonometric function table
    const int fft_points_4 = fft_points_ / 4;
    bitrev_.resize(fft_points_);
    sintbl_.resize(fft_points_ + fft_points_4);
    make_sintbl(fft_points_, sintbl_.data());
    make_bitrev(fft_points_, bitrev_.data());
    // Build the triangular mel filterbank: bins are spaced uniformly on
    // the mel scale between 20 Hz and the Nyquist frequency.
    int num_fft_bins = fft_points_ / 2;
    float fft_bin_width = static_cast<float>(sample_rate_) / fft_points_;
    int low_freq = 20, high_freq = sample_rate_ / 2;
    float mel_low_freq = MelScale(low_freq);
    float mel_high_freq = MelScale(high_freq);
    float mel_freq_delta = (mel_high_freq - mel_low_freq) / (num_bins + 1);
    bins_.resize(num_bins_);
    center_freqs_.resize(num_bins_);
    for (int bin = 0; bin < num_bins; ++bin) {
      // This bin's triangle spans (left_mel, right_mel) with its peak
      // at center_mel.
      float left_mel = mel_low_freq + bin * mel_freq_delta,
            center_mel = mel_low_freq + (bin + 1) * mel_freq_delta,
            right_mel = mel_low_freq + (bin + 2) * mel_freq_delta;
      center_freqs_[bin] = InverseMelScale(center_mel);
      std::vector<float> this_bin(num_fft_bins);
      int first_index = -1, last_index = -1;
      for (int i = 0; i < num_fft_bins; ++i) {
        float freq = (fft_bin_width * i);  // Center frequency of this fft
                                           // bin.
        float mel = MelScale(freq);
        if (mel > left_mel && mel < right_mel) {
          float weight;
          if (mel <= center_mel)
            weight = (mel - left_mel) / (center_mel - left_mel);
          else
            weight = (right_mel - mel) / (right_mel - center_mel);
          this_bin[i] = weight;
          if (first_index == -1) first_index = i;
          last_index = i;
        }
      }
      CHECK(first_index != -1 && last_index >= first_index);
      // Keep only the non-zero span of the triangle so Compute() does a
      // short dot product per bin instead of a full-spectrum one.
      bins_[bin].first = first_index;
      int size = last_index + 1 - first_index;
      bins_[bin].second.resize(size);
      for (int i = 0; i < size; ++i) {
        bins_[bin].second[i] = this_bin[first_index + i];
      }
    }
    // povey window: (0.5 - 0.5*cos(2*pi*i/(N-1)))^0.85.
    // NOTE(review): assumes frame_length > 1 (division by N - 1).
    povey_window_.resize(frame_length_);
    double a = M_2PI / (frame_length - 1);
    for (int i = 0; i < frame_length; ++i) {
      povey_window_[i] = pow(0.5 - 0.5 * cos(a * i), 0.85);
    }
  }

  // Enable/disable log of the mel energies (enabled by default).
  void set_use_log(bool use_log) { use_log_ = use_log; }
  // Enable/disable per-frame DC offset removal (enabled by default).
  void set_remove_dc_offset(bool remove_dc_offset) {
    remove_dc_offset_ = remove_dc_offset;
  }
  // Dither strength; 0 (the default) disables noise injection.
  void set_dither(float dither) { dither_ = dither; }
  int num_bins() const { return num_bins_; }

  // Mel scale <-> linear frequency (Hz) conversions.
  static inline float InverseMelScale(float mel_freq) {
    return 700.0f * (expf(mel_freq / 1127.0f) - 1.0f);
  }
  static inline float MelScale(float freq) {
    return 1127.0f * logf(1.0f + freq / 700.0f);
  }
  // Smallest power of two >= n (assumes n >= 1).
  static int UpperPowerOfTwo(int n) {
    return static_cast<int>(pow(2, ceil(log(n) / log(2))));
  }

  // preemphasis, in place: x[i] -= coeff * x[i-1]; the first sample uses
  // itself as its predecessor (as in Kaldi).
  void PreEmphasis(float coeff, std::vector<float>* data) const {
    if (coeff == 0.0) return;
    for (int i = data->size() - 1; i > 0; i--)
      (*data)[i] -= coeff * (*data)[i - 1];
    (*data)[0] -= coeff * (*data)[0];
  }

  // Apply povey window on data in place
  void Povey(std::vector<float>* data) const {
    CHECK_GE(data->size(), povey_window_.size());
    for (size_t i = 0; i < povey_window_.size(); ++i) {
      (*data)[i] *= povey_window_[i];
    }
  }

  // Compute fbank feat, return num frames.
  // Trailing samples that do not fill a whole frame are ignored.
  int Compute(const std::vector<float>& wave,
              std::vector<std::vector<float>>* feat) {
    int num_samples = wave.size();
    if (num_samples < frame_length_) return 0;
    int num_frames = 1 + ((num_samples - frame_length_) / frame_shift_);
    feat->resize(num_frames);
    std::vector<float> fft_real(fft_points_, 0), fft_img(fft_points_, 0);
    std::vector<float> power(fft_points_ / 2);
    for (int i = 0; i < num_frames; ++i) {
      // Copy one frame of samples.
      std::vector<float> data(wave.data() + i * frame_shift_,
                              wave.data() + i * frame_shift_ + frame_length_);
      // optional add noise
      if (dither_ != 0.0) {
        for (size_t j = 0; j < data.size(); ++j)
          data[j] += dither_ * distribution_(generator_);
      }
      // optional remove dc offset
      if (remove_dc_offset_) {
        float mean = 0.0;
        for (size_t j = 0; j < data.size(); ++j) mean += data[j];
        mean /= data.size();
        for (size_t j = 0; j < data.size(); ++j) data[j] -= mean;
      }
      PreEmphasis(0.97, &data);
      Povey(&data);
      // copy data to fft_real; zero the imaginary part and the
      // zero-padding region before transforming
      memset(fft_img.data(), 0, sizeof(float) * fft_points_);
      memset(fft_real.data() + frame_length_, 0,
             sizeof(float) * (fft_points_ - frame_length_));
      memcpy(fft_real.data(), data.data(), sizeof(float) * frame_length_);
      fft(bitrev_.data(), sintbl_.data(), fft_real.data(), fft_img.data(),
          fft_points_);
      // power spectrum of the first fft_points_/2 bins
      for (int j = 0; j < fft_points_ / 2; ++j) {
        power[j] = fft_real[j] * fft_real[j] + fft_img[j] * fft_img[j];
      }
      (*feat)[i].resize(num_bins_);
      // cepstral coefficients, triangle filter array
      for (int j = 0; j < num_bins_; ++j) {
        float mel_energy = 0.0;
        int s = bins_[j].first;
        for (size_t k = 0; k < bins_[j].second.size(); ++k) {
          mel_energy += bins_[j].second[k] * power[s + k];
        }
        // optional use log; clamp to epsilon to avoid log(0)
        if (use_log_) {
          if (mel_energy < std::numeric_limits<float>::epsilon())
            mel_energy = std::numeric_limits<float>::epsilon();
          mel_energy = logf(mel_energy);
        }
        (*feat)[i][j] = mel_energy;
        // printf("%f ", mel_energy);
      }
      // printf("\n");
    }
    return num_frames;
  }

 private:
  int num_bins_;                    // number of mel bins
  int sample_rate_;                 // Hz
  int frame_length_, frame_shift_;  // in samples
  int fft_points_;                  // FFT size (power of two)
  bool use_log_;
  bool remove_dc_offset_;
  std::vector<float> center_freqs_;  // center frequency (Hz) per mel bin
  // Per bin: first FFT-bin index plus the non-zero triangle weights.
  std::vector<std::pair<int, std::vector<float>>> bins_;
  std::vector<float> povey_window_;
  std::default_random_engine generator_;
  std::normal_distribution<float> distribution_;
  float dither_;
  // bit reversal table
  std::vector<int> bitrev_;
  // trigonometric function table
  std::vector<float> sintbl_;
};
} // namespace wenet
#endif // FRONTEND_FBANK_H_
// Copyright (c) 2017 Personal (Binbin Zhang)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "feat/feature_pipeline.h"
#include <algorithm>
#include <utility>
namespace wenet {
// Build the pipeline from `config`; no feature extraction happens here.
// NOTE(review): config is kept by reference — the caller must keep it
// alive for the pipeline's lifetime.
FeaturePipeline::FeaturePipeline(const FeaturePipelineConfig& config)
    : config_(config),
      feature_dim_(config.num_bins),
      fbank_(config.num_bins, config.sample_rate, config.frame_length,
             config.frame_shift),
      num_frames_(0),
      input_finished_(false) {}
// Append raw samples, extract as many whole frames as possible, and push
// them onto feature_queue_; the tail that does not fill a frame is kept
// in remained_wav_ for the next call.
void FeaturePipeline::AcceptWaveform(const std::vector<float>& wav) {
  std::vector<std::vector<float>> feats;
  std::vector<float> waves;
  // Prepend the residue left over from the previous call.
  waves.insert(waves.end(), remained_wav_.begin(), remained_wav_.end());
  waves.insert(waves.end(), wav.begin(), wav.end());
  int num_frames = fbank_.Compute(waves, &feats);
  for (size_t i = 0; i < feats.size(); ++i) {
    feature_queue_.Push(std::move(feats[i]));
  }
  num_frames_ += num_frames;
  // Samples not consumed by a whole frame hop are carried over.
  int left_samples = waves.size() - config_.frame_shift * num_frames;
  remained_wav_.resize(left_samples);
  std::copy(waves.begin() + config_.frame_shift * num_frames, waves.end(),
            remained_wav_.begin());
  // We are still adding wave, notify input is not finished.
  // NOTE(review): feature_queue_ is pushed without holding mutex_ —
  // presumably BlockingQueue is internally synchronized; confirm.
  finish_condition_.notify_one();
}
// Mark the end of audio input and wake any reader blocked in ReadOne().
// Must be called at most once per utterance (enforced by CHECK).
void FeaturePipeline::set_input_finished() {
  CHECK(!input_finished_);
  {
    // Flip the flag under the mutex so a waiter cannot miss the
    // transition between its predicate check and its wait().
    std::lock_guard<std::mutex> guard(mutex_);
    input_finished_ = true;
  }
  finish_condition_.notify_one();
}
// Pop one feature frame into *feat.
// Blocks while the queue is empty and input is still arriving; returns
// false only once input is finished and the queue is fully drained.
bool FeaturePipeline::ReadOne(std::vector<float>* feat) {
  // Fast path: a frame is already queued.
  if (!feature_queue_.Empty()) {
    *feat = std::move(feature_queue_.Pop());
    return true;
  } else {
    std::unique_lock<std::mutex> lock(mutex_);
    while (!input_finished_) {
      // This will release the lock and wait for notify_one()
      // from AcceptWaveform() or set_input_finished()
      finish_condition_.wait(lock);
      if (!feature_queue_.Empty()) {
        *feat = std::move(feature_queue_.Pop());
        return true;
      }
    }
    // Input ended and nothing was queued: no more features will come.
    CHECK(input_finished_);
    CHECK(feature_queue_.Empty());
    return false;
  }
}
bool FeaturePipeline::Read(int num_frames,
std::vector<std::vector<float>>* feats) {
feats->clear();
std::vector<float> feat;
while (feats->size() < num_frames) {
if (ReadOne(&feat)) {
feats->push_back(std::move(feat));
} else {
return false;
}
}
return true;
}
// Drop all buffered state so the pipeline can start a new utterance.
void FeaturePipeline::Reset() {
  num_frames_ = 0;
  input_finished_ = false;
  remained_wav_.clear();
  feature_queue_.Clear();
}
} // namespace wenet
// Copyright (c) 2017 Personal (Binbin Zhang)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef FRONTEND_FEATURE_PIPELINE_H_
#define FRONTEND_FEATURE_PIPELINE_H_
#include <mutex>
#include <queue>
#include <string>
#include <vector>
#include "feat/fbank.h"
#include "utils/blocking_queue.h"
#include "utils/log.h"
namespace wenet {
// Configuration for FeaturePipeline; frame geometry is derived from the
// sample rate (25 ms window, 10 ms hop).
struct FeaturePipelineConfig {
  int num_bins;      // number of mel bins per frame
  int sample_rate;   // Hz
  int frame_length;  // window size in samples
  int frame_shift;   // hop size in samples
  FeaturePipelineConfig(int num_bins, int sample_rate)
      : num_bins(num_bins),        // 80 dim fbank
        sample_rate(sample_rate) { // 16k sample rate
    frame_length = sample_rate / 1000 * 25;  // frame length 25ms
    frame_shift = sample_rate / 1000 * 10;   // frame shift 10ms
  }
  // Log the configuration.
  // Fixed: the original message lacked spaces around "frame_shift",
  // producing e.g. "frame_length 400frame_shift160".
  void Info() const {
    LOG(INFO) << "feature pipeline config"
              << " num_bins " << num_bins << " frame_length " << frame_length
              << " frame_shift " << frame_shift;
  }
};
// Typically, FeaturePipeline is used in two threads: one thread A calls
// AcceptWaveform() to add raw wav data and set_input_finished() to notice
// the end of input wav, another thread B (decoder thread) calls Read() to
// consume features. So a BlockingQueue is used to make this class thread-safe.
// The Read() is designed as a blocking method when there is no feature
// in feature_queue_ and the input is not finished.
// See bin/decoder_main.cc, websocket/websocket_server.cc and
// decoder/torch_asr_decoder.cc for usage
class FeaturePipeline {
 public:
  explicit FeaturePipeline(const FeaturePipelineConfig& config);

  // The feature extraction is done in AcceptWaveform().
  void AcceptWaveform(const std::vector<float>& wav);
  // Current extracted frames number.
  int num_frames() const { return num_frames_; }
  int feature_dim() const { return feature_dim_; }
  const FeaturePipelineConfig& config() const { return config_; }

  // The caller should call this method when speech input is ended.
  // Never call AcceptWaveform() after calling set_input_finished() !
  void set_input_finished();

  // Return false if input is finished and no feature could be read.
  // Return true if a feature is read.
  // This function is a blocking method. It will block the thread when
  // there is no feature in feature_queue_ and the input is not finished.
  bool ReadOne(std::vector<float>* feat);

  // Read #num_frames frame features.
  // Return false if less than #num_frames features are read and the
  // input is finished.
  // Return true if #num_frames features are read.
  // This function is a blocking method when there is no feature
  // in feature_queue_ and the input is not finished.
  bool Read(int num_frames, std::vector<std::vector<float>>* feats);

  // Drop all buffered state so a new utterance can be processed.
  void Reset();
  // True only after set_input_finished() and only for the final frame.
  bool IsLastFrame(int frame) const {
    return input_finished_ && (frame == num_frames_ - 1);
  }

 private:
  const FeaturePipelineConfig& config_;
  int feature_dim_;
  Fbank fbank_;
  // Thread-safe frame queue: producer is AcceptWaveform(), consumer is
  // ReadOne().
  BlockingQueue<std::vector<float>> feature_queue_;
  int num_frames_;
  bool input_finished_;
  // The feature extraction is done in AcceptWaveform().
  // The waveform sample points are consumed by frame size.
  // The residual waveform sample points after framing are
  // kept to be used in the next AcceptWaveform() call.
  std::vector<float> remained_wav_;
  // Used to block the Read when there is no feature in feature_queue_
  // and the input is not finished.
  mutable std::mutex mutex_;
  std::condition_variable finish_condition_;
};
} // namespace wenet
#endif // FRONTEND_FEATURE_PIPELINE_H_
// Copyright (c) 2016 HR
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "fft.h"
namespace wenet {
// Fill `sintbl` (length n + n/4) with sin(2*pi*i/n); the extra quarter
// period lets callers read cos(2*pi*i/n) as sintbl[i + n/4].
// Values are generated by a trigonometric recurrence instead of calling
// sin() per entry, then mirrored to the remaining octants.
void make_sintbl(int n, float* sintbl) {
  int i, n2, n4, n8;
  float c, s, dc, ds, t;
  n2 = n / 2;
  n4 = n / 4;
  n8 = n / 8;
  t = sin(M_PI / n);
  dc = 2 * t * t;            // dc = 1 - cos(2*pi/n)
  ds = sqrt(dc * (2 - dc));  // ds = sin(2*pi/n)
  t = 2 * dc;
  c = sintbl[n4] = 1;        // sin(pi/2)
  s = sintbl[0] = 0;         // sin(0)
  // Generate the first octant by recurrence; cos values land mirrored
  // at the top of the quarter table.
  for (i = 1; i < n8; ++i) {
    c -= dc;
    dc += t * c;
    s += ds;
    ds -= t * s;
    sintbl[i] = s;
    sintbl[n4 - i] = c;
  }
  if (n8 != 0) sintbl[n8] = sqrt(0.5);  // sin(pi/4), set exactly
  // Mirror the first quarter into the second ...
  for (i = 0; i < n4; ++i) sintbl[n2 - i] = sintbl[i];
  // ... and negate the first half (plus overlap) for the second half.
  for (i = 0; i < n2 + n4; ++i) sintbl[i + n2] = -sintbl[i];
}
// Fill `bitrev` (length n) with the bit-reversal permutation of [0, n),
// used to reorder inputs for the in-place radix-2 FFT. `n` is expected
// to be a power of two (the FFT size).
void make_bitrev(int n, int* bitrev) {
  for (int i = 0; i < n; ++i) {
    // Reverse the log2(n) significant bits of i directly.
    int rev = 0;
    for (int bit = 1; bit < n; bit <<= 1) {
      rev <<= 1;
      if (i & bit) rev |= 1;
    }
    bitrev[i] = rev;
  }
}
// bitrev: bit reversal table
// sintbl: trigonometric function table
// x:real part
// y:image part
// n: fft length
int fft(const int* bitrev, const float* sintbl, float* x, float* y, int n) {
  int i, j, k, ik, h, d, k2, n4, inverse;
  float t, s, c, dx, dy;
  /* preparation: a negative length requests the inverse transform */
  if (n < 0) {
    n = -n;
    inverse = 1; /* inverse transform */
  } else {
    inverse = 0;
  }
  n4 = n / 4;
  if (n == 0) {
    return 0;
  }
  /* bit reversal reordering; swap each pair exactly once (i < j) */
  for (i = 0; i < n; ++i) {
    j = bitrev[i];
    if (i < j) {
      t = x[i];
      x[i] = x[j];
      x[j] = t;
      t = y[i];
      y[i] = y[j];
      y[j] = t;
    }
  }
  /* transformation: butterfly passes with block size k doubling */
  for (k = 1; k < n; k = k2) {
    h = 0;
    k2 = k + k;
    d = n / k2;
    for (j = 0; j < k; ++j) {
      /* twiddle factor: cos comes from sintbl[h + n4] (quarter-period
         offset), sin from sintbl[h]; sin flips sign for the inverse */
      c = sintbl[h + n4];
      if (inverse)
        s = -sintbl[h];
      else
        s = sintbl[h];
      for (i = j; i < n; i += k2) {
        ik = i + k;
        dx = s * y[ik] + c * x[ik];
        dy = c * y[ik] - s * x[ik];
        x[ik] = x[i] - dx;
        x[i] += dx;
        y[ik] = y[i] - dy;
        y[i] += dy;
      }
      h += d;
    }
  }
  if (inverse) {
    /* divide by n in case of the inverse transformation */
    for (i = 0; i < n; ++i) {
      x[i] /= n;
      y[i] /= n;
    }
  }
  return 0; /* finished successfully */
}
} // namespace wenet
// Copyright (c) 2016 HR
#ifndef FRONTEND_FFT_H_
#define FRONTEND_FFT_H_
// Fallbacks: M_PI / M_2PI are not guaranteed by the C/C++ standards.
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif
#ifndef M_2PI
#define M_2PI 6.283185307179586476925286766559005
#endif
namespace wenet {
// Fast Fourier Transform
// Fill `sintbl` (length n + n/4) with the combined sin/cos lookup table.
void make_sintbl(int n, float* sintbl);
// Fill `bitrev` (length n) with the bit-reversal permutation of [0, n).
void make_bitrev(int n, int* bitrev);
// In-place radix-2 FFT over x (real) / y (imaginary) of length n;
// pass -n for the inverse transform. Returns 0 on success.
int fft(const int* bitrev, const float* sintbl, float* x, float* y, int n);
}  // namespace wenet
#endif  // FRONTEND_FFT_H_
// Copyright (c) 2016 Personal (Binbin Zhang)
// Created on 2016-08-15
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef FRONTEND_WAV_H_
#define FRONTEND_WAV_H_
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include "utils/log.h"
namespace wenet {
// Canonical 44-byte RIFF/WAVE header image.
// NOTE(review): the readers fread/memcpy raw bytes straight into this
// struct, which assumes no padding and a little-endian host — confirm
// before porting to a new target.
struct WavHeader {
  char riff[4];  // "RIFF"
  unsigned int size;  // file size minus 8 (see WavWriter::Write)
  char wav[4];  // "WAVE"
  char fmt[4];  // "fmt "
  unsigned int fmt_size;  // fmt chunk payload size (16 for plain PCM)
  uint16_t format;  // format tag (1 = integer PCM in WavWriter's template)
  uint16_t channels;
  unsigned int sample_rate;
  unsigned int bytes_per_second;
  uint16_t block_size;  // bytes per sample frame across all channels
  uint16_t bit;  // bits per sample
  char data[4];  // "data" (or another subchunk id while scanning)
  unsigned int data_size;  // payload size of that subchunk in bytes
};
// Reads integer-PCM samples (8/16/32 bit) from a RIFF/WAVE file and
// appends them, unnormalized, to a float vector.
class WavReader {
 public:
  explicit WavReader(const std::string& filename, vector<float>& wave) {
    Open(filename, wave);
  }

  // Parse `filename` and append every sample to `wave`.
  // Returns false on open/parse failure.
  // Fixed: the FILE* was leaked on the early-return error path, and no
  // fread() result was checked (a truncated file read garbage).
  bool Open(const std::string& filename, vector<float>& wave) {
    FILE* fp = fopen(filename.c_str(), "rb");
    if (NULL == fp) {
      LOG(WARNING) << "Error in read " << filename;
      return false;
    }
    WavHeader header;
    if (fread(&header, 1, sizeof(header), fp) != sizeof(header)) {
      LOG(WARNING) << "Error in read " << filename;
      fclose(fp);
      return false;
    }
    if (header.fmt_size < 16) {
      fprintf(stderr,
              "WaveData: expect PCM format data "
              "to have fmt chunk of at least size 16.\n");
      fclose(fp);
      return false;
    } else if (header.fmt_size > 16) {
      // Non-canonical fmt chunk: seek to the first subchunk header and
      // re-read its id + size into header.data/data_size.
      int offset = 44 - 8 + header.fmt_size - 16;
      fseek(fp, offset, SEEK_SET);
      if (fread(header.data, 8, sizeof(char), fp) != sizeof(char)) {
        fclose(fp);
        return false;
      }
    }
    // Skip any subchunks between "fmt" and "data". Usually there will
    // be a single "fact" subchunk, but on Windows there can also be a
    // "list" subchunk.
    while (0 != strncmp(header.data, "data", 4)) {
      // We will just ignore the data in these chunks.
      fseek(fp, header.data_size, SEEK_CUR);
      // read next subchunk header
      if (fread(header.data, 8, sizeof(char), fp) != sizeof(char)) {
        fclose(fp);
        return false;
      }
    }
    num_channel_ = header.channels;
    sample_rate_ = header.sample_rate;
    bits_per_sample_ = header.bit;
    num_data_ = header.data_size / (bits_per_sample_ / 8);
    num_sample_ = num_data_ / num_channel_;
    // Read samples one by one; stop quietly on a truncated file so the
    // caller still gets the samples that were present.
    bool truncated = false;
    for (int i = 0; i < num_data_ && !truncated; ++i) {
      switch (bits_per_sample_) {
        case 8: {
          char sample;
          truncated = fread(&sample, 1, sizeof(char), fp) != sizeof(char);
          if (!truncated) wave.push_back(static_cast<float>(sample));
          break;
        }
        case 16: {
          int16_t sample;
          truncated = fread(&sample, 1, sizeof(int16_t), fp) != sizeof(int16_t);
          if (!truncated) wave.push_back(static_cast<float>(sample));
          break;
        }
        case 32: {
          int sample;
          truncated = fread(&sample, 1, sizeof(int), fp) != sizeof(int);
          if (!truncated) wave.push_back(static_cast<float>(sample));
          break;
        }
        default:
          fprintf(stderr, "unsupported quantization bits");
          fclose(fp);
          exit(1);
      }
    }
    fclose(fp);
    return true;
  }

  int num_channel() const { return num_channel_; }
  int sample_rate() const { return sample_rate_; }
  int bits_per_sample() const { return bits_per_sample_; }
  int num_sample() const { return num_sample_; }
  int num_data() const { return num_data_; }

  virtual ~WavReader() { purgedata(); }

  // Kept for interface compatibility; the class no longer owns a buffer.
  void purgedata() {}

 private:
  int num_channel_ = 0;
  int sample_rate_ = 0;
  int bits_per_sample_ = 0;
  int num_sample_ = 0;  // sample points per channel
  int num_data_ = 0;    // total sample points across all channels
};
class WavReaderMem {
public:
WavReaderMem() {}
WavReaderMem(const char * szBuf, int nLen, vector<float>& wave)
{
Open(szBuf, nLen,wave);
}
bool Open(const char* szBuf, int nLen,vector<float> & wave) {
if (NULL == szBuf) {
LOG(WARNING) << "Error in reading buffer. szBuf is empty." ;
return false;
}
WavHeader Header;
//fread(&header, 1, sizeof(header), fp);
memcpy(&Header, szBuf, sizeof(Header));
const char* pCurOffset = nullptr;
if (Header.fmt_size < 16) {
fprintf(stderr,
"WaveData: expect PCM format data "
"to have fmt chunk of at least size 16.\n");
return false;
}
else if (Header.fmt_size > 16) {
int offset = 44 - 8 + Header.fmt_size - 16;
//fseek(fp, offset, SEEK_SET);
pCurOffset = szBuf + offset;
//fread(header.data, 8, sizeof(char), fp);
memcpy(Header.data, pCurOffset, 8);
}
// from the beginning of the buffer.
//
// check "riff" "WAVE" "fmt " "data"
// Skip any subchunks between "fmt" and "data". Usually there will
// be a single "fact" subchunk, but on Windows there can also be a
// "list" subchunk.
while (0 != strncmp(Header.data, "data", 4)) {
// We will just ignore the data in these chunks.
//fseek(fp, Header.data_size, SEEK_CUR);
pCurOffset = pCurOffset + Header.data_size;
// read next subchunk
//fread(header.data, 8, sizeof(char), fp);
memcpy(Header.data, pCurOffset, 8);
}
num_channel_ = Header.channels;
sample_rate_ = Header.sample_rate;
bits_per_sample_ = Header.bit;
num_data_ = Header.data_size / (bits_per_sample_ / 8);
//data_ = new float[num_data_];
num_sample_ = num_data_ / num_channel_;
for (int i = 0; i < num_data_; ++i) {
switch (bits_per_sample_) {
case 8: {
const char* sample = (const char *)pCurOffset+i;
//fread(&sample, 1, sizeof(char), fp);
wave.push_back( static_cast<float>(*sample));
break;
}
case 16: {
int16_t * sample=(int16_t *)pCurOffset+i;
// fread(&sample, 1, sizeof(int16_t), fp);
wave.push_back(static_cast<float>(*sample));
break;
}
case 32: {
int *sample= (int*)pCurOffset+i;
//fread(&sample, 1, sizeof(int), fp);
wave.push_back(static_cast<float>(*sample));
break;
}
default:
fprintf(stderr, "unsupported quantization bits");
exit(1);
}
}
return true;
}
int num_channel() const { return num_channel_; }
int sample_rate() const { return sample_rate_; }
int bits_per_sample() const { return bits_per_sample_; }
int num_sample() const { return num_sample_; }
int num_data() const { return num_data_; }
~WavReaderMem()
{
purgedata();
}
void purgedata()
{
//if (data_ != NULL)
//{
// delete[] data_;
// data_ = nullptr;
//}
}
// const float* data() const { return data_; }
private:
int num_channel_;
int sample_rate_;
int bits_per_sample_;
int num_sample_; // sample points per channel
int num_data_;
// float* data_;
};
// Writes interleaved float samples out as an integer-PCM WAV file
// (8/16/32 bit). The float values are truncated to integers as-is.
class WavWriter {
 public:
  // data:            interleaved samples, num_sample frames of
  //                  num_channel values each (not owned, not copied)
  // num_sample:      sample frames per channel
  // bits_per_sample: 8, 16 or 32
  WavWriter(const float* data, int num_sample, int num_channel, int sample_rate,
            int bits_per_sample)
      : data_(data),
        num_sample_(num_sample),
        num_channel_(num_channel),
        sample_rate_(sample_rate),
        bits_per_sample_(bits_per_sample) {}

  // Serialize header + samples to `filename`.
  void Write(const std::string& filename) {
    // Fixed: open in binary mode — text mode ("w") corrupts the stream
    // on Windows by translating newline bytes — and check the handle.
    FILE* fp = fopen(filename.c_str(), "wb");
    if (NULL == fp) {
      fprintf(stderr, "Error in write %s\n", filename.c_str());
      return;
    }
    // init char 'riff' 'WAVE' 'fmt ' 'data'
    WavHeader header;
    char wav_header[44] = {0x52, 0x49, 0x46, 0x46, 0x00, 0x00, 0x00, 0x00, 0x57,
                           0x41, 0x56, 0x45, 0x66, 0x6d, 0x74, 0x20, 0x10, 0x00,
                           0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                           0x64, 0x61, 0x74, 0x61, 0x00, 0x00, 0x00, 0x00};
    memcpy(&header, wav_header, sizeof(header));
    // Fill in the variable fields of the template header.
    header.channels = num_channel_;
    header.bit = bits_per_sample_;
    header.sample_rate = sample_rate_;
    header.data_size = num_sample_ * num_channel_ * (bits_per_sample_ / 8);
    header.size = sizeof(header) - 8 + header.data_size;
    header.bytes_per_second =
        sample_rate_ * num_channel_ * (bits_per_sample_ / 8);
    header.block_size = num_channel_ * (bits_per_sample_ / 8);
    fwrite(&header, 1, sizeof(header), fp);
    for (int i = 0; i < num_sample_; ++i) {
      for (int j = 0; j < num_channel_; ++j) {
        switch (bits_per_sample_) {
          case 8: {
            char sample = static_cast<char>(data_[i * num_channel_ + j]);
            fwrite(&sample, 1, sizeof(sample), fp);
            break;
          }
          case 16: {
            int16_t sample = static_cast<int16_t>(data_[i * num_channel_ + j]);
            fwrite(&sample, 1, sizeof(sample), fp);
            break;
          }
          case 32: {
            int sample = static_cast<int>(data_[i * num_channel_ + j]);
            fwrite(&sample, 1, sizeof(sample), fp);
            break;
          }
        }
      }
    }
    fclose(fp);
  }

 private:
  const float* data_;  // interleaved samples (not owned)
  // Fixed comment: the write loop reads num_sample_ * num_channel_
  // floats, so this is frames per channel, not the total float count.
  int num_sample_;
  int num_channel_;
  int sample_rate_;
  int bits_per_sample_;
};
} // namespace wenet
#endif // FRONTEND_WAV_H_
#pragma once
// Fetch the name of input node `nIndex` into `inputName`.
// Passing nIndex == -1 performs a pure count query (no name written).
// Returns the total number of input nodes in both cases.
// Fixed: the original fell off the end of a non-void function when
// nIndex != -1 — undefined behavior / garbage return value.
inline int getInputName(Ort::Session* session, string& inputName, int nIndex = 0) {
  size_t numInputNodes = session->GetInputCount();
  if (nIndex == -1)
    return numInputNodes;
  if (nIndex >= 0 && numInputNodes > (size_t)nIndex) {
    Ort::AllocatorWithDefaultOptions allocator;
    {
      // Name is allocated by ORT; copy it out and release it.
      char* t = session->GetInputName(nIndex, allocator);
      inputName = t;
      allocator.Free(t);
    }
  }
  return numInputNodes;
}
// Collect the names of every graph input; returns how many there are.
inline int getInputNameAll(Ort::Session* session, vector<string>& InputNames)
{
  const size_t count = session->GetInputCount();
  Ort::AllocatorWithDefaultOptions allocator;
  for (size_t idx = 0; idx < count; ++idx) {
    // ORT allocates the name; copy it into the vector, then release it.
    char* raw = session->GetInputName(idx, allocator);
    InputNames.push_back(raw);
    allocator.Free(raw);
  }
  return count;
}
// Fetch the name of output node `nIndex` into `outputName`.
// Passing nIndex == -1 performs a pure count query (no name written).
// Returns the total number of output nodes.
inline int getOutputName(Ort::Session* session, string& outputName, int nIndex = 0) {
  const size_t count = session->GetOutputCount();
  if (nIndex == -1)
    return count;
  if ((size_t)nIndex < count) {
    Ort::AllocatorWithDefaultOptions allocator;
    // ORT allocates the name; copy it out, then release it.
    char* raw = session->GetOutputName(nIndex, allocator);
    outputName = raw;
    allocator.Free(raw);
  }
  return count;
}
// Collect the names of every graph output; returns how many there are.
inline int getOutputNameAll(Ort::Session* session, vector<string>& OutputNames) {
  const size_t count = session->GetOutputCount();
  Ort::AllocatorWithDefaultOptions allocator;
  for (size_t idx = 0; idx < count; ++idx) {
    // ORT allocates the name; copy it into the vector, then release it.
    char* raw = session->GetOutputName(idx, allocator);
    OutputNames.push_back(raw);
    allocator.Free(raw);
  }
  return count;
}
\ No newline at end of file
#pragma once
// Plain-C API of the RapidASR shared library.
// _RAPIDASRAPI expands to dllexport while building the DLL on Windows,
// dllimport for its consumers, and nothing on other platforms.
#ifdef WIN32
#ifdef D_RAPIDASR_API_EXPORT
#define _RAPIDASRAPI __declspec(dllexport)
#else
#define _RAPIDASRAPI __declspec(dllimport)
#endif
#else
#define _RAPIDASRAPI
#endif
#ifdef __cplusplus
extern "C" {
#endif
// Opaque handles: an engine instance and a recognition result set.
typedef void* RAPIDASR_HANDLE;
typedef void* RAPIDASR_RESULT;
typedef unsigned char RAPIDASR_BOOL;
#define RAPIDASR_TRUE 1
#define RAPIDASR_FALSE 0
#define RP_DEFAULT_THREAD_NUM 4
// Decoding strategy (mirrors the modes listed in the README).
typedef enum
{
  RPASRM_CTC_GREEDY_SEARCH=0,
  RPASRM_CTC_RPEFIX_BEAM_SEARCH = 1,  // (sic) CTC prefix beam search
  RPASRM_ATTENSION_RESCORING = 2,     // (sic) attention rescoring
}RAPIDASR_MODE;
// APIs for qmasr
// Create an engine from a model directory with nThread worker threads.
_RAPIDASRAPI RAPIDASR_HANDLE RpASR_init(const char* szModelDir, int nThread);
// Recognize a WAV image held in memory.
_RAPIDASRAPI RAPIDASR_RESULT RpASRRecogBuffer(RAPIDASR_HANDLE handle, const char* szBuf, int nLen, RAPIDASR_MODE Mode);
// Recognize a WAV file on disk.
_RAPIDASRAPI RAPIDASR_RESULT RpASRRecogFile(RAPIDASR_HANDLE handle, const char* szWavfile, RAPIDASR_MODE Mode);
// Access the nIndex-th recognized string of a result.
_RAPIDASRAPI const char* RpASRGetResult(RAPIDASR_RESULT Result,int nIndex);
// Number of strings contained in a result.
_RAPIDASRAPI const int RpASRGetRetNumber(RAPIDASR_RESULT Result);
// Release a result / the whole engine.
_RAPIDASRAPI void RpASRFreeResult(RAPIDASR_RESULT Result);
_RAPIDASRAPI void RpASR_Uninit(RAPIDASR_HANDLE Handle);
#ifdef __cplusplus
}
#endif
\ No newline at end of file
#pragma once
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <iostream>
#include <sstream>
using namespace std;
#include "yaml-cpp/yaml.h"
#include "feat/fbank.h"
#include "feat/feature_pipeline.h"
#include "feat/wav.h"
#include "feat/fft.h"
#include "ctc_beam_search_decoder.h"
#include "onnxruntime/onnxruntime_run_options_config_keys.h"
#include "onnxruntime/onnxruntime_cxx_api.h"
// user defined headers
#include "rpdatadef.h"
#include "commfunc.h"
#include "librpasrapi.h"
#include "rapidasr.h"
#pragma once
// ONNXRuntime-backed recognizer: runs the wenet encoder/decoder graphs
// and turns fbank features into text.
class CQmASRRecog
{
private:
  // One ORT session (and env) per exported graph.
  Ort::Session* m_session_encoder=nullptr;
  Ort::Session* m_session_decoder = nullptr;
  Ort::Env envDecoder = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "QmASR_decoder");
  Ort::Env envEncoder = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "QmASR_encoder");
  Ort::SessionOptions sessionOptions = Ort::SessionOptions();
  // Config (train.yaml) and dictionary (words.txt) — presumably their
  // paths; confirm against LoadModel's implementation.
  string m_strConfig, m_strDict;
  // Graph input/output names; the const char* vectors mirror the string
  // vectors in the form ORT's Run() expects.
  vector<string> m_vecEncInputName, m_vecEncOutputName, m_vecDecInputName, m_vecDecOutputName;
  vector<const char *> m_strEncInputName, m_strEncOutputName, m_strDecInputName, m_strDecOutputName;
  bool m_bIsLoaded = false;
  vector<std::string> m_Vocabulary;
  // Read from train.yaml (original comment was mojibake for this).
  float m_reverse_weight=0.f;
public :
  CQmASRRecog(const char * szModelDir, int nThread);
  CQmASRRecog(const char* szEncoder, const char* szDecoder, const char* szDict, const char* szConfig, int nThread);
  ~CQmASRRecog();
  bool LoadModel(const char* szEncoder, const char* szDecoder, const char* szDict, const char* szConfig, int nNumThread);
  bool LoadModel(const char* szModelDir, int nNumThread);
  bool IsLoaded();
  // Feature extraction (original comment was mojibake).
  int ExtractFeature(vector<float>& wav, std::vector<std::vector<float>>& feats ,wenet::FeaturePipelineConfig& config);
  PRAPIDASR_RECOG_RESULT DoRecognize(vector<vector<float>>& feats, RAPIDASR_MODE Mode = RPASRM_CTC_GREEDY_SEARCH);
};
\ No newline at end of file
#pragma once
// File names the engine expects inside the model directory.
#define QM_ENCODER_MODEL "encoder.onnx"
#define QM_DECODER_MODEL "decoder.onnx"
#define QM_CONFIG_FILE "train.yaml"
#define QM_DICT_FILE "words.txt"
// Sentinel id (-1) — presumably wenet's ignore/padding id; confirm usage.
#define IGNORE_ID -1
// Feature dimension (matches num_mel_bins / input_dim in train.yaml).
#define QM_FEATURE_DIMENSION 80
#define QM_DEFAULT_SAMPLE_RATE 16000  // Hz
// Platform path separator.
#ifdef WIN32
#define OS_SEP "\\"
#else
#define OS_SEP "/"
#endif
// Opaque context wrapper.
typedef struct
{
  void* p;
} RAPIDASR_CONTEXT,*PRAPIDASR_CONTEXT;
// Status codes.
typedef enum
{
  QAC_ERROR=-1,
  QAC_OK=0,
}RAPIDASR_CODE;
// Recognition outcome: a status plus the recognized strings.
typedef struct
{
  RAPIDASR_CODE Result;
  vector<string> Strings;
} RAPIDASR_RECOG_RESULT,*PRAPIDASR_RECOG_RESULT;
\ No newline at end of file
beam_size: 10
ctc_weight: -1.0
fp16: true
reverse_weight: -1.0
accum_grad: 16
cmvn_file: exp/conformer_bidecoder/global_cmvn
dataset_conf:
batch_conf:
batch_size: 32
batch_type: static
fbank_conf:
dither: 1.0
frame_length: 25
frame_shift: 10
num_mel_bins: 80
filter_conf:
max_length: 1200
min_length: 10
token_max_length: 100
token_min_length: 1
resample_conf:
resample_rate: 16000
shuffle: true
shuffle_conf:
shuffle_size: 1500
sort: true
sort_conf:
sort_size: 1000
spec_aug: true
spec_aug_conf:
max_f: 30
max_t: 50
num_f_mask: 2
num_t_mask: 2
speed_perturb: false
decoder: bitransformer
decoder_conf:
attention_heads: 8
dropout_rate: 0.1
linear_units: 2048
num_blocks: 3
positional_dropout_rate: 0.1
r_num_blocks: 3
self_attention_dropout_rate: 0.0
src_attention_dropout_rate: 0.0
encoder: conformer
encoder_conf:
activation_type: swish
attention_dropout_rate: 0.0
attention_heads: 8
cnn_module_kernel: 15
cnn_module_norm: layer_norm
dropout_rate: 0.1
input_layer: conv2d
linear_units: 2048
normalize_before: true
num_blocks: 12
output_size: 512
pos_enc_layer_type: rel_pos
positional_dropout_rate: 0.1
selfattention_layer_type: rel_selfattn
use_cnn_module: true
grad_clip: 5
input_dim: 80
is_json_cmvn: true
log_interval: 100
max_epoch: 50
model_conf:
ctc_weight: 0.3
length_normalized_loss: false
lsm_weight: 0.1
reverse_weight: 0.3
optim: adam
optim_conf:
lr: 0.001
output_dim: 5537
scheduler: warmuplr
scheduler_conf:
warmup_steps: 5000
beam_size: 10
ctc_weight: -1.0
fp16: true
reverse_weight: -1.0
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment