Commit c007ba1a authored by sunzhq2's avatar sunzhq2 Committed by xuxo
Browse files

update

parents
Pipeline #3464 failed with stages
in 0 seconds
# import onnxruntime as ort
# import numpy as np
# # 直接加载ONNX模型查看输入要求
# model_path = "/root/.cache/espnet_onnx/transformer_lm/full/default_encoder.onnx"
# try:
# sess = ort.InferenceSession(model_path, providers=['CPUExecutionProvider'])
# input_details = sess.get_inputs()
# print("ONNX模型输入要求:")
# for inp in input_details:
# print(f" 名称: {inp.name}, 形状: {inp.shape}, 类型: {inp.type}")
# except Exception as e:
# print(f"加载模型失败: {e}")
# import os
# import onnx
# import onnxruntime as ort
# import numpy as np
# # 检查ONNX模型文件
# model_path = "/root/.cache/espnet_onnx/transformer_lm/full/default_encoder.onnx"
# print("检查模型文件...")
# if os.path.exists(model_path):
# model_size = os.path.getsize(model_path)
# print(f"模型大小: {model_size} bytes")
# # 加载模型查看结构
# try:
# model = onnx.load(model_path)
# print(f"模型IR版本: {model.ir_version}")
# print(f"生产者: {model.producer_name} {model.producer_version}")
# print(f"模型输入: {len(model.graph.input)} 个")
# print(f"模型输出: {len(model.graph.output)} 个")
# print(f"节点数量: {len(model.graph.node)}")
# # 查找Where节点
# where_nodes = [node for node in model.graph.node if node.op_type == "Where"]
# print(f"找到 {len(where_nodes)} 个Where节点")
# for i, node in enumerate(where_nodes[:3]): # 只显示前3个
# print(f" Where节点 {i}: {node.name}")
# print(f" 输入: {[input for input in node.input]}")
# print(f" 输出: {[output for output in node.output]}")
# except Exception as e:
# print(f"加载模型失败: {e}")
# else:
# print(f"模型文件不存在: {model_path}")
import onnxruntime as ort
import numpy as np

# Probe an exported espnet ONNX encoder: dump its declared inputs, then try
# feeding dummy data under each input name (alone, then all together) to
# discover the combination the model actually accepts.
model_path = "/root/.cache/espnet_onnx/transformer_lm/full/default_encoder.onnx"

print("=== 检查模型实际输入 ===")
sess = ort.InferenceSession(model_path, providers=['CPUExecutionProvider'])

# Dump every declared input: name, shape, element type, and each dimension
# (dims may be symbolic strings rather than ints for dynamic axes).
print("模型输入详细信息:")
for inp in sess.get_inputs():
    print(f"\n输入: {inp.name}")
    print(f"  形状: {inp.shape}")
    print(f"  类型: {inp.type}")
    for i, dim in enumerate(inp.shape):
        print(f"  维度[{i}]: {dim}")

print("\n=== 尝试不同的输入名称 ===")

# Dummy fbank-style features: (batch, frames, mels).
# NOTE(review): assumes the encoder expects 80-dim features — confirm
# against the training config.
batch_size = 1
time_frames = 100
n_mels = 80
dummy_feats = np.random.randn(batch_size, time_frames, n_mels).astype(np.float32)

input_names = [inp.name for inp in sess.get_inputs()]
print(f"模型接受的输入名称: {input_names}")

# First pass: try each input on its own. Names containing "length"/"lens"
# are assumed to be int64 length vectors; everything else gets the features.
for name in input_names:
    print(f"\n测试输入: {name}")
    if 'length' in name.lower() or 'lens' in name.lower():
        dummy_input = np.array([time_frames], dtype=np.int64)
    else:
        dummy_input = dummy_feats
    try:
        outputs = sess.run(None, {name: dummy_input})
        print(f"  成功! 使用单一输入: {name}")
        print(f"  输出数量: {len(outputs)}")
        for i, out in enumerate(outputs):
            print(f"  输出{i}: {out.shape}")
        break
    except Exception as e:  # was a bare except; surface the actual reason
        print(f"  失败: 单一输入{name} ({e})")

# Second pass: if the model declares several inputs, feed all of them at
# once with the same length/feature heuristic per name.
if len(input_names) > 1:
    print(f"\n尝试多输入组合: {input_names}")
    input_dict = {}
    for name in input_names:
        if 'length' in name.lower() or 'lens' in name.lower():
            input_dict[name] = np.array([time_frames], dtype=np.int64)
        else:
            input_dict[name] = dummy_feats
    try:
        outputs = sess.run(None, input_dict)
        print(f"  成功! 使用多输入")
        for i, out in enumerate(outputs):
            print(f"  输出{i}: {out.shape}")
    except Exception as e:
        print(f"  失败: {e}")
\ No newline at end of file
#!/usr/bin/env python3
"""
将已导出的ONNX模型转换为支持指定batch_size的模型
"""
import onnx
import onnx.shape_inference
import argparse
import os
def _print_io_shape(value_info, label):
    """Print one graph input/output's shape on a single line under *label*."""
    if not value_info.type.tensor_type.HasField("shape"):
        return
    shape = value_info.type.tensor_type.shape
    print(f"    {label}: ", end="")
    for dim in shape.dim:
        # Each dim holds either a concrete size or a symbolic name.
        if dim.HasField("dim_value"):
            print(f"{dim.dim_value}", end=" ")
        elif dim.HasField("dim_param"):
            print(f"{dim.dim_param}", end=" ")
    print()


def _set_batch_dim(value_info, target_batch_size):
    """Rewrite the first (batch) dimension of a graph input/output in place.

    ``target_batch_size == -1`` makes the dimension dynamic (symbolic
    "batch_size"); any other value pins it to that concrete size.
    """
    if not value_info.type.tensor_type.HasField("shape"):
        return
    shape = value_info.type.tensor_type.shape
    if not shape.dim:
        return
    dim0 = shape.dim[0]
    if target_batch_size == -1:
        # Already symbolic: keep the existing name. Otherwise, assigning
        # dim_param clears dim_value automatically (protobuf oneof).
        if not dim0.HasField("dim_param"):
            dim0.dim_param = "batch_size"
    else:
        # Assigning dim_value clears any dim_param via the same oneof, so
        # no explicit ClearField is needed.
        dim0.dim_value = target_batch_size


def modify_onnx_batch_size(model_path, output_path, target_batch_size=24):
    """Rewrite the batch dimension of every input/output of an ONNX model.

    Args:
        model_path: path of the model to read.
        output_path: path where the modified model is written.
        target_batch_size: desired batch size; ``-1`` means a dynamic batch
            dimension, any other value fixes the dimension to that size.
    """
    model = onnx.load(model_path)

    # Show the shapes we are about to change.
    print(f"原始模型输入信息:")
    for i, input_info in enumerate(model.graph.input):
        print(f"  Input {i}: {input_info.name}")
        _print_io_shape(input_info, "原始形状")

    # Inputs and outputs get exactly the same batch-dim rewrite.
    for input_info in model.graph.input:
        _set_batch_dim(input_info, target_batch_size)
    for output_info in model.graph.output:
        _set_batch_dim(output_info, target_batch_size)

    # Re-run shape inference so intermediate tensors pick up the new batch.
    model = onnx.shape_inference.infer_shapes(model)

    onnx.save(model, output_path)
    print(f"模型已保存到: {output_path}")
    print(f"目标batch_size: {'动态' if target_batch_size == -1 else target_batch_size}")

    # Reload the written file to verify what actually got saved.
    print(f"修改后模型输入信息:")
    model_modified = onnx.load(output_path)
    for i, input_info in enumerate(model_modified.graph.input):
        print(f"  Input {i}: {input_info.name}")
        _print_io_shape(input_info, "修改后形状")
def batch_convert_models(input_dir, output_dir, target_batch_size=24):
    """Convert every ``.onnx`` file in *input_dir*, writing to *output_dir*.

    Each file is converted independently with
    :func:`modify_onnx_batch_size`; a failure on one file is reported and
    does not stop the remaining conversions.
    """
    # exist_ok avoids the check-then-create race of the exists()/makedirs pair.
    os.makedirs(output_dir, exist_ok=True)

    onnx_files = [f for f in os.listdir(input_dir) if f.endswith('.onnx')]
    print(f"找到 {len(onnx_files)} 个ONNX文件:")
    for file in onnx_files:
        print(f"  - {file}")

    for file in onnx_files:
        input_path = os.path.join(input_dir, file)
        output_path = os.path.join(output_dir, file)
        print(f"\n正在转换: {file}")
        try:
            modify_onnx_batch_size(input_path, output_path, target_batch_size)
            print(f"✓ {file} 转换成功")
        except Exception as e:
            # Best-effort batch: log the failure and continue with the rest.
            print(f"✗ {file} 转换失败: {e}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='修改ONNX模型的batch_size')
    parser.add_argument('--input', type=str, required=True, help='输入ONNX文件或目录路径')
    parser.add_argument('--output', type=str, required=True, help='输出路径')
    parser.add_argument('--batch_size', type=int, default=24, help='目标batch_size(-1表示动态batch)')
    parser.add_argument('--batch_mode', action='store_true', help='批量模式,处理目录中的所有ONNX文件')
    args = parser.parse_args()

    if args.batch_mode:
        # Directory mode: convert every .onnx file under --input.
        batch_convert_models(args.input, args.output, args.batch_size)
    else:
        # Single-file mode: --input and --output are file paths.
        modify_onnx_batch_size(args.input, args.output, args.batch_size)
\ No newline at end of file
#!/usr/bin/env bash
# Regenerate the transformer_lm ONNX models with a fixed batch size of 24.
# Copies the whole model tree to a fresh output directory, wipes the copied
# transformer_lm/full models, then rewrites them via convert_onnx_batch_size.py
# (--batch_mode converts every .onnx file in the given directory).
input_dir=/home/sunzhq/workspace/yidong-infer/conformer/onnx_models
output_dir=/home/sunzhq/workspace/yidong-infer/conformer/onnx_models_batch24
# Start from a clean output tree.
rm -rf ${output_dir}
mkdir -p ${output_dir}
cp -r ${input_dir}/* ${output_dir}
# Drop the copied LM models; they are rewritten below with the new batch size.
rm -rf ${output_dir}/transformer_lm/full/*
python convert_onnx_batch_size.py \
--input ${input_dir}/transformer_lm/full \
--output ${output_dir}/transformer_lm/full/ \
--batch_size 24 \
--batch_mode
\ No newline at end of file
import librosa
import os

# Scan the AISHELL test set and print the path of every utterance whose
# duration is exactly 14.6999375 s (used to locate one specific clip whose
# duration was observed earlier).
sr = 16000
audio_dir = "/data/datasets/1/data_aishell/wav/test"

durations = []
for speaker in os.listdir(audio_dir):
    speaker_dir = os.path.join(audio_dir, speaker)
    for fname in os.listdir(speaker_dir):
        path = os.path.join(speaker_dir, fname)
        y, sr = librosa.load(path, sr=sr)
        duration = len(y) / sr  # compute once; was evaluated twice per file
        # NOTE(review): exact float equality is deliberate here (the target
        # value was taken from a previous run of this scan); switch to
        # math.isclose if the target is ever entered by hand.
        if duration == 14.6999375:
            print(path)
        durations.append(duration)
# print(sorted(durations))
\ No newline at end of file
[report]
omit =
tools/*
# Regexes for lines to exclude from consideration
exclude_lines =
# Have to re-enable the standard pragma
pragma: no cover
if __name__ == "__main__":
if __name__ == '__main__':
@abstractmethod
raise NotImplementedError
# ignored folders
doc/
src/
egs/
test/
tools/kaldi
tools/kaldi-io-for-python/
tools/kaldi_github/
tools/miniconda.sh
tools/nkf/
tools/venv/
tools/warp-ctc/
tools/warp-transducer/
tools/chainer_ctc/
tools/subword-nmt/
.pytest_cache
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: Bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**Basic environments:**
- OS information: [e.g., Linux 4.9.0-11-amd64 #1 SMP Debian 4.9.189-3+deb9u2 (2019-11-11) x86_64]
- python version: [e.g. 3.7.3 (default, Mar 27 2019, 22:11:17) [GCC 7.3.0]]
- espnet version: [e.g. espnet 0.8.0]
- Git hash [e.g. b88e89fc7246fed4c2842b55baba884fe1b4ecc2]
- Commit date [e.g. Tue Sep 1 09:32:54 2020 -0400]
- pytorch version [e.g. pytorch 1.4.0]
You can obtain them by the following command
```
cd <espnet-root>/tools
. ./activate_python.sh
echo "- OS information: `uname -mrsv`"
python3 << EOF
import sys, espnet, torch
pyversion = sys.version.replace('\n', ' ')
print(f"""- python version: \`{pyversion}\`
- espnet version: \`espnet {espnet.__version__}\`
- pytorch version: \`pytorch {torch.__version__}\`""")
EOF
cat << EOF
- Git hash: \`$(git rev-parse HEAD)\`
- Commit date: \`$(git log -1 --format='%cd')\`
EOF
```
**Environments from `torch.utils.collect_env`:**
e.g.,
```
Collecting environment information...
PyTorch version: 1.4.0
Is debug build: No
CUDA used to build PyTorch: 10.0
OS: CentOS Linux release 7.5.1804 (Core)
GCC version: (GCC) 7.2.0
CMake version: version 2.8.12.2
Python version: 3.7
Is CUDA available: Yes
CUDA runtime version: 10.0.130
GPU models and configuration:
GPU 0: TITAN RTX
GPU 1: TITAN RTX
GPU 2: TITAN RTX
GPU 3: TITAN RTX
Nvidia driver version: 440.33.01
cuDNN version: Could not collect
Versions of relevant libraries:
[pip3] numpy==1.18.5
[pip3] pytorch-ranger==0.1.1
[pip3] pytorch-wpe==0.0.0
[pip3] torch==1.4.0
[pip3] torch-complex==0.1.1
[pip3] torch-optimizer==0.0.1a14
[pip3] torchaudio==0.4.0
[pip3] warprnnt-pytorch==0.1
[conda] blas 1.0 mkl
[conda] mkl 2020.1 217
[conda] mkl-service 2.3.0 py37he904b0f_0
[conda] mkl_fft 1.1.0 py37h23d657b_0
[conda] mkl_random 1.1.1 py37h0573a6f_0
[conda] pytorch 1.4.0 py3.7_cuda10.0.130_cudnn7.6.3_0 pytorch
[conda] pytorch-ranger 0.1.1 pypi_0 pypi
[conda] pytorch-wpe 0.0.0 pypi_0 pypi
[conda] torch-complex 0.1.1 pypi_0 pypi
[conda] torch-optimizer 0.0.1a14 pypi_0 pypi
[conda] torchaudio 0.4.0 pypi_0 pypi
[conda] warprnnt-pytorch 0.1 pypi_0 pypi
```
You can obtain them by the following command
```
cd <espnet-root>/tools
. ./activate_python.sh
python3 -m torch.utils.collect_env
```
**Task information:**
- Task: [e.g., ASR, TTS, ST, ENH]
- Recipe: [e.g. librispeech]
- ESPnet1 or ESPnet2
**To Reproduce**
Steps to reproduce the behavior:
1. move to a recipe directory, e.g., `cd egs/librispeech/asr1`
2. execute `run.sh` with specific arguments, e.g., `run.sh --stage 3 --ngp 1`
3. specify the error log, e.g., `exp/xxx/yyy.log`
**Error logs**
Paste the error logs. If applicable, add screenshots to help explain your problem.
---
name: Installation issue template
about: Create a report for installation issues
title: ''
labels: Installation
assignees: ''
---
**Describe the issue**
A clear and concise description of what the issue is.
Please check https://espnet.github.io/espnet/installation.html in advance.
**Show the `check_install.py` status by using the following command**
```
cd <espnet-root>/tools
. ./activate_python.sh; python3 check_install.py
```
**Basic environments:**
- OS information: [e.g., Linux 4.9.0-11-amd64 #1 SMP Debian 4.9.189-3+deb9u2 (2019-11-11) x86_64]
- python version: [e.g. 3.7.3 (default, Mar 27 2019, 22:11:17) [GCC 7.3.0]]
- espnet version: [e.g. espnet 0.8.0]
- Git hash [e.g. b88e89fc7246fed4c2842b55baba884fe1b4ecc2]
- Commit date [e.g. Tue Sep 1 09:32:54 2020 -0400]
- pytorch version [e.g. pytorch 1.4.0]
You can obtain them by the following command
```
cd <espnet-root>/tools
. ./activate_python.sh
echo "- OS information: `uname -mrsv`"
python3 << EOF
import sys, espnet, torch
pyversion = sys.version.replace('\n', ' ')
print(f"""- python version: \`{pyversion}\`
- espnet version: \`espnet {espnet.__version__}\`
- pytorch version: \`pytorch {torch.__version__}\`""")
EOF
cat << EOF
- Git hash: \`$(git rev-parse HEAD)\`
- Commit date: \`$(git log -1 --format='%cd')\`
EOF
```
**Environments from `torch.utils.collect_env`:**
e.g.,
```
Collecting environment information...
PyTorch version: 1.4.0
Is debug build: No
CUDA used to build PyTorch: 10.0
OS: CentOS Linux release 7.5.1804 (Core)
GCC version: (GCC) 7.2.0
CMake version: version 2.8.12.2
Python version: 3.7
Is CUDA available: Yes
CUDA runtime version: 10.0.130
GPU models and configuration:
GPU 0: TITAN RTX
GPU 1: TITAN RTX
GPU 2: TITAN RTX
GPU 3: TITAN RTX
Nvidia driver version: 440.33.01
cuDNN version: Could not collect
Versions of relevant libraries:
[pip3] numpy==1.18.5
[pip3] pytorch-ranger==0.1.1
[pip3] pytorch-wpe==0.0.0
[pip3] torch==1.4.0
[pip3] torch-complex==0.1.1
[pip3] torch-optimizer==0.0.1a14
[pip3] torchaudio==0.4.0
[pip3] warprnnt-pytorch==0.1
[conda] blas 1.0 mkl
[conda] mkl 2020.1 217
[conda] mkl-service 2.3.0 py37he904b0f_0
[conda] mkl_fft 1.1.0 py37h23d657b_0
[conda] mkl_random 1.1.1 py37h0573a6f_0
[conda] pytorch 1.4.0 py3.7_cuda10.0.130_cudnn7.6.3_0 pytorch
[conda] pytorch-ranger 0.1.1 pypi_0 pypi
[conda] pytorch-wpe 0.0.0 pypi_0 pypi
[conda] torch-complex 0.1.1 pypi_0 pypi
[conda] torch-optimizer 0.0.1a14 pypi_0 pypi
[conda] torchaudio 0.4.0 pypi_0 pypi
[conda] warprnnt-pytorch 0.1 pypi_0 pypi
```
You can obtain them by the following command
```
cd <espnet-root>/tools
. ./activate_python.sh
python3 -m torch.utils.collect_env
```
**To Reproduce**
Steps to reproduce the behavior by showing us the specific installation commands with their arguments, e.g.,
```
cd <espnet-root>/tools
make TH_VERSION=1.3.1
```
**Error logs**
Paste the error logs. If applicable, add screenshots to help explain your problem.
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 45
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 30
# Issues with these labels will never be considered stale
exemptLabels:
- Roadmap
- Bug
# Label to use when marking an issue as stale
staleLabel: Stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
unmarkComment: false
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
This issue is closed. Please re-open if needed.
name: Cancel
on:
workflow_run:
workflows: ["CI", "centos7", "debian11", "doc"]
types:
- requested
jobs:
cancel:
runs-on: ubuntu-latest
steps:
- uses: styfle/cancel-workflow-action@0.9.1
with:
workflow_id: ${{ github.event.workflow.id }}
name: centos7
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
test_centos7:
runs-on: ubuntu-latest
container:
image: centos:7
env:
ESPNET_PYTHON_VERSION: 3.7
# NOTE: 1.9.0 raised libstdc++ version errors in pyworld.
# ImportError: /lib64/libstdc++.so.6: version `CXXABI_1.3.8' not found
# (required by /__w/espnet/espnet/tools/venv/envs/espnet/lib/python3.6/site-packages/pyworld/pyworld.cpython-36m-x86_64-linux-gnu.so)
# NOTE(kamo): The issue doesn't exist for python3.7?
TH_VERSION: 1.13.1
CHAINER_VERSION: 6.0.0
USE_CONDA: true
CC: /opt/rh/devtoolset-7/root/usr/bin/gcc
CXX: /opt/rh/devtoolset-7/root/usr/bin/g++
MAKE: /opt/rh/devtoolset-7/root/usr/bin/make
# To avoid UnicodeEncodeError for python<=3.6
LC_ALL: en_US.UTF-8
steps:
- uses: actions/checkout@master
- name: check OS
run: cat /etc/os-release
- name: install dependencies
run: |
# NOTE(kamo): cmake and sndfile will be downloaded using anaconda:
yum install -y git centos-release-scl bzip2 wget which unzip bc patch
yum-config-manager --enable rhel-server-rhscl-7-rpms
yum install -y devtoolset-7-gcc-c++ devtoolset-7-make sox ncurses-devel libtool automake autoconf
localedef -f UTF-8 -i en_US en_US
- name: install espnet
run: |
# NOTE(karita) this line exited 1
# source scl_source enable devtoolset-7
PATH="/opt/rh/devtoolset-7/root/usr/bin:${PATH:-}"
./ci/install.sh
- name: test shell
run: |
PATH="/opt/rh/devtoolset-7/root/usr/bin:${PATH:-}"
./ci/test_shell.sh
- name: test python
run: |
PATH="/opt/rh/devtoolset-7/root/usr/bin:${PATH:-}"
./ci/test_python.sh
name: Check kaldi scripts
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
check_kaldi_symlinks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- run: ci/check_kaldi_symlinks.sh
name: CI
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
linter_and_test:
runs-on: ${{ matrix.os }}
strategy:
max-parallel: 20
matrix:
os: [ubuntu-latest]
python-version: [3.7, 3.8, 3.9]
pytorch-version: [1.10.2, 1.11.0, 1.12.1, 1.13.1]
chainer-version: [6.0.0]
# NOTE(kamo): Conda is tested by Circle-CI
use-conda: [false]
include:
- os: ubuntu-latest
python-version: "3.10"
pytorch-version: 1.13.1
chainer-version: 6.0.0
use-conda: false
steps:
- uses: actions/checkout@master
- uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ matrix.pytorch-version }}-${{ hashFiles('**/setup.py') }}-${{ hashFiles('**/Makefile') }}
- uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
architecture: 'x64'
- name: install dependencies
run: |
sudo apt-get update -qq
# NOTE(kamo): g++-7 doesn't exist in ubuntu-latest
sudo apt-get install -qq -y cmake libsndfile1-dev bc sox
- name: install espnet
env:
ESPNET_PYTHON_VERSION: ${{ matrix.python-version }}
TH_VERSION: ${{ matrix.pytorch-version }}
CHAINER_VERSION: ${{ matrix.chainer-version }}
USE_CONDA: ${{ matrix.use-conda }}
run: |
./ci/install.sh
- name: test shell
run: |
./ci/test_shell.sh
- name: test python
run: ./ci/test_python.sh
- uses: codecov/codecov-action@v2
with:
flags: test_python
- name: coverage erase
run: |
source tools/activate_python.sh
coverage erase
- name: install kaldi
run: |
./ci/install_kaldi.sh
- name: test utils
run: ./ci/test_utils.sh
- uses: codecov/codecov-action@v2
with:
flags: test_utils
- name: coverage erase
run: |
source tools/activate_python.sh
coverage erase
- name: test espnet1 integration
run: ./ci/test_integration_espnet1.sh
- uses: codecov/codecov-action@v2
with:
flags: test_integration_espnet1
- name: coverage erase
run: |
source tools/activate_python.sh
coverage erase
- name: test espnet2 integration
run: ./ci/test_integration_espnet2.sh
- uses: codecov/codecov-action@v2
with:
flags: test_integration_espnet2
name: debian11
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
test_debian11:
runs-on: ubuntu-latest
container:
image: debian:11
env:
ESPNET_PYTHON_VERSION: 3.7
TH_VERSION: 1.13.1
CHAINER_VERSION: 6.0.0
USE_CONDA: true
# To avoid UnicodeEncodeError for python<=3.6
LC_ALL: en_US.UTF-8
steps:
- uses: actions/checkout@master
- name: check OS
run: cat /etc/os-release
- name: install dependencies
run: |
apt-get update -qq
# NOTE(kamo): cmake and sndfile will be downloaded using anaconda:
apt-get install -qq -y \
build-essential git unzip bzip2 wget curl bc locales make sox \
libncurses5-dev automake libtool pkg-config
localedef -f UTF-8 -i en_US en_US
- name: install espnet
run: ./ci/install.sh
- name: test shell
run: ./ci/test_shell.sh
- name: test python
run: ./ci/test_python.sh
- name: install kaldi
run: ./ci/install_kaldi.sh
- name: test utils
run: ./ci/test_utils.sh
- name: test espnet1 integration
run: ./ci/test_integration_espnet1.sh
- name: test espnet2 integration
run: ./ci/test_integration_espnet2.sh
name: doc
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
linter_and_test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: actions/cache@v1
with:
path: ~/.cache/pip
key: pip-${{ hashFiles('**/setup.py') }}
- uses: actions/setup-python@v1
with:
python-version: 3.8
architecture: 'x64'
- name: check OS
run: cat /etc/os-release
- name: install dependencies
run: |
sudo apt-get update -qq
sudo apt-get install -qq -y cmake python3-dev git pandoc ffmpeg bc
- name: install espnet
env:
ESPNET_PYTHON_VERSION: 3.8
TH_VERSION: 1.13.1
CHAINER_VERSION: 6.0.0
USE_CONDA: false
run: ./ci/install.sh
- name: generate doc
run: ./ci/doc.sh
- name: deploy
if: github.ref == 'refs/heads/master'
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: doc/build
name: docker-builder
on:
pull_request:
types: [closed]
branches:
- master
paths:
- 'tools/**'
- setup.py
jobs:
docker:
runs-on: ubuntu-latest
if: github.event.pull_request.merged == true
steps:
- uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push CPU container
run: |
cd docker
docker build --build-arg FROM_TAG=runtime-latest \
-f prebuilt/devel.dockerfile \
--target devel \
-t espnet/espnet:cpu-latest .
docker push espnet/espnet:cpu-latest
- name: Build and push GPU container
run: |
cd docker
docker build --build-arg FROM_TAG=cuda-latest \
--build-arg CUDA_VER=11.1 \
-f prebuilt/devel.dockerfile \
--target devel \
-t espnet/espnet:gpu-latest .
docker push espnet/espnet:gpu-latest
on: [push]
jobs:
paper:
runs-on: ubuntu-latest
name: Paper Draft
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Build draft PDF
uses: openjournals/openjournals-draft-action@master
with:
journal: joss
# This should be the path to the paper within your repo.
paper-path: doc/paper/espnet-se++/paper.md
- name: Upload
uses: actions/upload-artifact@v1
with:
name: paper
# This is the output path where Pandoc will write the compiled
# PDF. Note, this should be the same directory as the input
# paper.md
path: doc/paper/espnet-se++/paper.pdf
name: MacOS
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
test_macos:
runs-on: macOS-latest
strategy:
matrix:
python-version: ["3.10"]
pytorch-version: [1.13.1]
use-conda: [true, false]
steps:
- uses: actions/checkout@master
- uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ matrix.pytorch-version }}-${{ hashFiles('**/setup.py') }}-${{ hashFiles('**/Makefile') }}
- uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
architecture: 'x64'
- name: install espnet
env:
ESPNET_PYTHON_VERSION: ${{ matrix.python-version }}
TH_VERSION: ${{ matrix.pytorch-version }}
CHAINER_VERSION: 6.0.0
USE_CONDA: ${{ matrix.use-conda }}
# FIXME(kamo): clang is used by default, but I don't know how to use "-fopenmp" with clang
WITH_OMP: OFF
# NOTE(kamo): If it's hard to build with clang, please consider using gcc
# However, I couldn't build pysptk with gcc on MacOS
# CC: /usr/local/bin/gcc-11
# CXX: /usr/local/bin/g++-11
run: |
./ci/install.sh
# This workflows will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
name: Upload Python Package
on:
push:
tags:
- 'v*'
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v1
with:
python-version: '3.8'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install setuptools wheel twine
- name: Build and publish
env:
TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
run: |
python setup.py sdist bdist_wheel
twine upload dist/*
name: Test import espnet
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
test_import:
runs-on: ${{ matrix.os }}
strategy:
max-parallel: 20
matrix:
os: [ubuntu-latest]
python-version: ["3.10"]
pytorch-version: [1.13.1]
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v1
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
sudo apt-get install -qq -y libsndfile1-dev
python3 -m pip install --upgrade pip setuptools wheel
- name: Install espnet with the least requirement
env:
TH_VERSION: ${{ matrix.pytorch-version }}
run: |
python3 -m pip install -U numba
./tools/installers/install_torch.sh false ${TH_VERSION} CPU
./tools/installers/install_chainer.sh CPU
python3 setup.py bdist_wheel
python3 -m pip install dist/espnet-*.whl
# log
python3 -m pip freeze
- name: Import all modules (Try1)
run: |
python3 ./ci/test_import_all.py
- name: Install espnet with the full requirement
env:
TH_VERSION: ${{ matrix.pytorch-version }}
run: |
python3 -m pip install "$(ls dist/espnet-*.whl)[all]"
# log
python3 -m pip freeze
- name: Import all modules (Try2)
run: |
python3 ./ci/test_import_all.py
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment