#!/usr/bin/env bash
# Run structure-prediction inference on target.fasta against the local
# sequence/structure databases under data/.
#
# Useful optional flags (edit the command below):
#   --gpus [N]                              use N gpus for inference
#   --enable_workflow                       use parallel workflow for data processing
#   --use_precomputed_alignments [path]     use precomputed msa
#   --chunk_size [N]                        use chunk to reduce peak memory
#   --inplace                               use inplace to save memory
set -euo pipefail

# "$(command -v tool)" resolves each alignment binary from PATH; quoted so an
# empty result (tool missing) doesn't silently word-split away the argument.
python inference.py target.fasta data/pdb_mmcif/mmcif_files \
    --output_dir ./ \
    --gpus 2 \
    --param_path /data/params/params_model_1.npz \
    --uniref90_database_path data/uniref90/uniref90.fasta \
    --mgnify_database_path data/mgnify/mgy_clusters_2018_12.fa \
    --pdb70_database_path data/pdb70/pdb70 \
    --uniclust30_database_path data/uniclust30/uniclust30_2018_08/uniclust30_2018_08 \
    --bfd_database_path data/bfd/bfd_metaclust_clu_complete_id30_c90_final_seq.sorted_opt \
    --jackhmmer_binary_path "$(command -v jackhmmer)" \
    --hhblits_binary_path "$(command -v hhblits)" \
    --hhsearch_binary_path "$(command -v hhsearch)" \
    --kalign_binary_path "$(command -v kalign)" \
    --chunk_size 4 \
    --inplace