#!/usr/bin/env bash
# Run structure-prediction inference on target.fasta against the local
# sequence databases, using 2 GPUs and the parallel data-processing workflow.
#
# Optional flags for inference.py:
#   --gpus [N]                              use N gpus for inference
#   --enable_workflow                       use parallel workflow for data processing
#   --use_precomputed_alignments [path]     use precomputed msa
#   --chunk_size [N]                        use chunk to reduce peak memory
#   --inplace                               use inplace to save memory
set -euo pipefail

python inference.py target.fasta data/pdb_mmcif/mmcif_files \
    --output_dir ./outputs \
    --gpus 2 \
    --uniref90_database_path data/uniref90/uniref90.fasta \
    --mgnify_database_path data/mgnify/mgy_clusters_2022_05.fa \
    --pdb70_database_path data/pdb70/pdb70 \
    --uniref30_database_path data/uniref30/UniRef30_2021_03 \
    --bfd_database_path data/bfd/bfd_metaclust_clu_complete_id30_c90_final_seq.sorted_opt \
    --jackhmmer_binary_path "$(command -v jackhmmer)" \
    --hhblits_binary_path "$(command -v hhblits)" \
    --hhsearch_binary_path "$(command -v hhsearch)" \
    --kalign_binary_path "$(command -v kalign)" \
    --enable_workflow \
    --inplace