OpenDAS / Fairseq · Commit 0d90e35f

More unit test fixes

Authored Feb 15, 2018 by Myle Ott; committed by Sergey Edunov on Feb 27, 2018.
Parent commit: 29c82741
Showing 6 changed files with 10 additions and 12 deletions (+10, -12):

    fairseq/options.py             +5  -2
    fairseq/sequence_generator.py  +2  -1
    generate.py                    +0  -2
    interactive.py                 +0  -2
    tests/test_binaries.py         +2  -4
    train.py                       +1  -1
fairseq/options.py

```diff
@@ -7,6 +7,8 @@
 import argparse

+import torch
+
 from fairseq.criterions import CRITERION_REGISTRY
 from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
 from fairseq.optim import OPTIMIZER_REGISTRY
@@ -117,8 +119,9 @@ def add_dataset_args(parser, train=False, gen=False):
 def add_distributed_training_args(parser):
     group = parser.add_argument_group('Distributed training')
-    group.add_argument('--distributed-world-size', default=1, type=int, metavar='N',
-                       help='total number of GPUs across all nodes, default: 1 GPU')
+    group.add_argument('--distributed-world-size', type=int, metavar='N',
+                       default=torch.cuda.device_count(),
+                       help='total number of GPUs across all nodes (default: all visible GPUs)')
     group.add_argument('--distributed-rank', default=0, type=int,
                        help='rank of the current worker')
     group.add_argument('--distributed-backend', default='nccl', type=str,
```
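A side effect of the new default worth seeing in isolation: `torch.cuda.device_count()` runs when the argument is registered, so the default is fixed at parser-construction time, not at parse time. A minimal sketch of the pattern (plain argparse, not fairseq code):

```python
import argparse

import torch

parser = argparse.ArgumentParser()
group = parser.add_argument_group('Distributed training')
# device_count() executes right here, while the parser is being built,
# so the default reflects the GPUs visible at setup time.
group.add_argument('--distributed-world-size', type=int, metavar='N',
                   default=torch.cuda.device_count(),
                   help='total number of GPUs across all nodes '
                        '(default: all visible GPUs)')

args = parser.parse_args([])
print(args.distributed_world_size)  # e.g. 0 on a CPU-only machine
```

This is also why the updated unit test below mocks `device_count` around the whole `train.main(...)` call rather than passing a flag: the default is resolved while the parser is constructed inside that call.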
fairseq/sequence_generator.py

```diff
@@ -90,6 +90,7 @@ class SequenceGenerator(object):
             for model in self.models:
                 if isinstance(model.decoder, FairseqIncrementalDecoder):
                     stack.enter_context(model.decoder.incremental_inference())
-            return self._generate(src_tokens, src_lengths, beam_size, maxlen)
+            with utils.maybe_no_grad():
+                return self._generate(src_tokens, src_lengths, beam_size, maxlen)

     def _generate(self, src_tokens, src_lengths, beam_size=None, maxlen=None):
```
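`utils.maybe_no_grad` itself is not part of this diff. A plausible implementation, consistent with the version-guarded `hasattr` checks this commit removes from `generate.py` and `interactive.py`, is a context manager that defers to `torch.no_grad()` when the installed PyTorch provides it and is a no-op otherwise — a sketch, not the actual fairseq source:

```python
import contextlib

import torch


@contextlib.contextmanager
def maybe_no_grad(condition=True):
    """Disable gradient tracking when supported, otherwise do nothing.

    Hypothetical reconstruction of fairseq's utils.maybe_no_grad: older
    PyTorch releases predate torch.no_grad, so the call is feature-gated.
    """
    if condition and hasattr(torch, 'no_grad'):
        with torch.no_grad():
            yield
    else:
        yield
```

Scoping the no-grad region inside `SequenceGenerator.generate` means callers no longer have to manage global autograd state before invoking generation.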
generate.py

```diff
@@ -18,8 +18,6 @@ def main(args):
     print(args)

     use_cuda = torch.cuda.is_available() and not args.cpu
-    if hasattr(torch, 'set_grad_enabled'):
-        torch.set_grad_enabled(False)

     # Load dataset
     if args.replace_unk is None:
```
interactive.py

```diff
@@ -18,8 +18,6 @@ def main(args):
     print(args)

     use_cuda = torch.cuda.is_available() and not args.cpu
-    if hasattr(torch, 'set_grad_enabled'):
-        torch.set_grad_enabled(False)

     # Load ensemble
     print('| loading model(s) from {}'.format(', '.join(args.path)))
```
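Both removals are the same cleanup: instead of flipping PyTorch's process-wide autograd switch up front (guarded by `hasattr` because very old releases lack `set_grad_enabled`), gradient disabling now lives inside the generator where it is actually needed. A small stock-PyTorch illustration of the difference:

```python
import torch

x = torch.ones(2, requires_grad=True)

# Old style: a global switch that affects everything that runs afterwards.
if hasattr(torch, 'set_grad_enabled'):
    torch.set_grad_enabled(False)
    y = x * 2
    torch.set_grad_enabled(True)   # easy to forget to restore
    assert not y.requires_grad

# New style: gradients are off only inside the block that needs them.
with torch.no_grad():
    z = x * 2
assert not z.requires_grad
```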
tests/test_binaries.py

```diff
@@ -11,7 +11,6 @@ import random
 import sys
 import tempfile
 import unittest
+from unittest import mock

 import torch
@@ -84,10 +83,9 @@ class TestBinaries(unittest.TestCase):
                 '--save-dir', data_dir,
                 '--max-epoch', '1',
                 '--no-progress-bar',
-                '--distributed-world-size', '1',
             ],
         )
-        train.main(train_args)
+        with mock.patch('train.torch.cuda.device_count') as device_count:
+            device_count.return_value = 1
+            train.main(train_args)

     def generate(self, data_dir):
```
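The patch target `'train.torch.cuda.device_count'` follows the standard `unittest.mock` rule: patch the name where the code under test looks it up (through the `train` module's `torch` reference), not where it is defined. A self-contained sketch of the same pattern, with an illustrative function standing in for fairseq's trainer:

```python
import unittest
from unittest import mock

import torch


def pick_world_size():
    # Stand-in for the code under test: reads the GPU count at call time.
    return max(1, torch.cuda.device_count())


class TestWorldSize(unittest.TestCase):
    def test_forced_single_gpu(self):
        # Here the lookup goes through the torch module itself, so the
        # attribute is patched on torch.cuda directly.
        with mock.patch('torch.cuda.device_count') as device_count:
            device_count.return_value = 1
            self.assertEqual(pick_world_size(), 1)


if __name__ == '__main__':
    unittest.main()
```

Mocking the device count keeps the test deterministic on multi-GPU machines now that `--distributed-world-size` defaults to every visible GPU.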
train.py

```diff
@@ -19,7 +19,7 @@ def main(args):
     if args.distributed_port > 0 \
             or args.distributed_init_method is not None:
         distributed_main(args)
-    elif torch.cuda.device_count() > 1:
+    elif args.distributed_world_size > 1:
         multiprocessing_main(args)
     else:
         singleprocess_main(args)
```
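With `--distributed-world-size` now defaulting to the visible GPU count in `options.py`, `train.py` can branch on the resolved option instead of probing CUDA a second time, and a single override (as in the test above) forces a single-process run. A runnable sketch of the dispatch, using hypothetical stand-ins for fairseq's three entry points:

```python
import argparse

import torch


def distributed_main(args):
    print('multi-node / explicit init method:', args)


def multiprocessing_main(args):
    print('one process per GPU:', args)


def singleprocess_main(args):
    print('single process:', args)


def main(args):
    # Mirrors train.py after this commit: the world-size option, not a
    # fresh device_count() probe, decides whether to spawn workers.
    if args.distributed_port > 0 or args.distributed_init_method is not None:
        distributed_main(args)
    elif args.distributed_world_size > 1:
        multiprocessing_main(args)
    else:
        singleprocess_main(args)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--distributed-world-size', type=int,
                        default=torch.cuda.device_count())
    parser.add_argument('--distributed-port', type=int, default=-1)
    parser.add_argument('--distributed-init-method', default=None)
    main(parser.parse_args())
```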