OpenDAS / Fairseq · Commits

Commit d494485f
fix raw text for language modeling

Authored Jun 27, 2018 by Alexei Baevski; committed Jul 25, 2018 by Myle Ott.
Parent: 7358296b
Showing 2 changed files with 3 additions and 3 deletions:

- fairseq/data/token_block_dataset.py (+2, -2)
- fairseq/tasks/language_modeling.py (+1, -1)
fairseq/data/token_block_dataset.py

```diff
@@ -47,7 +47,7 @@ class TokenBlockDataset(torch.utils.data.Dataset):
             self.slice_indices = [block_at(i) for i in range(length)]
         elif break_mode == 'complete':
-            assert sizes is not None and sum(sizes) == len(tokens)
+            assert sizes is not None and sum(sizes) == len(tokens), '{} != {}'.format(sum(sizes), len(tokens))
             tok_idx = 0
             sz_idx = 0
             curr_size = 0
@@ -62,7 +62,7 @@ class TokenBlockDataset(torch.utils.data.Dataset):
             if curr_size > 0:
                 self.slice_indices.append((tok_idx, tok_idx + curr_size))
         elif break_mode == 'eos':
-            assert sizes is not None and sum(sizes) == len(tokens)
+            assert sizes is not None and sum(sizes) == len(tokens), '{} != {}'.format(sum(sizes), len(tokens))
             curr = 0
             for sz in sizes:
                 # skip samples with just 1 example (which would be just the eos token)
```
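The only change in this file is to the two assertions: when the per-example sizes do not sum to the length of the flat token stream, the error now reports both numbers. A minimal sketch of the invariant being checked, with made-up values (not from the commit):

```python
# Minimal sketch of the invariant TokenBlockDataset asserts in the
# 'complete' and 'eos' break modes: per-example sizes must sum to the
# length of the flattened token stream. Values below are made up.
tokens = list(range(10))   # hypothetical flat token stream
sizes = [4, 3, 3]          # hypothetical per-example lengths

assert sizes is not None and sum(sizes) == len(tokens), \
    '{} != {}'.format(sum(sizes), len(tokens))

# With mismatched sizes, e.g. sizes = [4, 3], the assertion now fails
# with the message "7 != 10" instead of a bare AssertionError.
```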
fairseq/tasks/language_modeling.py

```diff
@@ -48,7 +48,7 @@ class LanguageModelingTask(FairseqTask):
         path = os.path.join(self.args.data, split)
         if self.args.raw_text and IndexedRawTextDataset.exists(path):
             ds = IndexedRawTextDataset(path, self.dictionary)
-            tokens = ds.tokens_list
+            tokens = [t for l in ds.tokens_list for t in l]
         elif not self.args.raw_text and IndexedInMemoryDataset.exists(path):
             ds = IndexedInMemoryDataset(path, fix_lua_indexing=True)
             tokens = ds.buffer
```
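This is the actual raw-text fix: `IndexedRawTextDataset.tokens_list` holds one tensor of token ids per input line, whereas the binarized path yields a single flat buffer, so the raw-text branch now flattens the per-line tensors before handing them to `TokenBlockDataset`. A self-contained sketch of the flattening, with made-up token ids standing in for the dataset:

```python
import torch

# Hypothetical stand-in for IndexedRawTextDataset.tokens_list: one 1-D
# LongTensor of token ids per input line (values are made up).
tokens_list = [
    torch.LongTensor([4, 15, 23, 2]),  # first line, ending in eos
    torch.LongTensor([7, 9, 2]),       # second line, ending in eos
]

# The fix: flatten the per-line tensors into a single token stream,
# mirroring `tokens = [t for l in ds.tokens_list for t in l]`.
tokens = [t for l in tokens_list for t in l]

# Per-line sizes now sum to the length of the flat stream, which is the
# invariant TokenBlockDataset asserts above.
sizes = [len(l) for l in tokens_list]
assert sum(sizes) == len(tokens), '{} != {}'.format(sum(sizes), len(tokens))
```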