OpenDAS / Megatron-LM

Commit f559787d
authored Mar 31, 2021 by Mostofa Patwary

updated filter_ngrams.py

parent 0c01c2fe
Showing 1 changed file with 14 additions and 5 deletions.

tools/openwebtext/filter_ngrams.py  (+14, -5)
@@ -304,7 +304,7 @@ def get_ngrams_below_threshold(args, ngrams, ngrams_below_threshold, \
     args.get_ngram_freq_only = True

     # Open the large file to process in parallel
-    num_workers = 40
+    num_workers = args.num_threads
     pool = multiprocessing.Pool(num_workers)
     fin = open(dedup_file, 'r', encoding='utf-8')
     free_ngram_abt_partial = partial(free_ngram, args=args, key=dedup_key, \
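The only functional change in this hunk is that the worker count now comes from the new --num-threads option instead of the hard-coded 40. Below is a minimal sketch of the pattern the surrounding lines use (a multiprocessing.Pool plus functools.partial applied to lines of an input file). free_ngram_stub and the in-memory input are stand-ins, since free_ngram and the dedup file are not part of this diff, and whether the script drives the pool with map or imap is not visible in these hunks.

import io
import multiprocessing
from functools import partial

def free_ngram_stub(line, key):
    # Hypothetical stand-in for free_ngram(line, args=..., key=..., ...);
    # the real worker inspects the JSON record carried in `line`.
    return (key, line.strip())

if __name__ == '__main__':
    num_workers = 4                                        # in the script: args.num_threads (was 40)
    pool = multiprocessing.Pool(num_workers)
    fin = io.StringIO('{"text": "a"}\n{"text": "b"}\n')    # stands in for open(dedup_file, 'r', encoding='utf-8')
    worker = partial(free_ngram_stub, key='text')
    for result in pool.imap(worker, fin):
        print(result)
    pool.close()
    pool.join()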
@@ -345,14 +345,15 @@ def clean_ngrams_below_threshold(args, ngrams_below_threshold, dedup_file, \
     start_time = time.time()
     # Now actually filter the dataset
     args.get_ngram_freq_only = False
-    id_prefix = '-'.join(args.tasks[::2])
+    #id_prefix = '-'.join(args.tasks[::2])
+    id_prefix = '-'.join(args.tasks[::1])

     # get the range of the size of the ngrams
     ngrams_freq_sorted = compute_ngram_freq_sorted(args, ngrams_below_threshold)

     # Open the large file to process in parallel
     counter = splitted = ignored = split_mt_thld = trimmed_count = 0
-    num_workers = 40
+    num_workers = args.num_threads
     pool = multiprocessing.Pool(num_workers)
     fin = open(dedup_file, 'r', encoding='utf-8')
     free_ngram_clean_partial = partial(free_ngram, args=args, key=dedup_key, \
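Besides the same num_workers change, this hunk replaces the [::2] stride in the id_prefix with [::1], so every entry of args.tasks is joined into the prefix rather than every second one. A tiny illustration of the slice difference follows; the contents of tasks are invented, since args.tasks is not shown in this diff.

tasks = ['taskA', 'fileA.json', 'taskB', 'fileB.json']   # hypothetical contents of args.tasks

print('-'.join(tasks[::2]))   # old: every second entry  -> taskA-taskB
print('-'.join(tasks[::1]))   # new: every entry         -> taskA-fileA.json-taskB-fileB.json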
@@ -377,10 +378,16 @@ def clean_ngrams_below_threshold(args, ngrams_below_threshold, dedup_file, \
                 split_mt_thld += 1

             if args.output is not None:
+                if "split_id" in myjson:
+                    use_prefix = myjson["split_id"] + "-"
+                else:
+                    use_prefix = ""
+
                 for i in range(len(text_buf_ngram_free)):
                     split_id_string = id_prefix + '-{:010d}'.format(int(\
-                        counter)) + '-{:010d}'.format(int(i))
+                        counter)) + '-{:04d}'.format(int(i))
                     myjson[dedup_key] = text_buf_ngram_free[i]
+                    myjson["split_id"] = use_prefix + split_id_string
                     outjson = json.dumps(myjson, ensure_ascii=False)
                     #outjson = json.dumps({"text":text_buf_ngram_free[i],
                     #    id_prefix+"_split_id":split_id_string},
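The added lines chain a new split identifier onto any split_id already present in the record and write it back into the JSON, and the per-chunk index is now zero-padded to 4 digits instead of 10. Here is a rough sketch of how the identifier composes under that reading; the record, prefix, and counter values are invented for illustration.

import json

def make_split_id(myjson, id_prefix, counter, i):
    # Keep any split id from an earlier pass as a prefix, as the new lines do.
    use_prefix = myjson["split_id"] + "-" if "split_id" in myjson else ""
    # Document counter padded to 10 digits; chunk index padded to 4 (was 10 before this commit).
    return (use_prefix + id_prefix
            + '-{:010d}'.format(int(counter))
            + '-{:04d}'.format(int(i)))

record = {"text": "some deduplicated chunk"}          # invented example record
record["split_id"] = make_split_id(record, "taskA-taskB", counter=7, i=2)
print(json.dumps(record, ensure_ascii=False))
# {"text": "some deduplicated chunk", "split_id": "taskA-taskB-0000000007-0002"}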
@@ -426,6 +433,8 @@ if __name__ == '__main__':
                        ' e.g. cc.json text')
    parser.add_argument('--output', type=str, default=None,
                       help='Output file name to save dedup dataset')
+   parser.add_argument('--num-threads', type=int, default=40,
+                      help='Number of threads to use')
    # Default dedup values
    parser.add_argument('--max-ngram-size', type=int, default=13,
                       help='Maximum size of ngram to use.')
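Because the new --num-threads option defaults to 40, invocations that do not pass it keep the old worker count, while smaller or larger pools can now be requested explicitly. The sketch below shows that default behaviour using only the options visible in this diff; the real script defines more arguments than shown here.

import argparse

parser = argparse.ArgumentParser(description='filter_ngrams options (sketch)')
parser.add_argument('--output', type=str, default=None,
                    help='Output file name to save dedup dataset')
parser.add_argument('--num-threads', type=int, default=40,
                    help='Number of threads to use')
parser.add_argument('--max-ngram-size', type=int, default=13,
                    help='Maximum size of ngram to use.')

print(parser.parse_args([]).num_threads)                      # 40: old hard-coded behaviour preserved
print(parser.parse_args(['--num-threads', '8']).num_threads)  # 8: pools in all three call sites shrink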
@@ -451,7 +460,7 @@ if __name__ == '__main__':
     dedup_key = args.dedup_dataset[1]

     # Setup multi-processing
-    num_workers = 40
+    num_workers = args.num_threads

     if args.load_dictionary is None:
         # Build ngrams