chenpangpang / transformers, commit 83c552d3

Unverified commit authored Jan 06, 2022 by NielsRogge, committed by GitHub on Jan 06, 2022.
Add detectron2 to Github actions (#15053)
Parent: 5ab87cd4

Showing 2 changed files with 11 additions and 9 deletions (+11 / -9):

.github/workflows/self-scheduled.yml    +2 / -0
tests/test_modeling_layoutlmv2.py       +9 / -9
.github/workflows/self-scheduled.yml

@@ -37,6 +37,7 @@ jobs:
             pip install --upgrade pip
             pip install .[integrations,sklearn,testing,onnxruntime,sentencepiece,torch-speech,vision,timm]
             pip install https://github.com/kpu/kenlm/archive/master.zip
+            python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'

         - name: Are GPUs recognized by our DL frameworks
           run: |

@@ -241,6 +242,7 @@ jobs:
             pip install --upgrade pip
             pip install .[integrations,sklearn,testing,onnxruntime,sentencepiece,torch-speech,vision,timm]
             pip install https://github.com/kpu/kenlm/archive/master.zip
+            python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'

         - name: Are GPUs recognized by our DL frameworks
           run: |
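Both hunks add the same install step to the scheduled GPU jobs, so detectron2 (the dependency backing LayoutLMv2's visual backbone) is available when these jobs run and the LayoutLMv2 tests, which require it, can actually execute. A minimal sanity check, not part of this commit, that one could run on the runner to confirm the install:

import detectron2  # illustrative only: verify the package installed by the new CI step is importable
from detectron2.structures import ImageList  # the structure the LayoutLMv2 test below constructs

print("detectron2", detectron2.__version__)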
tests/test_modeling_layoutlmv2.py
@@ -478,11 +478,11 @@ class LayoutLMv2ModelTest(ModelTesterMixin, unittest.TestCase):
 def prepare_layoutlmv2_batch_inputs():
     # Here we prepare a batch of 2 sequences to test a LayoutLMv2 forward pass on:
     # fmt: off
-    input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]],device=torch_device)  # noqa: E231
-    bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]],device=torch_device)  # noqa: E231
+    input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
+    bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
     image = ImageList(torch.randn((2,3,224,224)), image_sizes=[(224,224), (224,224)])  # noqa: E231
-    attention_mask = torch.tensor([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],],device=torch_device)  # noqa: E231
-    token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]],device=torch_device)  # noqa: E231
+    attention_mask = torch.tensor([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],])  # noqa: E231
+    token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
     # fmt: on
     return input_ids, bbox, image, attention_mask, token_type_ids
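The hunk above makes prepare_layoutlmv2_batch_inputs device-agnostic: the tensors are now created on CPU and the caller moves them to the target device, which the second hunk below does with .to(torch_device). A minimal sketch of that pattern, with torch_device resolved locally here for illustration (in the test suite it comes from transformers' testing utilities):

import torch

# Illustration only: in the test suite torch_device is provided by the testing utilities.
torch_device = "cuda" if torch.cuda.is_available() else "cpu"

def prepare_batch():
    # Inputs are built on CPU, with no device argument ...
    input_ids = torch.tensor([[101, 102]])
    attention_mask = torch.ones_like(input_ids)
    return input_ids, attention_mask

input_ids, attention_mask = prepare_batch()
# ... and moved to the target device only at the point of use.
input_ids = input_ids.to(torch_device)
attention_mask = attention_mask.to(torch_device)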
@@ -505,11 +505,11 @@ class LayoutLMv2ModelIntegrationTest(unittest.TestCase):
         # forward pass
         outputs = model(
-            input_ids=input_ids,
-            bbox=bbox,
-            image=image,
-            attention_mask=attention_mask,
-            token_type_ids=token_type_ids,
+            input_ids=input_ids.to(torch_device),
+            bbox=bbox.to(torch_device),
+            image=image.to(torch_device),
+            attention_mask=attention_mask.to(torch_device),
+            token_type_ids=token_type_ids.to(torch_device),
         )
         # verify the sequence output
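Calling .to(torch_device) at the call site works for the ImageList as well as for the plain tensors, since detectron2's ImageList exposes a .to() method that returns a copy on the requested device. A short sketch under that assumption (the sizes mirror the batch above, and detectron2 must be installed, which is what the workflow change provides):

import torch
from detectron2.structures import ImageList

# Build the same kind of ImageList as the test helper, then move it like a tensor.
image = ImageList(torch.randn((2, 3, 224, 224)), image_sizes=[(224, 224), (224, 224)])
image = image.to("cuda" if torch.cuda.is_available() else "cpu")
print(image.tensor.device, image.image_sizes)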