OpenDAS / vision · Commit e946e87d (unverified)

refactor download tests (#7546)

Authored May 02, 2023 by Philip Meier, committed via GitHub on May 02, 2023
Parent: 6381f7b2

Showing 1 changed file with 132 additions and 242 deletions:

test/test_datasets_download.py (+132, -242)
@@ -14,13 +14,7 @@ from urllib.request import Request, urlopen
 import pytest

 from torchvision import datasets
-from torchvision.datasets.utils import (
-    _get_redirect_url,
-    check_integrity,
-    download_file_from_google_drive,
-    download_url,
-    USER_AGENT,
-)
+from torchvision.datasets.utils import _get_redirect_url, USER_AGENT


 def limit_requests_per_time(min_secs_between_requests=2.0):
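For context, the unchanged limit_requests_per_time decorator in the hunk context above throttles the module's outgoing requests; its body is collapsed in this diff. A minimal sketch of what a decorator with this signature typically does (illustrative only, not the file's actual implementation):

import time


def limit_requests_per_time(min_secs_between_requests=2.0):
    # Illustrative sketch: sleep so that consecutive wrapped calls are at least
    # min_secs_between_requests seconds apart.
    last_request = [0.0]

    def outer_wrapper(fn):
        def inner_wrapper(*args, **kwargs):
            elapsed = time.time() - last_request[0]
            if elapsed < min_secs_between_requests:
                time.sleep(min_secs_between_requests - elapsed)
            last_request[0] = time.time()
            return fn(*args, **kwargs)

        return inner_wrapper

    return outer_wrapper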
@@ -84,47 +78,45 @@ urlopen = resolve_redirects()(urlopen)

 @contextlib.contextmanager
 def log_download_attempts(
-    urls_and_md5s=None,
-    file="utils",
-    patch=True,
-    mock_auxiliaries=None,
+    urls,
+    *,
+    dataset_module,
 ):
-    def add_mock(stack, name, file, **kwargs):
+    def maybe_add_mock(*, module, name, stack, lst=None):
+        patcher = unittest.mock.patch(f"torchvision.datasets.{module}.{name}")
+
         try:
-            return stack.enter_context(unittest.mock.patch(f"torchvision.datasets.{file}.{name}", **kwargs))
-        except AttributeError as error:
-            if file != "utils":
-                return add_mock(stack, name, "utils", **kwargs)
-            else:
-                raise pytest.UsageError from error
+            mock = stack.enter_context(patcher)
+        except AttributeError:
+            return

-    if urls_and_md5s is None:
-        urls_and_md5s = set()
-    if mock_auxiliaries is None:
-        mock_auxiliaries = patch
+        if lst is not None:
+            lst.append(mock)

     with contextlib.ExitStack() as stack:
-        url_mock = add_mock(stack, "download_url", file, wraps=None if patch else download_url)
-        google_drive_mock = add_mock(
-            stack, "download_file_from_google_drive", file, wraps=None if patch else download_file_from_google_drive
-        )
-        if mock_auxiliaries:
-            add_mock(stack, "extract_archive", file)
+        download_url_mocks = []
+        download_file_from_google_drive_mocks = []
+        for module in [dataset_module, "utils"]:
+            maybe_add_mock(module=module, name="download_url", stack=stack, lst=download_url_mocks)
+            maybe_add_mock(
+                module=module,
+                name="download_file_from_google_drive",
+                stack=stack,
+                lst=download_file_from_google_drive_mocks,
+            )
+            maybe_add_mock(module=module, name="extract_archive", stack=stack)

         try:
-            yield urls_and_md5s
+            yield
         finally:
-            for args, kwargs in url_mock.call_args_list:
-                url = args[0]
-                md5 = args[-1] if len(args) == 4 else kwargs.get("md5")
-                urls_and_md5s.add((url, md5))
+            for download_url_mock in download_url_mocks:
+                for args, kwargs in download_url_mock.call_args_list:
+                    urls.append(args[0] if args else kwargs["url"])

-            for args, kwargs in google_drive_mock.call_args_list:
-                id = args[0]
-                url = f"https://drive.google.com/file/d/{id}"
-                md5 = args[3] if len(args) == 4 else kwargs.get("md5")
-                urls_and_md5s.add((url, md5))
+            for download_file_from_google_drive_mock in download_file_from_google_drive_mocks:
+                for args, kwargs in download_file_from_google_drive_mock.call_args_list:
+                    file_id = args[0] if args else kwargs["file_id"]
+                    urls.append(f"https://drive.google.com/file/d/{file_id}")


 def retry(fn, times=1, wait=5.0):
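With the refactor, callers pass in a list that the context manager fills with every URL requested, while the download helpers (download_url, download_file_from_google_drive, extract_archive) are patched out in both the dataset's own module and torchvision.datasets.utils. A hedged usage sketch, assuming the module-level ROOT and datasets names defined elsewhere in this file:

urls = []
with contextlib.suppress(Exception), log_download_attempts(urls, dataset_module="mnist"):
    # The download helpers are mocked, so nothing hits the network; the dataset
    # constructor is expected to fail afterwards, hence the suppress().
    datasets.MNIST(ROOT, download=True)
# urls now holds every URL (including reconstructed Google Drive URLs) that was requested.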
@@ -170,45 +162,14 @@ def assert_url_is_accessible(url, timeout=5.0):
     urlopen(request, timeout=timeout)


-def assert_file_downloads_correctly(url, md5, tmpdir, timeout=5.0):
-    file = path.join(tmpdir, path.basename(url))
-    with assert_server_response_ok():
-        with open(file, "wb") as fh:
-            request = Request(url, headers={"User-Agent": USER_AGENT})
-            response = urlopen(request, timeout=timeout)
-            fh.write(response.read())
-
-    assert check_integrity(file, md5=md5), "The MD5 checksums mismatch"
-
-
-class DownloadConfig:
-    def __init__(self, url, md5=None, id=None):
-        self.url = url
-        self.md5 = md5
-        self.id = id or url
-
-    def __repr__(self) -> str:
-        return self.id
-
-
-def make_download_configs(urls_and_md5s, name=None):
-    return [
-        DownloadConfig(url, md5=md5, id=f"{name}, {url}" if name is not None else None) for url, md5 in urls_and_md5s
-    ]
-
-
-def collect_download_configs(dataset_loader, name=None, **kwargs):
-    urls_and_md5s = set()
-    try:
-        with log_download_attempts(urls_and_md5s=urls_and_md5s, **kwargs):
-            dataset = dataset_loader()
-    except Exception:
-        dataset = None
-
-    if name is None and dataset is not None:
-        name = type(dataset).__name__
-
-    return make_download_configs(urls_and_md5s, name)
+def collect_urls(dataset_cls, *args, **kwargs):
+    urls = []
+    with contextlib.suppress(Exception), log_download_attempts(
+        urls, dataset_module=dataset_cls.__module__.split(".")[-1]
+    ):
+        dataset_cls(*args, **kwargs)
+
+    return [(url, f"{dataset_cls.__name__}, {url}") for url in urls]


 # This is a workaround since fixtures, such as the built-in tmp_dir, can only be used within a test but not within a
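The new collect_urls helper bundles the whole pattern: it derives dataset_module from the class's __module__, instantiates the class with downloads mocked, swallows the inevitable failure, and returns (url, id) pairs ready for parametrization. A sketch of what a call returns, with the URLs left symbolic since the concrete values depend on the dataset:

pairs = collect_urls(datasets.CIFAR10, ROOT, download=True)
for url, test_id in pairs:
    # Each id embeds the class name, e.g. "CIFAR10, https://..."
    assert test_id == f"CIFAR10, {url}"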
@@ -223,12 +184,14 @@ def root():


 def places365():
-    return itertools.chain(
-        *[
-            collect_download_configs(
-                lambda: datasets.Places365(ROOT, split=split, small=small, download=True),
-                name=f"Places365, {split}, {'small' if small else 'large'}",
-                file="places365",
-            )
-            for split, small in itertools.product(("train-standard", "train-challenge", "val"), (False, True))
-        ]
+    return itertools.chain.from_iterable(
+        [
+            collect_urls(
+                datasets.Places365,
+                ROOT,
+                split=split,
+                small=small,
+                download=True,
+            )
+            for split, small in itertools.product(("train-standard", "train-challenge", "val"), (False, True))
+        ]
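The mechanical change recurring throughout these collectors is behavior-preserving: itertools.chain(*lists) and itertools.chain.from_iterable(lists) yield the same elements, the latter just avoids unpacking the intermediate list into positional arguments. A quick self-contained check:

import itertools

lists = [["a", "b"], ["c"]]
assert list(itertools.chain(*lists)) == list(itertools.chain.from_iterable(lists)) == ["a", "b", "c"]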
@@ -236,30 +199,26 @@ def places365():


 def caltech101():
-    return collect_download_configs(lambda: datasets.Caltech101(ROOT, download=True), name="Caltech101")
+    return collect_urls(datasets.Caltech101, ROOT, download=True)


 def caltech256():
-    return collect_download_configs(lambda: datasets.Caltech256(ROOT, download=True), name="Caltech256")
+    return collect_urls(datasets.Caltech256, ROOT, download=True)


 def cifar10():
-    return collect_download_configs(lambda: datasets.CIFAR10(ROOT, download=True), name="CIFAR10")
+    return collect_urls(datasets.CIFAR10, ROOT, download=True)


 def cifar100():
-    return collect_download_configs(lambda: datasets.CIFAR100(ROOT, download=True), name="CIFAR100")
+    return collect_urls(datasets.CIFAR100, ROOT, download=True)


 def voc():
     # TODO: Also test the "2007-test" key
-    return itertools.chain(
-        *[
-            collect_download_configs(
-                lambda: datasets.VOCSegmentation(ROOT, year=year, download=True),
-                name=f"VOC, {year}",
-                file="voc",
-            )
-            for year in ("2007", "2008", "2009", "2010", "2011", "2012")
-        ]
-    )
+    return itertools.chain.from_iterable(
+        [
+            collect_urls(datasets.VOCSegmentation, ROOT, year=year, download=True)
+            for year in ("2007", "2008", "2009", "2010", "2011", "2012")
+        ]
+    )
@@ -267,59 +226,42 @@ def voc():


 def mnist():
     with unittest.mock.patch.object(datasets.MNIST, "mirrors", datasets.MNIST.mirrors[-1:]):
-        return collect_download_configs(lambda: datasets.MNIST(ROOT, download=True), name="MNIST")
+        return collect_urls(datasets.MNIST, ROOT, download=True)


 def fashion_mnist():
-    return collect_download_configs(lambda: datasets.FashionMNIST(ROOT, download=True), name="FashionMNIST")
+    return collect_urls(datasets.FashionMNIST, ROOT, download=True)


 def kmnist():
-    return collect_download_configs(lambda: datasets.KMNIST(ROOT, download=True), name="KMNIST")
+    return collect_urls(datasets.KMNIST, ROOT, download=True)


 def emnist():
     # the 'split' argument can be any valid one, since everything is downloaded anyway
-    return collect_download_configs(lambda: datasets.EMNIST(ROOT, split="byclass", download=True), name="EMNIST")
+    return collect_urls(datasets.EMNIST, ROOT, split="byclass", download=True)


 def qmnist():
-    return itertools.chain(
-        *[
-            collect_download_configs(
-                lambda: datasets.QMNIST(ROOT, what=what, download=True),
-                name=f"QMNIST, {what}",
-                file="mnist",
-            )
-            for what in ("train", "test", "nist")
-        ]
+    return itertools.chain.from_iterable(
+        [collect_urls(datasets.QMNIST, ROOT, what=what, download=True) for what in ("train", "test", "nist")]
     )


 def moving_mnist():
-    return collect_download_configs(lambda: datasets.MovingMNIST(ROOT, download=True), name="MovingMNIST")
+    return collect_urls(datasets.MovingMNIST, ROOT, download=True)


 def omniglot():
-    return itertools.chain(
-        *[
-            collect_download_configs(
-                lambda: datasets.Omniglot(ROOT, background=background, download=True),
-                name=f"Omniglot, {'background' if background else 'evaluation'}",
-            )
-            for background in (True, False)
-        ]
+    return itertools.chain.from_iterable(
+        [collect_urls(datasets.Omniglot, ROOT, background=background, download=True) for background in (True, False)]
     )


 def phototour():
-    return itertools.chain(
-        *[
-            collect_download_configs(
-                lambda: datasets.PhotoTour(ROOT, name=name, download=True),
-                name=f"PhotoTour, {name}",
-                file="phototour",
-            )
+    return itertools.chain.from_iterable(
+        [
+            collect_urls(datasets.PhotoTour, ROOT, name=name, download=True)
             # The names postfixed with '_harris' point to the domain 'matthewalunbrown.com'. For some reason all
             # requests timeout from within CI. They are disabled until this is resolved.
             for name in ("notredame", "yosemite", "liberty")  # "notredame_harris", "yosemite_harris", "liberty_harris"
@@ -328,91 +270,51 @@ def phototour():


 def sbdataset():
-    return collect_download_configs(
-        lambda: datasets.SBDataset(ROOT, download=True),
-        name="SBDataset",
-        file="voc",
-    )
+    return collect_urls(datasets.SBDataset, ROOT, download=True)


 def sbu():
-    return collect_download_configs(
-        lambda: datasets.SBU(ROOT, download=True),
-        name="SBU",
-        file="sbu",
-    )
+    return collect_urls(datasets.SBU, ROOT, download=True)


 def semeion():
-    return collect_download_configs(
-        lambda: datasets.SEMEION(ROOT, download=True),
-        name="SEMEION",
-        file="semeion",
-    )
+    return collect_urls(datasets.SEMEION, ROOT, download=True)


 def stl10():
-    return collect_download_configs(
-        lambda: datasets.STL10(ROOT, download=True),
-        name="STL10",
-    )
+    return collect_urls(datasets.STL10, ROOT, download=True)


 def svhn():
-    return itertools.chain(
-        *[
-            collect_download_configs(
-                lambda: datasets.SVHN(ROOT, split=split, download=True),
-                name=f"SVHN, {split}",
-                file="svhn",
-            )
-            for split in ("train", "test", "extra")
-        ]
+    return itertools.chain.from_iterable(
+        [collect_urls(datasets.SVHN, ROOT, split=split, download=True) for split in ("train", "test", "extra")]
     )


 def usps():
-    return itertools.chain(
-        *[
-            collect_download_configs(
-                lambda: datasets.USPS(ROOT, train=train, download=True),
-                name=f"USPS, {'train' if train else 'test'}",
-                file="usps",
-            )
-            for train in (True, False)
-        ]
+    return itertools.chain.from_iterable(
+        [collect_urls(datasets.USPS, ROOT, train=train, download=True) for train in (True, False)]
     )


 def celeba():
-    return collect_download_configs(
-        lambda: datasets.CelebA(ROOT, download=True),
-        name="CelebA",
-        file="celeba",
-    )
+    return collect_urls(datasets.CelebA, ROOT, download=True)


 def widerface():
-    return collect_download_configs(
-        lambda: datasets.WIDERFace(ROOT, download=True),
-        name="WIDERFace",
-        file="widerface",
-    )
+    return collect_urls(datasets.WIDERFace, ROOT, download=True)


 def kinetics():
-    return itertools.chain(
-        *[
-            collect_download_configs(
-                lambda: datasets.Kinetics(
-                    path.join(ROOT, f"Kinetics{num_classes}"),
-                    frames_per_clip=1,
-                    num_classes=num_classes,
-                    split=split,
-                    download=True,
-                ),
-                name=f"Kinetics, {num_classes}, {split}",
-                file="kinetics",
-            )
+    return itertools.chain.from_iterable(
+        [
+            collect_urls(
+                datasets.Kinetics,
+                path.join(ROOT, f"Kinetics{num_classes}"),
+                frames_per_clip=1,
+                num_classes=num_classes,
+                split=split,
+                download=True,
+            )
             for num_classes, split in itertools.product(("400", "600", "700"), ("train", "val"))
         ]
@@ -420,58 +322,55 @@ def kinetics():


 def kitti():
-    return itertools.chain(
-        *[
-            collect_download_configs(
-                lambda train=train: datasets.Kitti(ROOT, train=train, download=True),
-                name=f"Kitti, {'train' if train else 'test'}",
-                file="kitti",
-            )
-            for train in (True, False)
-        ]
+    return itertools.chain.from_iterable(
+        [collect_urls(datasets.Kitti, ROOT, train=train, download=True) for train in (True, False)]
     )


-def make_parametrize_kwargs(download_configs):
-    argvalues = []
-    ids = []
-    for config in download_configs:
-        argvalues.append((config.url, config.md5))
-        ids.append(config.id)
-
-    return dict(argnames=("url", "md5"), argvalues=argvalues, ids=ids)
+def stanford_cars():
+    return itertools.chain.from_iterable(
+        [collect_urls(datasets.StanfordCars, ROOT, split=split, download=True) for split in ["train", "test"]]
+    )


-@pytest.mark.parametrize(
-    **make_parametrize_kwargs(
-        itertools.chain(
-            caltech101(),
-            caltech256(),
-            cifar10(),
-            cifar100(),
-            # The VOC download server is unstable. See https://github.com/pytorch/vision/issues/2953 for details.
-            # voc(),
-            mnist(),
-            fashion_mnist(),
-            kmnist(),
-            emnist(),
-            qmnist(),
-            omniglot(),
-            phototour(),
-            sbdataset(),
-            semeion(),
-            stl10(),
-            svhn(),
-            usps(),
-            celeba(),
-            widerface(),
-            kinetics(),
-            kitti(),
-            places365(),
-        )
-    )
-)
-def test_url_is_accessible(url, md5):
+def url_parametrization(*dataset_urls_and_ids_fns):
+    return pytest.mark.parametrize(
+        "url",
+        [
+            pytest.param(url, id=id)
+            for dataset_urls_and_ids_fn in dataset_urls_and_ids_fns
+            for url, id in sorted(set(dataset_urls_and_ids_fn()))
+        ],
+    )
+
+
+@url_parametrization(
+    caltech101,
+    caltech256,
+    cifar10,
+    cifar100,
+    # The VOC download server is unstable. See https://github.com/pytorch/vision/issues/2953 for details.
+    # voc,
+    mnist,
+    fashion_mnist,
+    kmnist,
+    emnist,
+    qmnist,
+    omniglot,
+    phototour,
+    sbdataset,
+    semeion,
+    stl10,
+    svhn,
+    usps,
+    celeba,
+    widerface,
+    kinetics,
+    kitti,
+    places365,
+    sbu,
+)
+def test_url_is_accessible(url):
     """
     If you see this test failing, find the offending dataset in the parametrization and move it to
     ``test_url_is_not_accessible`` and link an issue detailing the problem.
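The url_parametrization decorator defined above replaces make_parametrize_kwargs: the collectors now return (url, id) pairs, and the decorator sorts and deduplicates them into pytest.param entries parametrized over a single url argument (the md5 parameter disappears along with the checksum test). For a single collector it is roughly equivalent to writing (a sketch, not part of the commit):

@pytest.mark.parametrize(
    "url",
    [pytest.param(url, id=id) for url, id in sorted(set(cifar10()))],
)
def test_something(url):
    ...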
@@ -479,15 +378,11 @@ def test_url_is_accessible(url, md5):
     retry(lambda: assert_url_is_accessible(url))


-@pytest.mark.parametrize(
-    **make_parametrize_kwargs(
-        itertools.chain(
-            sbu(),  # https://github.com/pytorch/vision/issues/7005
-        )
-    )
-)
+@url_parametrization(
+    stanford_cars,  # https://github.com/pytorch/vision/issues/7545
+)
 @pytest.mark.xfail
-def test_url_is_not_accessible(url, md5):
+def test_url_is_not_accessible(url):
     """
     As the name implies, this test is the 'inverse' of ``test_url_is_accessible``. Since the download servers are
     beyond our control, some files might not be accessible for longer stretches of time. Still, we want to know if they
@@ -497,8 +392,3 @@ def test_url_is_not_accessible(url, md5):
     ``test_url_is_accessible``.
     """
     retry(lambda: assert_url_is_accessible(url))
-
-
-@pytest.mark.parametrize(**make_parametrize_kwargs(itertools.chain()))
-def test_file_downloads_correctly(url, md5):
-    retry(lambda: assert_file_downloads_correctly(url, md5))
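Both accessibility tests keep calling retry(lambda: assert_url_is_accessible(url)); retry itself is untouched by this commit and its body is collapsed in the diff. From the signature retry(fn, times=1, wait=5.0) visible in an earlier hunk, a plausible sketch (illustrative, not the file's verbatim code):

import time


def retry(fn, times=1, wait=5.0):
    # Illustrative sketch: call fn up to `times` extra times, sleeping `wait`
    # seconds between attempts, and re-raise the last failure.
    for attempt in range(times + 1):
        try:
            return fn()
        except Exception:
            if attempt == times:
                raise
            time.sleep(wait)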