ModelZoo / ResNet50_tensorflow / Commits / 3219a8da

Commit 3219a8da, authored Jan 19, 2021 by Hongkun Yu; committed by A. Unique TensorFlower, Jan 19, 2021.

    Internal change

    PiperOrigin-RevId: 352676136

Parent: 799f65f5
Showing 4 changed files with 244 additions and 154 deletions.
  official/modeling/multitask/train_lib.py         +20    -7
  official/nlp/continuous_finetune_lib.py         +211    -0
  official/nlp/continuous_finetune_lib_test.py      +9    -6
  official/nlp/train_ctl_continuous_finetune.py     +4  -141
official/modeling/multitask/train_lib.py  (view file @ 3219a8da)

@@ -27,10 +27,14 @@ from official.modeling.multitask import multitask
 def run_experiment_wtih_multitask_eval(
     *,
     distribution_strategy: tf.distribute.Strategy,
     train_task: base_task.Task,
     eval_tasks: multitask.MultiTask,
     mode: str,
     params: configs.MultiEvalExperimentConfig,
-    model_dir: str) -> tf.keras.Model:
+    model_dir: str,
+    run_post_eval: bool = False,
+    save_summary: bool = True) -> tf.keras.Model:
   """Runs train/eval configured by the experiment params.

   Args:
@@ -41,6 +45,9 @@ def run_experiment_wtih_multitask_eval(
       or 'continuous_eval'.
     params: MultiEvalExperimentConfig instance.
     model_dir: A 'str', a path to store model checkpoints and summaries.
+    run_post_eval: Whether to run post eval once after training, metrics logs
+      are returned.
+    save_summary: Whether to save train and validation summary.

   Returns:
     model: `tf.keras.Model` instance.
@@ -92,9 +99,11 @@ def run_experiment_wtih_multitask_eval(
       global_step=global_step,
       steps_per_loop=params.trainer.steps_per_loop,
       checkpoint_manager=checkpoint_manager,
-      summary_dir=os.path.join(model_dir, 'train'),
-      eval_summary_dir=os.path.join(model_dir, 'validation'),
-      summary_interval=params.trainer.summary_interval)
+      summary_dir=os.path.join(model_dir, 'train') if save_summary else None,
+      eval_summary_dir=os.path.join(model_dir, 'validation') if
+      (save_summary) else None,
+      summary_interval=params.trainer.summary_interval if
+      (save_summary) else None)

   logging.info('Starts to execute mode: %s', mode)
   with distribution_strategy.scope():
@@ -121,4 +130,8 @@ def run_experiment_wtih_multitask_eval(
   else:
     raise NotImplementedError('The mode is not implemented: %s' % mode)

-  return model
+  if run_post_eval:
+    return model, evaluator.evaluate(
+        tf.convert_to_tensor(params.trainer.validation_steps))
+  else:
+    return model, {}
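The change above makes run_experiment_wtih_multitask_eval return a (model, eval_logs) pair instead of a bare model, with meaningful eval_logs only when run_post_eval=True. A minimal calling sketch under that assumption follows; strategy, train_task, eval_tasks, experiment_params, and the model_dir path are hypothetical placeholders, not objects defined in this commit:

# Hypothetical caller of the updated API; all bindings below are assumed
# to already exist and are not created by this commit.
model, eval_logs = multitask_train_lib.run_experiment_wtih_multitask_eval(
    distribution_strategy=strategy,
    train_task=train_task,
    eval_tasks=eval_tasks,
    mode='train_and_eval',
    params=experiment_params,
    model_dir='/tmp/multitask_model_dir',
    run_post_eval=True,   # also run evaluation once after training
    save_summary=False)   # suppress train/validation summary writers
# eval_logs is {} when run_post_eval is False; otherwise it holds the metrics
# from evaluator.evaluate(params.trainer.validation_steps).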
official/nlp/continuous_finetune_lib.py  (new file, 0 → 100644, view file @ 3219a8da)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFM continuous finetuning+eval training driver library."""
import gc
import os
import time
from typing import Any, Mapping, Optional

from absl import logging
import tensorflow as tf

from official.common import distribute_utils
from official.core import config_definitions
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.modeling.multitask import configs
from official.modeling.multitask import multitask
from official.modeling.multitask import train_lib as multitask_train_lib


def _flatten_dict(xs):
  """Flatten a nested dictionary.

  The nested keys are flattened to a tuple.

  Example::

    xs = {'foo': 1, 'bar': {'a': 2, 'b': {}}}
    flat_xs = flatten_dict(xs)
    print(flat_xs)
    # {
    #   ('foo',): 1,
    #   ('bar', 'a'): 2,
    # }

  Note that empty dictionaries are ignored and
  will not be restored by `unflatten_dict`.

  Args:
    xs: a nested dictionary

  Returns:
    The flattened dictionary.
  """
  assert isinstance(xs, dict), 'input is not a dict'

  def _flatten(xs, prefix):
    if not isinstance(xs, dict):
      return {prefix: xs}
    result = {}
    for key, value in xs.items():
      path = prefix + (key,)
      result.update(_flatten(value, path))
    return result

  return _flatten(xs, ())


def run_continuous_finetune(
    mode: str,
    params: config_definitions.ExperimentConfig,
    model_dir: str,
    run_post_eval: bool = False,
    pretrain_steps: Optional[int] = None,
) -> Mapping[str, Any]:
  """Run modes with continuous training.

  Currently only supports continuous_train_and_eval.

  Args:
    mode: A 'str', specifying the mode. continuous_train_and_eval - monitors a
      checkpoint directory. Once a new checkpoint is discovered, loads the
      checkpoint, finetune the model by training it (probably on another
      dataset or with another task), then evaluate the finetuned model.
    params: ExperimentConfig instance.
    model_dir: A 'str', a path to store model checkpoints and summaries.
    run_post_eval: Whether to run post eval once after training, metrics logs
      are returned.
    pretrain_steps: Optional, the number of total training steps for the
      pretraining job.

  Returns:
    eval logs: returns eval metrics logs when run_post_eval is set to True,
      othewise, returns {}.
  """
  assert mode == 'continuous_train_and_eval', (
      'Only continuous_train_and_eval is supported by continuous_finetune. '
      'Got mode: {}'.format(mode))

  # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
  # can have significant impact on model speeds by utilizing float16 in case of
  # GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
  # dtype is float16.
  if params.runtime.mixed_precision_dtype:
    performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype,
                                           params.runtime.loss_scale)
  distribution_strategy = distribute_utils.get_distribution_strategy(
      distribution_strategy=params.runtime.distribution_strategy,
      all_reduce_alg=params.runtime.all_reduce_alg,
      num_gpus=params.runtime.num_gpus,
      tpu_address=params.runtime.tpu)

  retry_times = 0
  while not tf.io.gfile.isdir(params.task.init_checkpoint):
    # Wait for the init_checkpoint directory to be created.
    if retry_times >= 60:
      raise ValueError(
          'ExperimentConfig.task.init_checkpoint must be a directory for '
          'continuous_train_and_eval mode.')
    retry_times += 1
    time.sleep(60)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(model_dir, 'eval'))

  global_step = 0

  def timeout_fn():
    if pretrain_steps and global_step < pretrain_steps:
      # Keeps waiting for another timeout period.
      logging.info(
          'Continue waiting for new checkpoint as current pretrain '
          'global_step=%d and target is %d.', global_step, pretrain_steps)
      return False
    # Quits the loop.
    return True

  for pretrain_ckpt in tf.train.checkpoints_iterator(
      checkpoint_dir=params.task.init_checkpoint,
      min_interval_secs=10,
      timeout=params.trainer.continuous_eval_timeout,
      timeout_fn=timeout_fn):
    with distribution_strategy.scope():
      global_step = train_utils.read_global_step_from_checkpoint(pretrain_ckpt)

    # Replaces params.task.init_checkpoint to make sure that we load
    # exactly this pretrain checkpoint.
    if params.trainer.best_checkpoint_export_subdir:
      best_ckpt_subdir = '{}_{}'.format(
          params.trainer.best_checkpoint_export_subdir, global_step)
      params_replaced = params.replace(
          task={'init_checkpoint': pretrain_ckpt},
          trainer={'best_checkpoint_export_subdir': best_ckpt_subdir})
    else:
      params_replaced = params.replace(task={'init_checkpoint': pretrain_ckpt})
    params_replaced.lock()
    logging.info('Running finetuning with params: %s', params_replaced)

    with distribution_strategy.scope():
      if isinstance(params, configs.MultiEvalExperimentConfig):
        task = task_factory.get_task(params_replaced.task)
        eval_tasks = multitask.MultiTask.from_config(params_replaced.eval_tasks)
        (_, eval_metrics) = multitask_train_lib.run_experiment_wtih_multitask_eval(
            distribution_strategy=distribution_strategy,
            train_task=task,
            eval_tasks=eval_tasks,
            mode='train_and_eval',
            params=params_replaced,
            model_dir=model_dir,
            run_post_eval=True,
            save_summary=False)
      else:
        task = task_factory.get_task(params_replaced.task,
                                     logging_dir=model_dir)
        _, eval_metrics = train_lib.run_experiment(
            distribution_strategy=distribution_strategy,
            task=task,
            mode='train_and_eval',
            params=params_replaced,
            model_dir=model_dir,
            run_post_eval=True,
            save_summary=False)
    logging.info('Evaluation finished. Pretrain global_step: %d', global_step)
    train_utils.write_json_summary(model_dir, global_step, eval_metrics)

    if not os.path.basename(model_dir):
      # if model_dir.endswith('/')
      summary_grp = os.path.dirname(model_dir) + '_' + task.name
    else:
      summary_grp = os.path.basename(model_dir) + '_' + task.name
    summaries = {}
    for name, value in _flatten_dict(eval_metrics).items():
      summaries[summary_grp + '/' + '-'.join(name)] = value
    train_utils.write_summary(summary_writer, global_step, summaries)

    train_utils.remove_ckpts(model_dir)
    # In TF2, the resource life cycle is bound with the python object life
    # cycle. Force trigger python garbage collection here so those resources
    # can be deallocated in time, so it doesn't cause OOM when allocating new
    # objects.
    # TODO(b/169178664): Fix cycle reference in Keras model and revisit to see
    # if we need gc here.
    gc.collect()

  if run_post_eval:
    return eval_metrics
  return {}
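One subtle piece of the new library is the summary naming: _flatten_dict collapses nested eval metric dictionaries into tuple keys, which are then joined with '-' and prefixed with a group derived from model_dir and the task name. A small standalone illustration follows; the metric names and values are invented, and _flatten below is a local restatement of the helper, not an import from this commit:

# Standalone illustration (not part of the commit) of how nested eval metrics
# become flat summary names in run_continuous_finetune.
def _flatten(xs, prefix=()):
  # Same idea as _flatten_dict above: nested keys become a tuple.
  if not isinstance(xs, dict):
    return {prefix: xs}
  result = {}
  for key, value in xs.items():
    result.update(_flatten(value, prefix + (key,)))
  return result

eval_metrics = {'squad': {'exact_match': 81.2, 'f1': 88.5}, 'loss': 1.7}
summary_grp = 'my_model_dir_squad'  # basename(model_dir) + '_' + task.name

summaries = {summary_grp + '/' + '-'.join(name): value
             for name, value in _flatten(eval_metrics).items()}
print(summaries)
# {'my_model_dir_squad/squad-exact_match': 81.2,
#  'my_model_dir_squad/squad-f1': 88.5,
#  'my_model_dir_squad/loss': 1.7}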
official/nlp/train_ctl_continuous_finetune_test.py → official/nlp/continuous_finetune_lib_test.py  (view file @ 3219a8da)

-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,11 +18,15 @@ from absl import flags
 from absl.testing import flagsaver
 from absl.testing import parameterized
 import tensorflow as tf

+# pylint: disable=unused-import
+from official.common import registry_imports
+# pylint: enable=unused-import
 from official.common import flags as tfm_flags
 from official.core import task_factory
 from official.core import train_lib
 from official.core import train_utils
-from official.nlp import train_ctl_continuous_finetune
+from official.nlp import continuous_finetune_lib

 FLAGS = flags.FLAGS
@@ -36,8 +39,8 @@ class ContinuousFinetuneTest(tf.test.TestCase, parameterized.TestCase):
     super().setUp()
     self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')

-  def testTrainCtl(self):
-    pretrain_steps = 1
+  @parameterized.parameters(None, 1)
+  def testContinuousFinetune(self, pretrain_steps):
     src_model_dir = self.get_temp_dir()
     flags_dict = dict(
         experiment='mock',
@@ -79,7 +82,7 @@ class ContinuousFinetuneTest(tf.test.TestCase, parameterized.TestCase):
         model_dir=src_model_dir)
     params = train_utils.parse_configuration(FLAGS)

-    eval_metrics = train_ctl_continuous_finetune.run_continuous_finetune(
+    eval_metrics = continuous_finetune_lib.run_continuous_finetune(
         FLAGS.mode,
         params,
         FLAGS.model_dir,
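Besides the rename, the test drops the hard-coded pretrain_steps = 1 in favor of absl's parameterized decorator, so the same test body now runs once per parameter value. A minimal sketch of that pattern, independent of this repository's tasks (the class and method names below are invented for illustration):

# Standalone sketch (not from the commit) of how @parameterized.parameters
# expands one test method into several cases.
from absl.testing import parameterized
import tensorflow as tf


class PretrainStepsTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.parameters(None, 1)
  def testAcceptsPretrainSteps(self, pretrain_steps):
    # Runs twice: once with pretrain_steps=None and once with pretrain_steps=1,
    # mirroring how testContinuousFinetune is now driven.
    self.assertIn(pretrain_steps, (None, 1))


if __name__ == '__main__':
  tf.test.main()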
official/nlp/train_ctl_continuous_finetune.py  (view file @ 3219a8da)

@@ -14,27 +14,16 @@
 # limitations under the License.
 # ==============================================================================
 """TFM continuous finetuning+eval training driver."""
-import gc
-import os
-import time
-from typing import Any, Mapping, Optional

 from absl import app
 from absl import flags
-from absl import logging
 import gin
-import tensorflow as tf

 # pylint: disable=unused-import
 from official.common import registry_imports
 # pylint: enable=unused-import
-from official.common import distribute_utils
 from official.common import flags as tfm_flags
-from official.core import config_definitions
-from official.core import task_factory
-from official.core import train_lib
 from official.core import train_utils
-from official.modeling import performance
+from official.nlp import continuous_finetune_lib

 FLAGS = flags.FLAGS
@@ -44,140 +33,14 @@ flags.DEFINE_integer(
     help='The number of total training steps for the pretraining job.')

-def run_continuous_finetune(
-    mode: str,
-    params: config_definitions.ExperimentConfig,
-    model_dir: str,
-    run_post_eval: bool = False,
-    pretrain_steps: Optional[int] = None,
-) -> Mapping[str, Any]:
-  """Run modes with continuous training.
-
-  Currently only supports continuous_train_and_eval.
-
-  Args:
-    mode: A 'str', specifying the mode. continuous_train_and_eval - monitors a
-      checkpoint directory. Once a new checkpoint is discovered, loads the
-      checkpoint, finetune the model by training it (probably on another
-      dataset or with another task), then evaluate the finetuned model.
-    params: ExperimentConfig instance.
-    model_dir: A 'str', a path to store model checkpoints and summaries.
-    run_post_eval: Whether to run post eval once after training, metrics logs
-      are returned.
-    pretrain_steps: Optional, the number of total training steps for the
-      pretraining job.
-
-  Returns:
-    eval logs: returns eval metrics logs when run_post_eval is set to True,
-      othewise, returns {}.
-  """
-  assert mode == 'continuous_train_and_eval', (
-      'Only continuous_train_and_eval is supported by continuous_finetune. '
-      'Got mode: {}'.format(mode))
-
-  # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
-  # can have significant impact on model speeds by utilizing float16 in case of
-  # GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
-  # dtype is float16
-  if params.runtime.mixed_precision_dtype:
-    performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype,
-                                           params.runtime.loss_scale)
-  distribution_strategy = distribute_utils.get_distribution_strategy(
-      distribution_strategy=params.runtime.distribution_strategy,
-      all_reduce_alg=params.runtime.all_reduce_alg,
-      num_gpus=params.runtime.num_gpus,
-      tpu_address=params.runtime.tpu)
-
-  retry_times = 0
-  while not tf.io.gfile.isdir(params.task.init_checkpoint):
-    # Wait for the init_checkpoint directory to be created.
-    if retry_times >= 60:
-      raise ValueError(
-          'ExperimentConfig.task.init_checkpoint must be a directory for '
-          'continuous_train_and_eval mode.')
-    retry_times += 1
-    time.sleep(60)
-
-  summary_writer = tf.summary.create_file_writer(
-      os.path.join(model_dir, 'eval'))
-
-  global_step = 0
-
-  def timeout_fn():
-    if pretrain_steps and global_step < pretrain_steps:
-      # Keeps waiting for another timeout period.
-      logging.info(
-          'Continue waiting for new checkpoint as current pretrain '
-          'global_step=%d and target is %d.', global_step, pretrain_steps)
-      return False
-    # Quits the loop.
-    return True
-
-  for pretrain_ckpt in tf.train.checkpoints_iterator(
-      checkpoint_dir=params.task.init_checkpoint,
-      min_interval_secs=10,
-      timeout=params.trainer.continuous_eval_timeout,
-      timeout_fn=timeout_fn):
-    with distribution_strategy.scope():
-      global_step = train_utils.read_global_step_from_checkpoint(pretrain_ckpt)
-
-    if params.trainer.best_checkpoint_export_subdir:
-      best_ckpt_subdir = '{}_{}'.format(
-          params.trainer.best_checkpoint_export_subdir, global_step)
-      params_replaced = params.replace(
-          task={'init_checkpoint': pretrain_ckpt},
-          trainer={'best_checkpoint_export_subdir': best_ckpt_subdir})
-    else:
-      params_replaced = params.replace(task={'init_checkpoint': pretrain_ckpt})
-    params_replaced.lock()
-    logging.info('Running finetuning with params: %s', params_replaced)
-
-    with distribution_strategy.scope():
-      task = task_factory.get_task(params_replaced.task, logging_dir=model_dir)
-      _, eval_metrics = train_lib.run_experiment(
-          distribution_strategy=distribution_strategy,
-          task=task,
-          mode='train_and_eval',
-          # replace params.task.init_checkpoint to make sure that we load
-          # exactly this pretrain checkpoint.
-          params=params_replaced,
-          model_dir=model_dir,
-          run_post_eval=True,
-          save_summary=False)
-    logging.info('Evaluation finished. Pretrain global_step: %d', global_step)
-    train_utils.write_json_summary(model_dir, global_step, eval_metrics)
-
-    if not os.path.basename(model_dir):
-      # if model_dir.endswith('/')
-      summary_grp = os.path.dirname(model_dir) + '_' + task.name
-    else:
-      summary_grp = os.path.basename(model_dir) + '_' + task.name
-    summaries = {}
-    for name, value in eval_metrics.items():
-      summaries[summary_grp + '/' + name] = value
-    train_utils.write_summary(summary_writer, global_step, summaries)
-
-    train_utils.remove_ckpts(model_dir)
-    # In TF2, the resource life cycle is bound with the python object life
-    # cycle. Force trigger python garbage collection here so those resources
-    # can be deallocated in time, so it doesn't cause OOM when allocating new
-    # objects.
-    # TODO(b/169178664): Fix cycle reference in Keras model and revisit to see
-    # if we need gc here.
-    gc.collect()
-
-  if run_post_eval:
-    return eval_metrics
-  return {}

 def main(_):
+  # TODO(b/177863554): consolidate to nlp/train.py
   gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
   params = train_utils.parse_configuration(FLAGS)
   model_dir = FLAGS.model_dir
   train_utils.serialize_config(params, model_dir)
-  run_continuous_finetune(FLAGS.mode, params, model_dir, FLAGS.pretrain_steps)
+  continuous_finetune_lib.run_continuous_finetune(FLAGS.mode, params, model_dir,
+                                                  FLAGS.pretrain_steps)

 if __name__ == '__main__':
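After this commit the driver script is a thin wrapper: flag definitions and gin setup stay here, while the training loop lives in official/nlp/continuous_finetune_lib.py. A hedged sketch of calling the relocated library directly, assuming params is an already-parsed ExperimentConfig whose task.init_checkpoint points at the pretraining job's checkpoint directory (the path and step count below are placeholders):

# Hypothetical direct use of the relocated library (not part of the commit).
from official.nlp import continuous_finetune_lib

eval_logs = continuous_finetune_lib.run_continuous_finetune(
    mode='continuous_train_and_eval',
    params=params,                      # assumed pre-built ExperimentConfig
    model_dir='/tmp/finetune_model_dir',
    run_post_eval=True,                 # return the last round of eval metrics
    pretrain_steps=100000)              # keep polling until pretraining reaches this step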