ModelZoo / ResNet50_tensorflow / Commits

Commit b363df84
Authored May 27, 2021 by Rebecca Chen; committed by A. Unique TensorFlower on May 27, 2021

Internal change

PiperOrigin-RevId: 376298243
Parent: 1a21d1d3

Showing 10 changed files with 58 additions and 52 deletions (+58, -52).
Changed files:

  official/core/base_task.py                                                 +4   -1
  official/modeling/optimization/ema_optimizer.py                            +2   -2
  official/modeling/optimization/optimizer_factory.py                        +3   -3
  official/nlp/modeling/models/xlnet.py                                      +2   -2
  official/vision/beta/modeling/heads/dense_prediction_heads.py              +1   -1
  official/vision/beta/modeling/layers/detection_generator.py                +1   -1
  official/vision/beta/projects/assemblenet/modeling/assemblenet.py          +8   -6
  official/vision/beta/projects/movinet/modeling/movinet_model.py            +13  -12
  official/vision/detection/executor/distributed_executor.py                 +17  -17
  official/vision/image_classification/efficientnet/efficientnet_model.py    +7   -7
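All ten diffs below apply the same kind of change: parameters whose default is None get an explicit Optional[...] annotation instead of relying on PEP 484's implicit-Optional reading, which strict type checkers may reject (mypy, for example, has a --no-implicit-optional mode), and Optional is added to the typing imports where it was missing. A minimal sketch of the pattern, using an illustrative function that is not part of the diff:

  # Before: the default is None but the annotation says plain str; only the
  # implicit-Optional convention makes this pass a type checker.
  def load(path: str = None):
    ...

  # After: the Optional is spelled out, which strict checkers require.
  from typing import Optional

  def load(path: Optional[str] = None):
    ...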
official/core/base_task.py

@@ -38,7 +38,10 @@ class Task(tf.Module, metaclass=abc.ABCMeta):
   # Special keys in train/validate step returned logs.
   loss = "loss"
 
-  def __init__(self, params, logging_dir: str = None, name: str = None):
+  def __init__(self,
+               params,
+               logging_dir: Optional[str] = None,
+               name: Optional[str] = None):
     """Task initialization.
 
     Args:
official/modeling/optimization/ema_optimizer.py

@@ -14,7 +14,7 @@
 """Exponential moving average optimizer."""
 
-from typing import Text, List
+from typing import List, Optional, Text
 
 import tensorflow as tf

@@ -106,7 +106,7 @@ class ExponentialMovingAverage(tf.keras.optimizers.Optimizer):
   def _create_slots(self, var_list):
     self._optimizer._create_slots(var_list=var_list)  # pylint: disable=protected-access
 
-  def apply_gradients(self, grads_and_vars, name: Text = None):
+  def apply_gradients(self, grads_and_vars, name: Optional[Text] = None):
     result = self._optimizer.apply_gradients(grads_and_vars, name)
     self.update_average(self.iterations)
     return result
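The changed apply_gradients signature delegates to the wrapped optimizer and then refreshes the moving averages, with `name` now explicitly optional. For orientation, a generic, hedged sketch of driving apply_gradients with a stock Keras optimizer (variable names are illustrative; this is not the EMA wrapper itself):

  import tensorflow as tf

  optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
  var = tf.Variable(1.0)

  with tf.GradientTape() as tape:
    loss = tf.square(var)
  grads = tape.gradient(loss, [var])

  # `name` may be omitted or passed as None under an Optional[Text] annotation.
  optimizer.apply_gradients(zip(grads, [var]))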
official/modeling/optimization/optimizer_factory.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 """Optimizer factory class."""
-from typing import Callable, Union
+from typing import Callable, Optional, Union
 
 import gin
 import tensorflow as tf

@@ -134,8 +134,8 @@ class OptimizerFactory:
   def build_optimizer(
       self,
       lr: Union[tf.keras.optimizers.schedules.LearningRateSchedule, float],
-      postprocessor: Callable[[tf.keras.optimizers.Optimizer],
-                              tf.keras.optimizers.Optimizer] = None):
+      postprocessor: Optional[Callable[[tf.keras.optimizers.Optimizer],
+                                       tf.keras.optimizers.Optimizer]] = None):
     """Build optimizer.
 
     Builds optimizer from config. It takes learning rate as input, and builds
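The postprocessor argument of build_optimizer is now an explicitly optional callable from optimizer to optimizer. A hedged sketch of a callable that satisfies the annotation (a stand-in, not code from the factory; a real postprocessor might wrap the optimizer, e.g. for EMA or loss scaling):

  import tensorflow as tf

  def inspect_postprocessor(
      optimizer: tf.keras.optimizers.Optimizer) -> tf.keras.optimizers.Optimizer:
    # Stand-in: inspect the freshly built optimizer and return it unchanged.
    print('Built optimizer:', type(optimizer).__name__)
    return optimizer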
official/nlp/modeling/models/xlnet.py

@@ -15,7 +15,7 @@
 """XLNet models."""
 # pylint: disable=g-classes-have-attributes
-from typing import Any, Mapping, Union
+from typing import Any, Mapping, Optional, Union
 
 import tensorflow as tf

@@ -99,7 +99,7 @@ class XLNetPretrainer(tf.keras.Model):
       network: Union[tf.keras.layers.Layer, tf.keras.Model],
       mlm_activation=None,
       mlm_initializer='glorot_uniform',
-      name: str = None,
+      name: Optional[str] = None,
       **kwargs):
     super().__init__(name=name, **kwargs)
     self._config = {
official/vision/beta/modeling/heads/dense_prediction_heads.py

@@ -36,7 +36,7 @@ class RetinaNetHead(tf.keras.layers.Layer):
       num_anchors_per_location: int,
       num_convs: int = 4,
       num_filters: int = 256,
-      attribute_heads: List[Dict[str, Any]] = None,
+      attribute_heads: Optional[List[Dict[str, Any]]] = None,
       use_separable_conv: bool = False,
       activation: str = 'relu',
       use_sync_bn: bool = False,
official/vision/beta/modeling/layers/detection_generator.py

@@ -593,7 +593,7 @@ class MultilevelDetectionGenerator(tf.keras.layers.Layer):
       raw_scores: Mapping[str, tf.Tensor],
       anchor_boxes: tf.Tensor,
       image_shape: tf.Tensor,
-      raw_attributes: Mapping[str, tf.Tensor] = None):
+      raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
     """Generates final detections.
 
     Args:
official/vision/beta/projects/assemblenet/modeling/assemblenet.py

@@ -411,7 +411,7 @@ class _ApplyEdgeWeight(layers.Layer):
   def __init__(self,
                weights_shape,
-               index: int = None,
+               index: Optional[int] = None,
                use_5d_mode: bool = False,
                model_edge_weights: Optional[List[Any]] = None,
                **kwargs):

@@ -471,7 +471,7 @@ class _ApplyEdgeWeight(layers.Layer):
   def call(self,
            inputs: List[tf.Tensor],
-           training: bool = None) -> Mapping[Any, List[tf.Tensor]]:
+           training: Optional[bool] = None) -> Mapping[Any, List[tf.Tensor]]:
     use_5d_mode = self._use_5d_mode
     dtype = inputs[0].dtype
     assert len(inputs) > 1

@@ -517,7 +517,7 @@ class _ApplyEdgeWeight(layers.Layer):
 def multi_connection_fusion(inputs: List[tf.Tensor],
-                            index: int = None,
+                            index: Optional[int] = None,
                             use_5d_mode: bool = False,
                             model_edge_weights: Optional[List[Any]] = None):
   """Do weighted summation of multiple different sized tensors.

@@ -893,7 +893,8 @@ class AssembleNetModel(tf.keras.Model):
                num_classes,
               num_frames: int,
               model_structure: List[Any],
-               input_specs: Mapping[str, tf.keras.layers.InputSpec] = None,
+               input_specs: Optional[Mapping[str,
+                                             tf.keras.layers.InputSpec]] = None,
               max_pool_preditions: bool = False,
               **kwargs):
     if not input_specs:

@@ -1018,7 +1019,8 @@ def build_assemblenet_v1(
     input_specs: tf.keras.layers.InputSpec,
     backbone_config: hyperparams.Config,
     norm_activation_config: hyperparams.Config,
-    l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:
+    l2_regularizer: Optional[
+        tf.keras.regularizers.Regularizer] = None) -> tf.keras.Model:
   """Builds assemblenet backbone."""
   del l2_regularizer

@@ -1058,7 +1060,7 @@ def build_assemblenet_model(
     input_specs: tf.keras.layers.InputSpec,
     model_config: cfg.AssembleNetModel,
     num_classes: int,
-    l2_regularizer: tf.keras.regularizers.Regularizer = None):
+    l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
   """Builds assemblenet model."""
   input_specs_dict = {'image': input_specs}
   backbone = build_assemblenet_v1(input_specs, model_config.backbone,
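Both builder functions now annotate l2_regularizer as an explicitly optional tf.keras.regularizers.Regularizer. A hedged sketch of values that satisfy the annotation (construction of the surrounding configs is omitted):

  import tensorflow as tf

  weight_decay = tf.keras.regularizers.L2(1e-4)  # a concrete Regularizer instance
  no_decay = None                                # None remains a valid value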
official/vision/beta/projects/movinet/modeling/movinet_model.py

@@ -16,7 +16,7 @@
 Reference: https://arxiv.org/pdf/2103.11511.pdf
 """
 
-from typing import Mapping
+from typing import Mapping, Optional
 
 from absl import logging
 import tensorflow as tf

@@ -31,16 +31,17 @@ from official.vision.beta.projects.movinet.modeling import movinet_layers
 class MovinetClassifier(tf.keras.Model):
   """A video classification class builder."""
 
-  def __init__(self,
-               backbone: tf.keras.Model,
-               num_classes: int,
-               input_specs: Mapping[str, tf.keras.layers.InputSpec] = None,
-               dropout_rate: float = 0.0,
-               kernel_initializer: str = 'HeNormal',
-               kernel_regularizer: tf.keras.regularizers.Regularizer = None,
-               bias_regularizer: tf.keras.regularizers.Regularizer = None,
-               output_states: bool = False,
-               **kwargs):
+  def __init__(
+      self,
+      backbone: tf.keras.Model,
+      num_classes: int,
+      input_specs: Optional[Mapping[str, tf.keras.layers.InputSpec]] = None,
+      dropout_rate: float = 0.0,
+      kernel_initializer: str = 'HeNormal',
+      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
+      bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
+      output_states: bool = False,
+      **kwargs):
     """Movinet initialization function.
 
     Args:

@@ -144,7 +145,7 @@ def build_movinet_model(
     input_specs: tf.keras.layers.InputSpec,
     model_config: cfg.MovinetModel,
     num_classes: int,
-    l2_regularizer: tf.keras.regularizers.Regularizer = None):
+    l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
   """Builds movinet model."""
   logging.info('Building movinet model with num classes: %s', num_classes)
 
   if l2_regularizer is not None:
official/vision/detection/executor/distributed_executor.py

@@ -322,21 +322,21 @@ class DistributedExecutor(object):
     return test_step
 
-  def train(self,
-            train_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset],
-            eval_input_fn: Callable[[params_dict.ParamsDict],
-                                    tf.data.Dataset] = None,
-            model_dir: Text = None,
-            total_steps: int = 1,
-            iterations_per_loop: int = 1,
-            train_metric_fn: Callable[[], Any] = None,
-            eval_metric_fn: Callable[[], Any] = None,
-            summary_writer_fn: Callable[[Text, Text],
-                                        SummaryWriter] = SummaryWriter,
-            init_checkpoint: Callable[[tf.keras.Model], Any] = None,
-            custom_callbacks: List[tf.keras.callbacks.Callback] = None,
-            continuous_eval: bool = False,
-            save_config: bool = True):
+  def train(
+      self,
+      train_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset],
+      eval_input_fn: Optional[Callable[[params_dict.ParamsDict],
+                                       tf.data.Dataset]] = None,
+      model_dir: Optional[Text] = None,
+      total_steps: int = 1,
+      iterations_per_loop: int = 1,
+      train_metric_fn: Optional[Callable[[], Any]] = None,
+      eval_metric_fn: Optional[Callable[[], Any]] = None,
+      summary_writer_fn: Callable[[Text, Text], SummaryWriter] = SummaryWriter,
+      init_checkpoint: Optional[Callable[[tf.keras.Model], Any]] = None,
+      custom_callbacks: Optional[List[tf.keras.callbacks.Callback]] = None,
+      continuous_eval: bool = False,
+      save_config: bool = True):
     """Runs distributed training.
 
     Args:

@@ -590,7 +590,7 @@ class DistributedExecutor(object):
       eval_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset],
       eval_metric_fn: Callable[[], Any],
       total_steps: int = -1,
-      eval_timeout: int = None,
+      eval_timeout: Optional[int] = None,
       min_eval_interval: int = 180,
       summary_writer_fn: Callable[[Text, Text], SummaryWriter] = SummaryWriter):
     """Runs distributed evaluation on model folder.

@@ -646,7 +646,7 @@ class DistributedExecutor(object):
       eval_input_fn: Callable[[params_dict.ParamsDict],
                               tf.data.Dataset],
       eval_metric_fn: Callable[[], Any],
-      summary_writer: SummaryWriter = None):
+      summary_writer: Optional[SummaryWriter] = None):
     """Runs distributed evaluation on the one checkpoint.
 
     Args:
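Several train() parameters are optional callables, and the explicit annotation makes the None default visible to type checkers. A minimal, hedged sketch of the resulting call pattern in a standalone function (not the executor itself):

  from typing import Any, Callable, Optional

  def run_eval(eval_metric_fn: Optional[Callable[[], Any]] = None) -> Any:
    # The parameter may legitimately be None, so guard before calling it.
    if eval_metric_fn is None:
      return None
    return eval_metric_fn()

  run_eval()                           # both calls type-check under the
  run_eval(lambda: {'accuracy': 0.0})  # explicit-Optional signature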
official/vision/image_classification/efficientnet/efficientnet_model.py

@@ -160,9 +160,9 @@ def conv2d_block(inputs: tf.Tensor,
                  strides: Any = (1, 1),
                  use_batch_norm: bool = True,
                  use_bias: bool = False,
-                 activation: Any = None,
+                 activation: Optional[Any] = None,
                  depthwise: bool = False,
-                 name: Text = None):
+                 name: Optional[Text] = None):
   """A conv2d followed by batch norm and an activation."""
   batch_norm = common_modules.get_batch_norm(config.batch_norm)
   bn_momentum = config.bn_momentum

@@ -212,7 +212,7 @@ def conv2d_block(inputs: tf.Tensor,
 def mb_conv_block(inputs: tf.Tensor,
                   block: BlockConfig,
                   config: ModelConfig,
-                  prefix: Text = None):
+                  prefix: Optional[Text] = None):
   """Mobile Inverted Residual Bottleneck.
 
   Args:

@@ -432,8 +432,8 @@ class EfficientNet(tf.keras.Model):
   """
 
   def __init__(self,
-               config: ModelConfig = None,
-               overrides: Dict[Text, Any] = None):
+               config: Optional[ModelConfig] = None,
+               overrides: Optional[Dict[Text, Any]] = None):
     """Create an EfficientNet model.
 
     Args:

@@ -463,9 +463,9 @@ class EfficientNet(tf.keras.Model):
   @classmethod
   def from_name(cls,
                 model_name: Text,
-                model_weights_path: Text = None,
+                model_weights_path: Optional[Text] = None,
                 weights_format: Text = 'saved_model',
-                overrides: Dict[Text, Any] = None):
+                overrides: Optional[Dict[Text, Any]] = None):
     """Construct an EfficientNet model from a predefined model name.
 
     E.g., `EfficientNet.from_name('efficientnet-b0')`.
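As the from_name docstring shows, a model can be built from a predefined name, and the newly optional arguments (model_weights_path, overrides) can simply be left at None. A hedged usage sketch, with the import path inferred from the file's location:

  from official.vision.image_classification.efficientnet import efficientnet_model

  # From the docstring: build by predefined name; optional arguments omitted.
  model = efficientnet_model.EfficientNet.from_name('efficientnet-b0')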