OpenDAS / nni

Commit 21725f96, authored Jun 20, 2019 by suiguoxin

    gp_tuner init from fmfn's repo

Parent: db208207

Showing 7 changed files with 556 additions and 2 deletions (+556 −2)
Files changed:
    src/nni_manager/rest_server/restValidationSchemas.ts    +1    -1
    src/sdk/pynni/nni/constants.py                           +3    -1
    src/sdk/pynni/nni/gp_tuner/__init__.py                   +0    -0
    src/sdk/pynni/nni/gp_tuner/gp_tuner.py                   +155  -0
    src/sdk/pynni/nni/gp_tuner/target_space.py               +225  -0
    src/sdk/pynni/nni/gp_tuner/util.py                       +163  -0
    tools/nni_cmd/config_schema.py                           +9    -0
src/nni_manager/rest_server/restValidationSchemas.ts

@@ -161,7 +161,7 @@ export namespace ValidationSchemas {
             checkpointDir: joi.string().allow('')
         }),
         tuner: joi.object({
-            builtinTunerName: joi.string().valid('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner', 'GridSearch', 'NetworkMorphism', 'MetisTuner'),
+            builtinTunerName: joi.string().valid('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner', 'GridSearch', 'NetworkMorphism', 'MetisTuner', 'GPTuner'),
             codeDir: joi.string(),
             classFileName: joi.string(),
             className: joi.string(),
src/sdk/pynni/nni/constants.py

@@ -29,7 +29,8 @@ ModuleName = {
     'GridSearch': 'nni.gridsearch_tuner.gridsearch_tuner',
     'NetworkMorphism': 'nni.networkmorphism_tuner.networkmorphism_tuner',
     'Curvefitting': 'nni.curvefitting_assessor.curvefitting_assessor',
-    'MetisTuner': 'nni.metis_tuner.metis_tuner'
+    'MetisTuner': 'nni.metis_tuner.metis_tuner',
+    'GPTuner': 'nni.gp_tuner.gp_tuner'
 }

 ClassName = {
@@ -42,6 +43,7 @@ ClassName = {
     'GridSearch': 'GridSearchTuner',
     'NetworkMorphism': 'NetworkMorphismTuner',
     'MetisTuner': 'MetisTuner',
+    'GPTuner': 'GPTuner',

     'Medianstop': 'MedianstopAssessor',
     'Curvefitting': 'CurvefittingAssessor'
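For context, these two maps are what turn a builtin tuner name from an experiment configuration into an importable class: ModuleName gives the module path and ClassName the class to instantiate. The helper below is only a hypothetical sketch of that lookup, not part of this commit, and the real NNI dispatcher may do more (e.g. argument validation):

import importlib

from nni.constants import ModuleName, ClassName

def create_builtin_tuner(name, **class_args):
    # e.g. 'GPTuner' -> import nni.gp_tuner.gp_tuner and instantiate GPTuner
    module = importlib.import_module(ModuleName[name])
    return getattr(module, ClassName[name])(**class_args)

tuner = create_builtin_tuner('GPTuner', optimize_mode='maximize')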
src/sdk/pynni/nni/gp_tuner/__init__.py  (new file, 0 → 100644, empty)
src/sdk/pynni/nni/gp_tuner/gp_tuner.py  (new file, 0 → 100644)
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
gp_tuner.py
'''

import warnings
import logging

import numpy as np
from sklearn.gaussian_process.kernels import Matern
from sklearn.gaussian_process import GaussianProcessRegressor

from nni.tuner import Tuner
from nni.utils import OptimizeMode, extract_scalar_reward

from .target_space import TargetSpace
from .util import UtilityFunction, acq_max, ensure_rng

logger = logging.getLogger("GP_Tuner_AutoML")


class GPTuner(Tuner):
    '''GPTuner'''

    def __init__(self, optimize_mode="maximize", cold_start_num=3, random_state=None):
        self.optimize_mode = optimize_mode
        self._random_state = ensure_rng(random_state)
        self._space = None

        self._gp = GaussianProcessRegressor(
            kernel=Matern(nu=2.5),
            alpha=1e-6,
            normalize_y=True,
            n_restarts_optimizer=25,
            random_state=self._random_state
        )

        self.cold_start_num = cold_start_num
        self.supplement_data_num = 0

    def update_search_space(self, search_space):
        """Update self.x_bounds and self.x_types from search_space.json

        Parameters
        ----------
        search_space : dict
        """
        self._space = TargetSpace(search_space, self._random_state)

    def generate_parameters(self, parameter_id):
        """Generate the next parameter set for a trial, i.e. the most promising point to probe next.

        If the number of received trial results is lower than the cold-start number,
        parameters are generated at random. Otherwise they are chosen with the
        Gaussian Process model.

        Parameters
        ----------
        parameter_id : int

        Returns
        -------
        result : dict
        """
        if len(self._space) == 0 or len(self._space._target) < self.cold_start_num:
            return self._space.array_to_params(self._space.random_sample())

        # sklearn's GP throws a large number of warnings at times, but
        # we don't really need to see them here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self._gp.fit(self._space.params, self._space.target)

        util = UtilityFunction(kind='ei', kappa=0, xi=0)

        # Finding argmax of the acquisition function.
        suggestion = acq_max(
            ac=util.utility,
            gp=self._gp,
            y_max=self._space.target.max(),
            bounds=self._space.bounds,
            random_state=self._random_state,
            space=self._space
        )
        logger.info("Generated parameters (array):\n" + str(suggestion))
        print("Generated parameters (array):\n" + str(suggestion))

        results = self._space.array_to_params(suggestion)
        logger.info("Generated parameters (json):\n" + str(results))
        print("Generated parameters (json):\n" + str(results))
        return results

    def receive_trial_result(self, parameter_id, parameters, value):
        """Tuner receives a result from a trial.

        Parameters
        ----------
        parameter_id : int
        parameters : dict
        value : dict/float
            if value is dict, it should have "default" key.
        """
        logger.info("Received trial result.")
        logger.info("value is : " + str(value))
        logger.info("parameter is : " + str(parameters))
        # accept either a raw number or a dict carrying the metric under "default"
        value = extract_scalar_reward(value)
        self._space.register(parameters, value)

    def import_data(self, data):
        """Import additional data for tuning.

        Parameters
        ----------
        data : list of dict
            each dict has at least two keys, 'parameter' and 'value'
        """
        _completed_num = 0
        for trial_info in data:
            logger.info("Importing data, current processing progress %s / %s" % (_completed_num, len(data)))
            _completed_num += 1
            assert "parameter" in trial_info
            _params = trial_info["parameter"]
            assert "value" in trial_info
            _value = trial_info['value']
            if not _value:
                logger.info("Useless trial data, value is %s, skip this trial data." % _value)
                continue
            self.supplement_data_num += 1
            _parameter_id = '_'.join(["ImportData", str(self.supplement_data_num)])
            self.receive_trial_result(parameter_id=_parameter_id, parameters=_params, value=_value)
        logger.info("Successfully imported data to GP tuner.")
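To make the flow above concrete, here is a minimal sketch of driving the tuner by hand, outside the NNI manager. It assumes the SDK from this commit is installed (so nni.gp_tuner.gp_tuner is importable) together with 2019-era scipy/scikit-learn; the search space and the toy metric are made up for illustration:

from nni.gp_tuner.gp_tuner import GPTuner

# A made-up NNI-style search space with one uniform and one choice parameter.
search_space = {
    "dropout_rate": {"_type": "uniform", "_value": [0.5, 0.9]},
    "conv_size": {"_type": "choice", "_value": [2, 3, 5, 7]},
}

tuner = GPTuner(optimize_mode="maximize", cold_start_num=3, random_state=0)
tuner.update_search_space(search_space)

for parameter_id in range(6):
    params = tuner.generate_parameters(parameter_id)
    # Stand-in for a real trial: any float metric works here.
    metric = -(params["dropout_rate"] - 0.7) ** 2
    tuner.receive_trial_result(parameter_id, params, metric)

Note that once the cold-start phase is over, each suggestion evaluates the acquisition function on 100,000 random samples plus 250 L-BFGS-B restarts (the acq_max defaults), so the GP-driven iterations take noticeably longer than the random ones.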
src/sdk/pynni/nni/gp_tuner/target_space.py  (new file, 0 → 100644)

import numpy as np

from .util import ensure_rng


def _hashable(x):
    """ ensure that a point is hashable by a python dict """
    # an ndarray itself is not hashable, so convert to a tuple of floats
    return tuple(map(float, x))


class TargetSpace(object):
    """
    Holds the param-space coordinates (X) and target values (Y)
    Allows for constant-time appends while ensuring no duplicates are added

    Example
    -------
    >>> pbounds = {'p1': {'_type': 'uniform', '_value': [0, 1]},
    ...            'p2': {'_type': 'uniform', '_value': [1, 100]}}
    >>> space = TargetSpace(pbounds, random_state=0)
    >>> x = space.random_sample()
    >>> space.register(x, target=1.0)
    >>> assert space.max()['target'] == 1.0
    """

    def __init__(self, pbounds, random_state=None):
        """
        Parameters
        ----------
        pbounds : dict
            Dictionary with parameter names as keys and an NNI search-space entry
            ({'_type': ..., '_value': ...}) as value.
        random_state : int, RandomState, or None
            optionally specify a seed for a random number generator
        """
        self.random_state = ensure_rng(random_state)

        # Get the name of the parameters
        self._keys = sorted(pbounds)
        # Create an array with parameters bounds
        self._bounds = np.array(
            [item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])]
        )

        # preallocated memory for X and Y points
        self._params = np.empty(shape=(0, self.dim))
        self._target = np.empty(shape=(0))

        # keep track of unique points we have seen so far
        self._cache = {}

    def __contains__(self, x):
        return _hashable(x) in self._cache

    def __len__(self):
        assert len(self._params) == len(self._target)
        return len(self._target)

    @property
    def empty(self):
        return len(self) == 0

    @property
    def params(self):
        return self._params

    @property
    def target(self):
        return self._target

    @property
    def dim(self):
        return len(self._keys)

    @property
    def keys(self):
        return self._keys

    @property
    def bounds(self):
        return self._bounds

    def params_to_array(self, params):
        try:
            assert set(params) == set(self.keys)
        except AssertionError:
            raise ValueError(
                "Parameters' keys ({}) do ".format(sorted(params)) +
                "not match the expected set of keys ({}).".format(self.keys)
            )
        return np.asarray([params[key] for key in self.keys])

    def array_to_params(self, x):
        try:
            assert len(x) == len(self.keys)
        except AssertionError:
            raise ValueError(
                "Size of array ({}) is different than the ".format(len(x)) +
                "expected number of parameters ({}).".format(len(self.keys))
            )
        return dict(zip(self.keys, x))

    def _as_array(self, x):
        # Accept either a dict keyed by parameter name or an array-like of values.
        if isinstance(x, dict):
            x = self.params_to_array(x)
        else:
            x = np.asarray(x)
        x = x.ravel()
        try:
            assert x.size == self.dim
        except AssertionError:
            raise ValueError(
                "Size of array ({}) is different than the ".format(len(x)) +
                "expected number of parameters ({}).".format(len(self.keys))
            )
        return x

    def register(self, params, target):
        """
        Append a point and its target value to the known data.

        Parameters
        ----------
        params : dict or ndarray
            a single point, with len(params) == self.dim
        target : float
            target function value

        Raises
        ------
        KeyError:
            if the point is not unique

        Notes
        -----
        runs in amortized constant time

        Example
        -------
        >>> pbounds = {'p1': {'_type': 'uniform', '_value': [0, 1]},
        ...            'p2': {'_type': 'uniform', '_value': [1, 100]}}
        >>> space = TargetSpace(pbounds)
        >>> len(space)
        0
        >>> x = np.array([0, 0])
        >>> y = 1
        >>> space.register(x, y)
        >>> len(space)
        1
        """
        x = self._as_array(params)
        if x in self:
            raise KeyError('Data point {} is not unique'.format(x))

        # Insert data into unique dictionary
        self._cache[_hashable(x.ravel())] = target

        self._params = np.concatenate([self._params, x.reshape(1, -1)])
        self._target = np.concatenate([self._target, [target]])

    def random_sample(self):
        """
        Creates a random point within the bounds of the space.

        Returns
        ----------
        data: ndarray
            [1 x dim] array of values with dimensions corresponding to `self._keys`

        Example
        -------
        >>> pbounds = {"dropout_rate": {"_type": "uniform", "_value": [0.5, 0.9]},
        ...            "conv_size": {"_type": "choice", "_value": [2, 3, 5, 7]}}
        >>> space = TargetSpace(pbounds, random_state=0)
        >>> space.random_sample()
        """
        # TODO: support randint, quniform
        # (acq_max calls this ~1e5 times per suggestion, so keep it quiet and cheap)
        data = np.empty((1, self.dim))
        for col, _bound in enumerate(self._bounds):
            if _bound['_type'] == 'uniform':
                data.T[col] = self.random_state.uniform(
                    _bound['_value'][0], _bound['_value'][1], size=1)
            elif _bound['_type'] == 'choice':
                data.T[col] = self.random_state.choice(_bound['_value'])

        return data.ravel()

    def max(self):
        """Get maximum target value found and corresponding parameters."""
        try:
            res = {
                'target': self.target.max(),
                'params': dict(
                    zip(self.keys, self.params[self.target.argmax()])
                )
            }
        except ValueError:
            res = {}
        return res

    def res(self):
        """Get all target values found and corresponding parameters."""
        params = [dict(zip(self.keys, p)) for p in self.params]

        return [
            {"target": target, "params": param}
            for target, param in zip(self.target, params)
        ]

    def set_bounds(self, new_bounds):
        """
        A method that allows changing the lower and upper searching bounds

        Parameters
        ----------
        new_bounds : dict
            A dictionary with the parameter name and its new bounds
        """
        for row, key in enumerate(self.keys):
            if key in new_bounds:
                self._bounds[row] = new_bounds[key]
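A small standalone sketch of the TargetSpace bookkeeping (the bounds below are made up; it assumes the module is importable as nni.gp_tuner.target_space):

from nni.gp_tuner.target_space import TargetSpace

bounds = {
    "lr": {"_type": "uniform", "_value": [1e-4, 1e-1]},        # made-up parameters
    "batch_size": {"_type": "choice", "_value": [16, 32, 64]},
}
space = TargetSpace(bounds, random_state=42)

x = space.random_sample()        # 1-D array, ordered by sorted key names
space.register(x, target=0.93)   # a dict keyed by parameter name is also accepted
print(space.max())               # {'target': 0.93, 'params': {...}}

try:
    space.register(x, target=0.93)   # duplicate points are rejected
except KeyError as err:
    print(err)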
src/sdk/pynni/nni/gp_tuner/util.py  (new file, 0 → 100644)

import warnings
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize


def _match_val_type(vals, bounds):
    '''
    Update values in the array to match their corresponding type
    '''
    vals_new = []

    for i, _ in enumerate(bounds):
        _type = bounds[i]['_type']
        if _type == "choice":
            # Find the closest value in the choice list
            vals_new.append(min(bounds[i]['_value'], key=lambda x: abs(x - vals[i])))
        else:
            vals_new.append(vals[i])

    return vals_new


def acq_max(ac, gp, y_max, bounds, random_state, space, n_warmup=100000, n_iter=250):
    """
    A function to find the maximum of the acquisition function

    It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
    optimization method. First by sampling `n_warmup` (1e5) points at random,
    and then running L-BFGS-B from `n_iter` (250) random starting points.

    Parameters
    ----------
    :param ac:
        The acquisition function object that returns its point-wise value.
    :param gp:
        A gaussian process fitted to the relevant data.
    :param y_max:
        The current maximum known value of the target function.
    :param bounds:
        The variables bounds to limit the search of the acq max.
    :param random_state:
        instance of np.RandomState random number generator
    :param n_warmup:
        number of times to randomly sample the acquisition function
    :param n_iter:
        number of times to run scipy.minimize

    Returns
    -------
    :return: x_max, The arg max of the acquisition function.
    """

    # Warm up with random points
    x_tries = [space.random_sample() for _ in range(n_warmup)]
    ys = ac(x_tries, gp=gp, y_max=y_max)
    x_max = x_tries[ys.argmax()]
    max_acq = ys.max()

    # Explore the parameter space more thoroughly
    x_seeds = [space.random_sample() for _ in range(n_iter)]

    bounds_minmax = np.array(
        [[bound['_value'][0], bound['_value'][-1]] for bound in bounds])

    for x_try in x_seeds:
        # Find the minimum of minus the acquisition function
        res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
                       x_try.reshape(1, -1),
                       bounds=bounds_minmax,
                       method="L-BFGS-B")

        # See if success
        if not res.success:
            continue

        # Store it if better than previous minimum(maximum).
        if max_acq is None or -res.fun[0] >= max_acq:
            x_max = _match_val_type(res.x, bounds)
            max_acq = -res.fun[0]

    # Clip output to make sure it lies within the bounds. Due to floating
    # point technicalities this is not always the case.
    # return np.clip(x_max, bounds[:, 0], bounds[:, 1])
    return x_max


class UtilityFunction(object):
    """
    An object to compute the acquisition functions.
    """

    def __init__(self, kind, kappa, xi):
        """
        If UCB is to be used, a constant kappa is needed.
        """
        self.kappa = kappa
        self.xi = xi

        if kind not in ['ucb', 'ei', 'poi']:
            err = "The utility function " \
                  "{} has not been implemented, " \
                  "please choose one of ucb, ei, or poi.".format(kind)
            raise NotImplementedError(err)
        else:
            self.kind = kind

    def utility(self, x, gp, y_max):
        if self.kind == 'ucb':
            return self._ucb(x, gp, self.kappa)
        if self.kind == 'ei':
            return self._ei(x, gp, y_max, self.xi)
        if self.kind == 'poi':
            return self._poi(x, gp, y_max, self.xi)

    @staticmethod
    def _ucb(x, gp, kappa):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        return mean + kappa * std

    @staticmethod
    def _ei(x, gp, y_max, xi):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        z = (mean - y_max - xi) / std
        return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)

    @staticmethod
    def _poi(x, gp, y_max, xi):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        z = (mean - y_max - xi) / std
        return norm.cdf(z)


def ensure_rng(random_state=None):
    """
    Creates a random number generator based on an optional seed. This can be
    an integer or another random state for a seeded rng, or None for an
    unseeded rng.
    """
    if random_state is None:
        random_state = np.random.RandomState()
    elif isinstance(random_state, int):
        random_state = np.random.RandomState(random_state)
    else:
        assert isinstance(random_state, np.random.RandomState)
    return random_state
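For reference, the three acquisition functions implemented by UtilityFunction, written with the GP posterior mean \mu(x) and standard deviation \sigma(x), the incumbent y_max, and the exploration parameters \kappa and \xi. This is just the standard textbook form of what _ucb, _ei and _poi compute, with \Phi and \varphi the standard normal CDF and PDF (norm.cdf and norm.pdf above):

    z(x) = \frac{\mu(x) - y_{\max} - \xi}{\sigma(x)}

    \mathrm{UCB}(x) = \mu(x) + \kappa \, \sigma(x)

    \mathrm{EI}(x) = \bigl(\mu(x) - y_{\max} - \xi\bigr)\,\Phi\bigl(z(x)\bigr) + \sigma(x)\,\varphi\bigl(z(x)\bigr)

    \mathrm{POI}(x) = \Phi\bigl(z(x)\bigr)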
tools/nni_cmd/config_schema.py

@@ -103,6 +103,15 @@ tuner_schema_dict = {
        },
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
+    'GPTuner': {
+        'builtinTunerName': 'GPTuner',
+        'classArgs': {
+            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
+            Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
+            Optional('cold_start_num'): setType('cold_start_num', int),
+        },
+        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
+    },
    'customized': {
        'codeDir': setPathCheck('codeDir'),
        'classFileName': setType('classFileName', str),
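What the new schema entry accepts, sketched as the parsed tuner section of an experiment configuration (in practice this fragment comes from the YAML config that nnictl validates against tuner_schema_dict; the surrounding experiment fields are omitted):

# Hypothetical parsed config fragment accepted by the new 'GPTuner' schema entry.
tuner_config = {
    'builtinTunerName': 'GPTuner',
    'classArgs': {
        'optimize_mode': 'maximize',
        'cold_start_num': 5,
        # 'selection_num_starting_points' is also accepted by the schema,
        # though this initial GPTuner.__init__ does not consume it yet.
    },
}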