"vscode:/vscode.git/clone" did not exist on "34a4a94f13d283ef794ba02f84ded96a794bb5d2"
Commit b309ea1b authored by chenzk's avatar chenzk
Browse files

v1.0

parents
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
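# Custom pinyin for multi-character phrases whose default readings are wrong
# (mostly Genshin Impact proper nouns, as the module name suggests).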
phrases_dict = {
'㐖毒': [['xié'], ['dú']],
'若陀': [['rě'], ['tuó']],
'平藏': [['píng'], ['zàng']],
'派蒙': [['pài'], ['méng']],
'安柏': [['ān'], ['bó']],
'一斗': [['yī'], ['dǒu']]
}
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pypinyin import load_phrases_dict
from text.custom_pypinyin_dict import cc_cedict_0
from text.custom_pypinyin_dict import cc_cedict_1
from text.custom_pypinyin_dict import cc_cedict_2
from text.custom_pypinyin_dict import cc_cedict_3
from text.custom_pypinyin_dict import genshin
phrases_dict = {}
phrases_dict.update(cc_cedict_0.phrases_dict)
phrases_dict.update(cc_cedict_1.phrases_dict)
phrases_dict.update(cc_cedict_2.phrases_dict)
phrases_dict.update(cc_cedict_3.phrases_dict)
phrases_dict.update(genshin.phrases_dict)
def load():
load_phrases_dict(phrases_dict)
print("加载自定义词典成功")
if __name__ == '__main__':
print(phrases_dict)
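# Usage sketch (illustrative, not part of this commit): call load() once before any
# pypinyin conversion so the merged phrases_dict overrides the default readings, e.g.
#
#     from pypinyin import pinyin
#     load()
#     print(pinyin('派蒙'))   # expected [['pài'], ['méng']] from genshin.phrases_dict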
""" from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
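# A minimal dispatch sketch for the selection scheme described above (an assumption
# for illustration: callers resolve each cleaner name to a function defined in this
# module, so the names passed in must match functions that actually exist here):
#
#     def clean_text(text, cleaner_names):
#         for name in cleaner_names.split(','):
#             cleaner = globals().get(name.strip())
#             if cleaner is None:
#                 raise ValueError('Unknown cleaner: %s' % name)
#             text = cleaner(text)
#         return text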
import re
import inflect
from unidecode import unidecode
import eng_to_ipa as ipa
_inflect = inflect.engine()
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
# List of (ipa, lazy ipa) pairs:
_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
('r', 'ɹ'),
('æ', 'e'),
('ɑ', 'a'),
('ɔ', 'o'),
('ð', 'z'),
('θ', 's'),
('ɛ', 'e'),
('ɪ', 'i'),
('ʊ', 'u'),
('ʒ', 'ʥ'),
('ʤ', 'ʥ'),
('ˈ', '↓'),
]]
# List of (ipa, lazy ipa2) pairs:
_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
('r', 'ɹ'),
('ð', 'z'),
('θ', 's'),
('ʒ', 'ʑ'),
('ʤ', 'dʑ'),
('ˈ', '↓'),
]]
# List of (ipa, ipa2) pairs
_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
('r', 'ɹ'),
('ʤ', 'dʒ'),
('ʧ', 'tʃ')
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
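# Worked example of the normalization chain above (commas are stripped first, then
# currency, decimals, ordinals and remaining digits are expanded in that order):
#
#     normalize_numbers('$2.50 on the 3rd of 1,000')
#     # -> 'two dollars, fifty cents on the third of one thousand'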
def mark_dark_l(text):
return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text)
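# Example: mark_dark_l('feel the milk') -> 'feeɫ the miɫk'; a prevocalic l as in
# 'light' is left unchanged.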
def english_to_ipa(text):
text = unidecode(text).lower()
text = expand_abbreviations(text)
text = normalize_numbers(text)
phonemes = ipa.convert(text)
phonemes = collapse_whitespace(phonemes)
return phonemes
def english_to_ipa2(text):
text = english_to_ipa(text)
text = mark_dark_l(text)
for regex, replacement in _ipa_to_ipa2:
text = re.sub(regex, replacement, text)
return list(text.replace('...', '…'))
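# Quick check sketch (assumption: inflect, unidecode and eng_to_ipa are installed;
# the exact IPA output depends on eng_to_ipa's dictionary, so it is only indicative):
#
#     if __name__ == '__main__':
#         print(''.join(english_to_ipa2('Mr. Smith paid $2.50.')))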
import re
from unidecode import unidecode
import pyopenjtalk
# Regular expression matching Japanese without punctuation marks:
_japanese_characters = re.compile(
r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
# Regular expression matching non-Japanese characters or punctuation marks:
_japanese_marks = re.compile(
r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
# List of (symbol, Japanese) pairs for marks:
_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
('%', 'パーセント')
]]
# List of (romaji, ipa) pairs for marks:
_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
('ts', 'ʦ'),
('u', 'ɯ'),
('j', 'ʥ'),
('y', 'j'),
('ni', 'n^i'),
('nj', 'n^'),
('hi', 'çi'),
('hj', 'ç'),
('f', 'ɸ'),
('I', 'i*'),
('U', 'ɯ*'),
('r', 'ɾ')
]]
# List of (romaji, ipa2) pairs for marks:
_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
('u', 'ɯ'),
('ʧ', 'tʃ'),
('j', 'dʑ'),
('y', 'j'),
('ni', 'n^i'),
('nj', 'n^'),
('hi', 'çi'),
('hj', 'ç'),
('f', 'ɸ'),
('I', 'i*'),
('U', 'ɯ*'),
('r', 'ɾ')
]]
# List of (consonant, sokuon) pairs:
_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
(r'Q([↑↓]*[kg])', r'k#\1'),
(r'Q([↑↓]*[tdjʧ])', r't#\1'),
(r'Q([↑↓]*[sʃ])', r's\1'),
(r'Q([↑↓]*[pb])', r'p#\1')
]]
# List of (consonant, hatsuon) pairs:
_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
(r'N([↑↓]*[pbm])', r'm\1'),
(r'N([↑↓]*[ʧʥj])', r'n^\1'),
(r'N([↑↓]*[tdn])', r'n\1'),
(r'N([↑↓]*[kg])', r'ŋ\1')
]]
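# Examples of the rewrites above: get_real_sokuon('aQta') -> 'at#ta' (geminate t);
# get_real_hatsuon('aNpa') -> 'ampa' and get_real_hatsuon('aNka') -> 'aŋka'
# (the moraic nasal N assimilates to the following consonant).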
def symbols_to_japanese(text):
for regex, replacement in _symbols_to_japanese:
text = re.sub(regex, replacement, text)
return text
def japanese_to_romaji_with_accent(text):
'''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
text = symbols_to_japanese(text)
sentences = re.split(_japanese_marks, text)
marks = re.findall(_japanese_marks, text)
text = ''
for i, sentence in enumerate(sentences):
if re.match(_japanese_characters, sentence):
if text != '':
text += ' '
labels = pyopenjtalk.extract_fullcontext(sentence)
for n, label in enumerate(labels):
phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
if phoneme not in ['sil', 'pau']:
text += phoneme.replace('ch', 'ʧ').replace('sh',
'ʃ').replace('cl', 'Q')
else:
continue
# n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
a2 = int(re.search(r"\+(\d+)\+", label).group(1))
a3 = int(re.search(r"\+(\d+)/", label).group(1))
if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']:
a2_next = -1
else:
a2_next = int(
re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
# Accent phrase boundary
if a3 == 1 and a2_next == 1:
text += ' '
# Falling
elif a1 == 0 and a2_next == a2 + 1:
text += '↓'
# Rising
elif a2 == 1 and a2_next == 2:
text += '↑'
if i < len(marks):
text += unidecode(marks[i]).replace(' ', '')
return text
def get_real_sokuon(text):
for regex, replacement in _real_sokuon:
text = re.sub(regex, replacement, text)
return text
def get_real_hatsuon(text):
for regex, replacement in _real_hatsuon:
text = re.sub(regex, replacement, text)
return text
def japanese_to_ipa(text):
text = japanese_to_romaji_with_accent(text).replace('...', '…')
text = re.sub(
r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
text = get_real_sokuon(text)
text = get_real_hatsuon(text)
for regex, replacement in _romaji_to_ipa:
text = re.sub(regex, replacement, text)
return text
def japanese_to_ipa2(text):
text = japanese_to_romaji_with_accent(text).replace('...', '…')
text = get_real_sokuon(text)
text = get_real_hatsuon(text)
for regex, replacement in _romaji_to_ipa2:
text = re.sub(regex, replacement, text)
return list(text)
def japanese_to_ipa3(text):
text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace(
'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a')
text = re.sub(
r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text)
return text
if __name__ == '__main__':
a = japanese_to_romaji_with_accent('こんにちは!はい、元気です。あなたは?')
print(a)
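# Further checks, as a sketch (assumption: pyopenjtalk is installed; accent marks
# depend on its full-context labels, so output may differ between versions):
#     print(japanese_to_ipa2('こんにちは!'))
#     print(japanese_to_ipa3('こんにちは!'))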
from .detector_factory import DetectorFactory, PROFILES_DIRECTORY, detect, detect_langs
from .lang_detect_exception import LangDetectException
import random
import re
import six
from six.moves import zip, xrange
from .lang_detect_exception import ErrorCode, LangDetectException
from .language import Language
from .utils.ngram import NGram
from .utils.unicode_block import unicode_block
class Detector(object):
'''
Detector class detects the language of a given text.
Instances are constructed via the factory class DetectorFactory.
After appending target text to a Detector instance with .append(string),
the detector provides language detection results via .detect() or .get_probabilities().
.detect() returns the single language name with the highest probability.
.get_probabilities() returns a list of candidate languages and their probabilities.
The detector has some parameters for language detection;
see .set_alpha(float), .set_max_text_length(int) and .set_prior_map(dict).
Example:
from langdetect.detector_factory import DetectorFactory
factory = DetectorFactory()
factory.load_profile('/path/to/profile/directory')
def detect(text):
detector = factory.create()
detector.append(text)
return detector.detect()
def detect_langs(text):
detector = factory.create()
detector.append(text)
return detector.get_probabilities()
'''
ALPHA_DEFAULT = 0.5
ALPHA_WIDTH = 0.05
ITERATION_LIMIT = 1000
PROB_THRESHOLD = 0.1
CONV_THRESHOLD = 0.99999
BASE_FREQ = 10000
UNKNOWN_LANG = 'unknown'
URL_RE = re.compile(r'https?://[-_.?&~;+=/#0-9A-Za-z]{1,2076}')
MAIL_RE = re.compile(r'[-_.0-9A-Za-z]{1,64}@[-_0-9A-Za-z]{1,255}[-_.0-9A-Za-z]{1,255}')
def __init__(self, factory):
self.word_lang_prob_map = factory.word_lang_prob_map
self.langlist = factory.langlist
self.seed = factory.seed
self.random = random.Random()
self.text = ''
self.langprob = None
self.alpha = self.ALPHA_DEFAULT
self.n_trial = 7
self.max_text_length = 10000
self.prior_map = None
self.verbose = False
def set_verbose(self):
self.verbose = True
def set_alpha(self, alpha):
self.alpha = alpha
def set_prior_map(self, prior_map):
'''Set prior information about language probabilities.'''
self.prior_map = [0.0] * len(self.langlist)
sump = 0.0
for i in xrange(len(self.prior_map)):
lang = self.langlist[i]
if lang in prior_map:
p = prior_map[lang]
if p < 0:
raise LangDetectException(ErrorCode.InitParamError, 'Prior probability must be non-negative.')
self.prior_map[i] = p
sump += p
if sump <= 0.0:
raise LangDetectException(ErrorCode.InitParamError, 'At least one prior probability must be non-zero.')
for i in xrange(len(self.prior_map)):
self.prior_map[i] /= sump
def set_max_text_length(self, max_text_length):
'''Specify max size of target text to use for language detection.
The default value is 10000 (10 KB).
'''
self.max_text_length = max_text_length
def append(self, text):
'''Append the target text for language detection.
If the total size of the target text exceeds the limit set by
Detector.set_max_text_length(int), the excess is truncated.
'''
text = self.URL_RE.sub(' ', text)
text = self.MAIL_RE.sub(' ', text)
text = NGram.normalize_vi(text)
pre = 0
for i in xrange(min(len(text), self.max_text_length)):
ch = text[i]
if ch != ' ' or pre != ' ':
self.text += ch
pre = ch
def cleaning_text(self):
'''Clean the text before detection
(drop Latin characters when the text is predominantly non-Latin; URLs and e-mail addresses are already removed in append()).
'''
latin_count, non_latin_count = 0, 0
for ch in self.text:
if 'A' <= ch <= 'z':
latin_count += 1
elif ch >= six.u('\u0300') and unicode_block(ch) != 'Latin Extended Additional':
non_latin_count += 1
if latin_count * 2 < non_latin_count:
text_without_latin = ''
for ch in self.text:
if ch < 'A' or 'z' < ch:
text_without_latin += ch
self.text = text_without_latin
def detect(self):
'''Detect language of the target text and return the language name
which has the highest probability.
'''
probabilities = self.get_probabilities()
if probabilities:
return probabilities[0].lang
return self.UNKNOWN_LANG
def get_probabilities(self):
if self.langprob is None:
self._detect_block()
return self._sort_probability(self.langprob)
def _detect_block(self):
self.cleaning_text()
ngrams = self._extract_ngrams()
if not ngrams:
raise LangDetectException(ErrorCode.CantDetectError, 'No features in text.')
self.langprob = [0.0] * len(self.langlist)
self.random.seed(self.seed)
for t in xrange(self.n_trial):
prob = self._init_probability()
alpha = self.alpha + self.random.gauss(0.0, 1.0) * self.ALPHA_WIDTH
i = 0
while True:
self._update_lang_prob(prob, self.random.choice(ngrams), alpha)
if i % 5 == 0:
if self._normalize_prob(prob) > self.CONV_THRESHOLD or i >= self.ITERATION_LIMIT:
break
if self.verbose:
six.print_('>', self._sort_probability(prob))
i += 1
for j in xrange(len(self.langprob)):
self.langprob[j] += prob[j] / self.n_trial
if self.verbose:
six.print_('==>', self._sort_probability(prob))
def _init_probability(self):
'''Initialize the map of language probabilities.
If a prior map was specified, use it as the initial map.
'''
if self.prior_map is not None:
return list(self.prior_map)
else:
return [1.0 / len(self.langlist)] * len(self.langlist)
def _extract_ngrams(self):
'''Extract n-grams from target text.'''
RANGE = list(xrange(1, NGram.N_GRAM + 1))
result = []
ngram = NGram()
for ch in self.text:
ngram.add_char(ch)
if ngram.capitalword:
continue
for n in RANGE:
# optimized w = ngram.get(n)
if len(ngram.grams) < n:
break
w = ngram.grams[-n:]
if w and w != ' ' and w in self.word_lang_prob_map:
result.append(w)
return result
def _update_lang_prob(self, prob, word, alpha):
'''Update language probabilities with an N-gram string (N=1,2,3).'''
if word is None or word not in self.word_lang_prob_map:
return False
lang_prob_map = self.word_lang_prob_map[word]
if self.verbose:
six.print_('%s(%s): %s' % (word, self._unicode_encode(word), self._word_prob_to_string(lang_prob_map)))
weight = alpha / self.BASE_FREQ
for i in xrange(len(prob)):
prob[i] *= weight + lang_prob_map[i]
return True
def _word_prob_to_string(self, prob):
result = ''
for j in xrange(len(prob)):
p = prob[j]
if p >= 0.00001:
result += ' %s:%.5f' % (self.langlist[j], p)
return result
def _normalize_prob(self, prob):
'''Normalize probabilities and check convergence by the maximum probability.
'''
maxp, sump = 0.0, sum(prob)
for i in xrange(len(prob)):
p = prob[i] / sump
if maxp < p:
maxp = p
prob[i] = p
return maxp
def _sort_probability(self, prob):
result = [Language(lang, p) for (lang, p) in zip(self.langlist, prob) if p > self.PROB_THRESHOLD]
result.sort(reverse=True)
return result
def _unicode_encode(self, word):
buf = ''
for ch in word:
if ch >= six.u('\u0080'):
st = hex(0x10000 + ord(ch))[2:]
while len(st) < 4:
st = '0' + st
buf += r'\u' + st[1:5]
else:
buf += ch
return buf
import os
from os import path
import sys
import json
from .detector import Detector
from .lang_detect_exception import ErrorCode, LangDetectException
from .utils.lang_profile import LangProfile
class DetectorFactory(object):
'''
Language Detector Factory Class.
This class manages initialization and construction of Detector instances.
Before using the language detection library,
load profiles with DetectorFactory.load_profile(str)
and set initialization parameters.
To detect a language,
construct a Detector instance via DetectorFactory.create().
See also Detector's sample code.
'''
seed = None
def __init__(self):
self.word_lang_prob_map = {}
self.langlist = []
def load_profile(self, profile_directory):
list_files = os.listdir(profile_directory)
if not list_files:
raise LangDetectException(ErrorCode.NeedLoadProfileError, 'No profiles found in: ' + profile_directory)
langsize, index = len(list_files), 0
for filename in list_files:
if filename.startswith('.'):
continue
filename = path.join(profile_directory, filename)
if not path.isfile(filename):
continue
f = None
try:
if sys.version_info[0] < 3:
f = open(filename, 'r')
else:
f = open(filename, 'r', encoding='utf-8')
json_data = json.load(f)
profile = LangProfile(**json_data)
self.add_profile(profile, index, langsize)
index += 1
except IOError:
raise LangDetectException(ErrorCode.FileLoadError, 'Cannot open "%s"' % filename)
except:
raise LangDetectException(ErrorCode.FormatError, 'Profile format error in "%s"' % filename)
finally:
if f:
f.close()
def load_json_profile(self, json_profiles):
langsize, index = len(json_profiles), 0
if langsize < 2:
raise LangDetectException(ErrorCode.NeedLoadProfileError, 'Need at least 2 profiles.')
for json_profile in json_profiles:
try:
json_data = json.loads(json_profile)
profile = LangProfile(**json_data)
self.add_profile(profile, index, langsize)
index += 1
except:
raise LangDetectException(ErrorCode.FormatError, 'Profile format error.')
def add_profile(self, profile, index, langsize):
lang = profile.name
if lang in self.langlist:
raise LangDetectException(ErrorCode.DuplicateLangError, 'Duplicate language profile.')
self.langlist.append(lang)
for word in profile.freq:
if word not in self.word_lang_prob_map:
self.word_lang_prob_map[word] = [0.0] * langsize
length = len(word)
if 1 <= length <= 3:
prob = 1.0 * profile.freq.get(word) / profile.n_words[length - 1]
self.word_lang_prob_map[word][index] = prob
def clear(self):
self.langlist = []
self.word_lang_prob_map = {}
def create(self, alpha=None):
'''Construct Detector instance with smoothing parameter.'''
detector = self._create_detector()
if alpha is not None:
detector.set_alpha(alpha)
return detector
def _create_detector(self):
if not self.langlist:
raise LangDetectException(ErrorCode.NeedLoadProfileError, 'Need to load profiles.')
return Detector(self)
def set_seed(self, seed):
self.seed = seed
def get_lang_list(self):
return list(self.langlist)
PROFILES_DIRECTORY = path.join(path.dirname(__file__), 'profiles')
_factory = None
def init_factory():
global _factory
if _factory is None:
_factory = DetectorFactory()
_factory.load_profile(PROFILES_DIRECTORY)
def detect(text):
init_factory()
detector = _factory.create()
detector.append(text)
return detector.detect()
def detect_langs(text):
init_factory()
detector = _factory.create()
detector.append(text)
return detector.get_probabilities()
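# Usage sketch (assumption: the bundled 'profiles' directory sits next to this file,
# as PROFILES_DIRECTORY above expects; exact probabilities vary with the input):
#
#     print(detect('This is an English sentence.'))    # e.g. 'en'
#     print(detect_langs('これは日本語の文章です。'))      # e.g. [ja:0.9999...]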
_error_codes = {
'NoTextError': 0,
'FormatError': 1,
'FileLoadError': 2,
'DuplicateLangError': 3,
'NeedLoadProfileError': 4,
'CantDetectError': 5,
'CantOpenTrainData': 6,
'TrainDataFormatError': 7,
'InitParamError': 8,
}
ErrorCode = type('ErrorCode', (), _error_codes)
class LangDetectException(Exception):
def __init__(self, code, message):
super(LangDetectException, self).__init__(message)
self.code = code
def get_code(self):
return self.code
class Language(object):
'''
Language stores a detected language and its probability.
Detector.get_probabilities() returns a list of Languages.
'''
def __init__(self, lang, prob):
self.lang = lang
self.prob = prob
def __repr__(self):
if self.lang is None:
return ''
return '%s:%s' % (self.lang, self.prob)
def __lt__(self, other):
return self.prob < other.prob
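# Note: only __lt__ is defined, so sorted(..., reverse=True) in Detector._sort_probability
# orders Language objects by descending probability, e.g.
#     sorted([Language('fr', 0.3), Language('en', 0.7)], reverse=True) -> [en:0.7, fr:0.3]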