Unverified Commit 641b1ee7 authored by Hongxin Liu, committed by GitHub

[devops] remove post commit ci (#5566)

* [devops] remove post commit ci

* [misc] run pre-commit on all files

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci



---------
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
parent 341263df
''' """
Class for loading table type data. please refer to Pandas-Input/Output for file format details. Class for loading table type data. please refer to Pandas-Input/Output for file format details.
''' """
import os
import glob import glob
import os
import pandas as pd import pandas as pd
from sqlalchemy import create_engine
from colossalqa.utils import drop_table
from colossalqa.mylogging import get_logger from colossalqa.mylogging import get_logger
from colossalqa.utils import drop_table
from sqlalchemy import create_engine
logger = get_logger() logger = get_logger()
SUPPORTED_DATA_FORMAT = ['.csv','.xlsx', '.xls','.json','.html','.h5', '.hdf5','.parquet','.feather','.dta'] SUPPORTED_DATA_FORMAT = [".csv", ".xlsx", ".xls", ".json", ".html", ".h5", ".hdf5", ".parquet", ".feather", ".dta"]
class TableLoader: class TableLoader:
''' """
Load tables from different files and serve a sql database for database operations Load tables from different files and serve a sql database for database operations
''' """
def __init__(self, files: str,
sql_path:str='sqlite:///mydatabase.db', def __init__(self, files: str, sql_path: str = "sqlite:///mydatabase.db", verbose=False, **kwargs) -> None:
verbose=False, **kwargs) -> None: """
'''
Args: Args:
files: list of files (list[file path, name]) files: list of files (list[file path, name])
sql_path: how to serve the sql database sql_path: how to serve the sql database
**kwargs: keyword type arguments, useful for certain document types **kwargs: keyword type arguments, useful for certain document types
''' """
self.data = {} self.data = {}
self.verbose = verbose self.verbose = verbose
self.sql_path = sql_path self.sql_path = sql_path
@@ -49,58 +50,58 @@ class TableLoader:
            self.to_sql(path, dataset_name)

    def load_data(self, path):
        """
        Load data and serve the data as sql database.
        Data must be in pandas format
        """
        files = []
        # Handle glob expression
        try:
            files = glob.glob(path)
        except Exception as e:
            logger.error(e)
        if len(files) == 0:
            raise ValueError("Unsupported file/directory format. For directories, please use glob expression")
        elif len(files) == 1:
            path = files[0]
        else:
            for file in files:
                self.load_data(file)

        if path.endswith(".csv"):
            # Load csv
            self.data[path] = pd.read_csv(path)
        elif path.endswith(".xlsx") or path.endswith(".xls"):
            # Load excel
            self.data[path] = pd.read_excel(path)  # You can adjust the sheet_name as needed
        elif path.endswith(".json"):
            # Load json
            self.data[path] = pd.read_json(path)
        elif path.endswith(".html"):
            # Load html
            html_tables = pd.read_html(path)
            # Choose the desired table from the list of DataFrame objects
            self.data[path] = html_tables[0]  # You may need to adjust this index
        elif path.endswith(".h5") or path.endswith(".hdf5"):
            # Load h5
            self.data[path] = pd.read_hdf(path, key=self.kwargs.get("key", "data"))  # You can adjust the key as needed
        elif path.endswith(".parquet"):
            # Load parquet
            self.data[path] = pd.read_parquet(path, engine="fastparquet")
        elif path.endswith(".feather"):
            # Load feather
            self.data[path] = pd.read_feather(path)
        elif path.endswith(".dta"):
            # Load dta
            self.data[path] = pd.read_stata(path)
        else:
            raise ValueError("Unsupported file format")

    def to_sql(self, path, table_name):
        """
        Serve the data as sql database.
        """
        self.data[path].to_sql(table_name, con=self.sql_engine, if_exists="replace", index=False)
        logger.info(f"Loaded to Sqlite3\nPath: {path}", verbose=self.verbose)
        return self.sql_path
@@ -113,7 +114,3 @@ class TableLoader:
        self.sql_engine.dispose()
        del self.data
        del self.sql_engine
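A minimal usage sketch (not part of this commit): the CSV path, table name, and SQLite URL below are placeholder assumptions, and the module path is assumed from the repo layout.

# Hypothetical example: load one CSV and serve it as a SQLite table.
from colossalqa.data_loader.table_dataloader import TableLoader  # assumed module path

loader = TableLoader(files=[["./data/sales.csv", "sales"]], sql_path="sqlite:///sales.db", verbose=True)
print(loader.sql_path)  # SQLAlchemy URL of the database the table was written to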
@@ -21,7 +21,7 @@ print(resp)  # super-heavyweight awesome-natured yawning Australian creature!
""" """
import json import json
from typing import Any, List, Mapping, Optional from typing import Any, Mapping
import requests import requests
from langchain.llms.base import LLM from langchain.llms.base import LLM
...@@ -33,11 +33,11 @@ class ColossalCloudLLM(LLM): ...@@ -33,11 +33,11 @@ class ColossalCloudLLM(LLM):
A custom LLM class that integrates LLMs running on the ColossalCloud Platform A custom LLM class that integrates LLMs running on the ColossalCloud Platform
""" """
n: int n: int
gen_config: dict = None gen_config: dict = None
auth_config: dict = None auth_config: dict = None
valid_gen_para: list = ['max_new_tokens', 'top_k', valid_gen_para: list = ["max_new_tokens", "top_k", "top_p", "temperature", "repetition_penalty"]
'top_p', 'temperature', 'repetition_penalty']
def __init__(self, gen_config=None, **kwargs): def __init__(self, gen_config=None, **kwargs):
""" """
...@@ -63,15 +63,15 @@ class ColossalCloudLLM(LLM): ...@@ -63,15 +63,15 @@ class ColossalCloudLLM(LLM):
@property @property
def _llm_type(self) -> str: def _llm_type(self) -> str:
return 'ColossalCloudLLM' return "ColossalCloudLLM"
def set_auth_config(self, **kwargs): def set_auth_config(self, **kwargs):
url = get_from_dict_or_env(kwargs, "url", "URL") url = get_from_dict_or_env(kwargs, "url", "URL")
host = get_from_dict_or_env(kwargs, "host", "HOST") host = get_from_dict_or_env(kwargs, "host", "HOST")
auth_config = {} auth_config = {}
auth_config['endpoint'] = url auth_config["endpoint"] = url
auth_config['Host'] = host auth_config["Host"] = host
self.auth_config = auth_config self.auth_config = auth_config
def _call(self, prompt: str, stop=None, **kwargs: Any) -> str: def _call(self, prompt: str, stop=None, **kwargs: Any) -> str:
@@ -86,7 +86,9 @@ class ColossalCloudLLM(LLM):
        # Update the generation arguments
        for key, value in kwargs.items():
            if key not in self.valid_gen_para:
                raise KeyError(
                    f"Invalid generation parameter: '{key}'. Valid keys are: {', '.join(self.valid_gen_para)}"
                )
            if key in self.gen_config:
                self.gen_config[key] = value
@@ -98,26 +100,16 @@ class ColossalCloudLLM(LLM):
            resp_text = resp_text.split(stopping_words)[0]
        return resp_text

    def text_completion(self, prompt, gen_config, auth_config):
        # Required Parameters
        endpoint = auth_config.pop("endpoint")
        max_new_tokens = gen_config.pop("max_new_tokens")

        # Optional Parameters
        optional_params = ["top_k", "top_p", "temperature", "repetition_penalty"]  # Self.optional
        gen_config = {key: gen_config[key] for key in optional_params if key in gen_config}

        # Define the data payload
        data = {"max_new_tokens": max_new_tokens, "history": [{"instruction": prompt, "response": ""}], **gen_config}
        headers = {"Content-Type": "application/json", **auth_config}  # 'Host',

        # Make the POST request
        response = requests.post(endpoint, headers=headers, data=json.dumps(data))
        response.raise_for_status()  # raise error if return code is not 200(success)
......
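A minimal usage sketch for the class above (not part of this commit): the endpoint URL and host are placeholders, and the sampling arguments are simply examples drawn from valid_gen_para.

# Hypothetical example: point the wrapper at a ColossalCloud deployment and generate.
llm = ColossalCloudLLM(n=1)
llm.set_auth_config(url="https://<your-endpoint>/generate", host="<your-host>")  # placeholder auth values
resp = llm("What do you call an alligator in a vest?", max_new_tokens=64, temperature=0.7)
print(resp)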
@@ -193,4 +193,3 @@ class VllmLLM(LLM):
    def _identifying_params(self) -> Mapping[str, int]:
        """Get the identifying parameters."""
        return {"n": self.n}
@@ -4,7 +4,6 @@ All custom prompt templates are defined here.
from langchain.prompts.prompt import PromptTemplate

# Below are Chinese retrieval qa prompts

_CUSTOM_SUMMARIZER_TEMPLATE_ZH = """请递进式地总结所提供的当前对话,将当前对话的摘要内容添加到先前已有的摘要上,返回一个融合了当前对话的新的摘要。
......
@@ -99,13 +99,7 @@ class CustomRetriever(BaseRetriever):
    def clear_documents(self):
        """Clear all document vectors from database"""
        for source in self.vector_stores:
            index([], self.record_managers[source], self.vector_stores[source], cleanup="full", source_id_key="source")
        self.vector_stores = {}
        self.sql_index_database = {}
        self.record_managers = {}
......
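clear_documents above leans on LangChain's indexing API; below is a stand-alone sketch of the same pattern (not part of this commit), where the vector store and record manager are assumed stand-ins for the entries kept in self.vector_stores / self.record_managers.

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.indexes import SQLRecordManager, index
from langchain.vectorstores import Chroma

# Assumed setup: one vector store plus the record manager that tracked its documents.
vector_store = Chroma(collection_name="docs", embedding_function=HuggingFaceEmbeddings())
record_manager = SQLRecordManager("chroma/docs", db_url="sqlite:///record_manager.db")
record_manager.create_schema()

# Same pattern as clear_documents(): an empty source list with cleanup="full"
# removes every tracked document from the vector store.
index([], record_manager, vector_store, cleanup="full", source_id_key="source")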
import argparse

from colossalqa.retrieval_conversation_universal import UniversalRetrievalConversation

if __name__ == "__main__":
    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--en_model_path", type=str, default=None)
    parser.add_argument("--zh_model_path", type=str, default=None)
    parser.add_argument("--zh_model_name", type=str, default=None)
    parser.add_argument("--en_model_name", type=str, default=None)
    parser.add_argument(
        "--sql_file_path", type=str, default=None, help="path to an empty folder for storing sql files for indexing"
    )
    args = parser.parse_args()

    # Will ask for documents path in running time
    session = UniversalRetrievalConversation(
        files_en=None,
        files_zh=None,
        zh_model_path=args.zh_model_path,
        en_model_path=args.en_model_path,
        zh_model_name=args.zh_model_name,
        en_model_name=args.en_model_name,
        sql_file_path=args.sql_file_path,
    )
    session.start_test_session()
@@ -5,13 +5,7 @@ from colossalqa.chain.retrieval_qa.base import RetrievalQA
from colossalqa.data_loader.document_loader import DocumentLoader
from colossalqa.memory import ConversationBufferWithSummary
from colossalqa.mylogging import get_logger
from colossalqa.prompt.prompt import ZH_RETRIEVAL_QA_REJECTION_ANSWER, ZH_RETRIEVAL_QA_TRIGGER_KEYWORDS
from colossalqa.retriever import CustomRetriever
from langchain import LLMChain
from langchain.embeddings import HuggingFaceEmbeddings
......
from colossalqa.prompt.prompt import PROMPT_DISAMBIGUATE_ZH, PROMPT_RETRIEVAL_QA_ZH, SUMMARY_PROMPT_ZH
from colossalqa.text_splitter import ChineseTextSplitter
ALL_CONFIG = {
    "embed": {
        "embed_name": "m3e",  # embedding model name
        "embed_model_name_or_path": "moka-ai/m3e-base",  # path to embedding model, could be a local path or a huggingface path
        "embed_model_device": {"device": "cpu"},
    },
    "model": {
        "mode": "api",  # "local" for loading models, "api" for using model api
        "model_name": "chatgpt_api",  # local model name, "chatgpt_api" or "pangu_api"
        "model_path": "",  # path to the model, could be a local path or a huggingface path. don't need if using an api
        "device": {"device": "cuda"},
    },
    "splitter": {"name": ChineseTextSplitter},
    "retrieval": {"retri_top_k": 3, "retri_kb_file_path": "./", "verbose": True},  # path to store database files
    "chain": {
        "mem_summary_prompt": SUMMARY_PROMPT_ZH,  # summary prompt template
        "mem_human_prefix": "用户",
        "mem_ai_prefix": "Assistant",
        "mem_max_tokens": 2000,
        "mem_llm_kwargs": {"max_new_tokens": 50, "temperature": 1, "do_sample": True},
        "disambig_prompt": PROMPT_DISAMBIGUATE_ZH,  # disambiguate prompt template
        "disambig_llm_kwargs": {"max_new_tokens": 30, "temperature": 1, "do_sample": True},
        "gen_llm_kwargs": {"max_new_tokens": 100, "temperature": 1, "do_sample": True},
        "gen_qa_prompt": PROMPT_RETRIEVAL_QA_ZH,  # generation prompt template
        "verbose": True,
    },
}
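A short sketch (not part of this commit) of how the "embed" block might be consumed; HuggingFaceEmbeddings is an assumption here, matching the embedding class used elsewhere in the repo.

from langchain.embeddings import HuggingFaceEmbeddings

embed_cfg = ALL_CONFIG["embed"]
embeddings = HuggingFaceEmbeddings(
    model_name=embed_cfg["embed_model_name_or_path"],  # "moka-ai/m3e-base"
    model_kwargs=embed_cfg["embed_model_device"],  # {"device": "cpu"}
)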
import argparse
from typing import List, Union

import config
import uvicorn
from colossalqa.local.llm import ColossalAPI, ColossalLLM
from colossalqa.mylogging import get_logger
from fastapi import FastAPI, Request
from pydantic import BaseModel
from RAG_ChatBot import RAG_ChatBot
from utils import DocAction

logger = get_logger()


def parseArgs():
    parser = argparse.ArgumentParser()
    parser.add_argument("--http_host", default="0.0.0.0")
@@ -36,6 +27,7 @@ class DocUpdateReq(BaseModel):
    doc_files: Union[List[str], str, None] = None
    action: DocAction = DocAction.ADD


class GenerationTaskReq(BaseModel):
    user_input: str
@@ -45,7 +37,7 @@ def update_docs(data: DocUpdateReq, request: Request):
    if data.action == "add":
        if isinstance(data.doc_files, str):
            data.doc_files = [data.doc_files]
        chatbot.load_doc_from_files(files=data.doc_files)
        all_docs = ""
        for doc in chatbot.docs_names:
            all_docs += f"\t{doc}\n\n"
@@ -84,12 +76,13 @@ if __name__ == "__main__":
            "user": "User",
            "max_tokens": all_config["chain"]["disambig_llm_kwargs"]["max_new_tokens"],
            "temperature": all_config["chain"]["disambig_llm_kwargs"]["temperature"],
            "n": 1,  # the number of responses generated
        }
        llm = Pangu(gen_config=gen_config)
        llm.set_auth_config()  # verify user's auth info here
    elif model_name == "chatgpt_api":
        from langchain.llms import OpenAI

        llm = OpenAI()
    else:
        raise ValueError("Unsupported mode.")
......
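A minimal client-side sketch (not part of this commit): the payload shapes follow DocUpdateReq and GenerationTaskReq above, but the host, port, and route names are assumptions, since the route decorators sit outside the visible hunks.

import requests

BASE = "http://0.0.0.0:13666"  # assumed host/port

# Register documents (DocUpdateReq); "add" matches the branch handled in update_docs above.
requests.post(f"{BASE}/update", json={"doc_files": ["./docs/manual.txt"], "action": "add"})

# Ask a question (GenerationTaskReq).
print(requests.post(f"{BASE}/generate", json={"user_input": "How do I install it?"}).json())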
import argparse
import json
import os

import gradio as gr
import requests
from utils import DocAction
def parseArgs():
    parser = argparse.ArgumentParser()
    parser.add_argument("--http_host", default="0.0.0.0")
    parser.add_argument("--http_port", type=int, default=13666)
    return parser.parse_args()


def get_response(data, url):
    headers = {"Content-type": "application/json"}
    response = requests.post(url, json=data, headers=headers)
    response = json.loads(response.content)
    return response


def add_text(history, text):
    history = history + [(text, None)]
    return history, gr.update(value=None, interactive=True)
@@ -28,18 +30,14 @@ def add_file(history, files):
    files_string = "\n".join([os.path.basename(file.name) for file in files])

    doc_files = [file.name for file in files]
    data = {"doc_files": doc_files, "action": DocAction.ADD}
    response = get_response(data, update_url)["response"]
    history = history + [(files_string, response)]
    return history


def bot(history):
    data = {"user_input": history[-1][0].strip()}
    response = get_response(data, gen_url)
    if response["error"] != "":
@@ -51,11 +49,8 @@ def bot(history):
def restart(chatbot, txt):
    # Reset the conversation state and clear the chat history
    data = {"doc_files": "", "action": DocAction.CLEAR}
    get_response(data, update_url)
    return gr.update(value=None), gr.update(value=None, interactive=True)
......
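For orientation, a short sketch (not part of this commit) of the request wiring these Gradio callbacks rely on; the URL construction and route names are guesses, since they are defined outside the visible hunks.

# Assumed wiring: the UI posts to the API server started separately.
args = parseArgs()
update_url = f"http://{args.http_host}:{args.http_port}/update"  # hypothetical route
gen_url = f"http://{args.http_host}:{args.http_port}/generate"  # hypothetical route

# Payload shapes used by the callbacks above:
get_response({"doc_files": "", "action": DocAction.CLEAR}, update_url)  # what restart() sends
get_response({"user_input": "Hello"}, gen_url)  # what bot() sends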