Unverified commit 61cf00e1 authored by Chayenne, committed by GitHub

change file tree (#1859)


Co-authored-by: Chayenne <zhaochenyang@g.ucla.edu>
parent b9fd178f
......@@ -38,14 +38,8 @@ jobs:
GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }}
run: |
cd docs
for nb in *.ipynb; do
if [ -f "$nb" ]; then
echo "Executing $nb"
jupyter nbconvert --to notebook --execute --inplace "$nb" \
--ExecutePreprocessor.timeout=600 \
--ExecutePreprocessor.kernel_name=python3
fi
done
make clean
make compile
make html
cd _build/html
......
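In this hunk the per-notebook `jupyter nbconvert` loop is removed from the workflow and replaced by the Makefile targets introduced later in this commit. Under that assumption, the equivalent local build would look roughly like this (a sketch; it presumes you run it from the repository root with the same `docs/` Makefile):

```bash
# Local equivalent of the updated CI step (a sketch, not part of the commit).
cd docs
make clean      # remove the previous Sphinx build
make compile    # execute every *.ipynb in place via nbconvert (new target below)
make html       # build the Sphinx site into _build/html
```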
......@@ -44,11 +44,5 @@ jobs:
- name: Execute notebooks
run: |
cd docs
for nb in *.ipynb; do
if [ -f "$nb" ]; then
echo "Executing $nb"
jupyter nbconvert --to notebook --execute --inplace "$nb" \
--ExecutePreprocessor.timeout=600 \
--ExecutePreprocessor.kernel_name=python3
fi
done
\ No newline at end of file
make clean
make compile
\ No newline at end of file
......@@ -40,13 +40,13 @@ The core features include:
- **Active Community**: SGLang is open-source and backed by an active community with industry adoption.
## Install
See [https://sgl-project.github.io/install.html](https://sgl-project.github.io/install.html)
See [https://sgl-project.github.io/starts/install.html](https://sgl-project.github.io/starts/install.html)
## Backend: SGLang Runtime (SRT)
See [https://sgl-project.github.io/backend.html](https://sgl-project.github.io/backend.html)
See [https://sgl-project.github.io/backend/backend.html](https://sgl-project.github.io/backend/backend.html)
## Frontend: Structured Generation Language (SGLang)
See [https://sgl-project.github.io/frontend.html](https://sgl-project.github.io/frontend.html)
See [https://sgl-project.github.io/frontend/frontend.html](https://sgl-project.github.io/frontend/frontend.html)
## Benchmark And Performance
Learn more in our release blogs: [v0.2 blog](https://lmsys.org/blog/2024-07-25-sglang-llama3/), [v0.3 blog](https://lmsys.org/blog/2024-09-04-sglang-v0-3/)
......
......@@ -12,7 +12,18 @@ BUILDDIR = _build
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# New target to execute Jupyter notebooks in place before building the docs
compile:
	find $(SOURCEDIR) -name '*.ipynb' | while read nb; do \
		if [ -f "$$nb" ]; then \
			echo "Executing $$nb"; \
			jupyter nbconvert --to notebook --execute --inplace "$$nb" \
				--ExecutePreprocessor.timeout=600 \
				--ExecutePreprocessor.kernel_name=python3; \
		fi; \
	done
.PHONY: help Makefile compile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
......
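The new `compile` target simply runs `nbconvert` over every notebook under the Sphinx source directory. Executing a single notebook by hand is the same call; the notebook path below is illustrative only (taken from the file layout elsewhere in this commit):

```bash
# Manual equivalent of one iteration of the compile target's loop.
jupyter nbconvert --to notebook --execute --inplace docs/references/embedding_model.ipynb \
    --ExecutePreprocessor.timeout=600 \
    --ExecutePreprocessor.kernel_name=python3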
......@@ -30,47 +30,181 @@
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-11-01T02:47:32.337369Z",
"iopub.status.busy": "2024-11-01T02:47:32.337032Z",
"iopub.status.idle": "2024-11-01T02:47:59.540926Z",
"shell.execute_reply": "2024-11-01T02:47:59.539861Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
" warnings.warn(\n",
"[2024-10-29 21:07:15] server_args=ServerArgs(model_path='Alibaba-NLP/gte-Qwen2-7B-instruct', tokenizer_path='Alibaba-NLP/gte-Qwen2-7B-instruct', tokenizer_mode='auto', skip_tokenizer_init=False, load_format='auto', trust_remote_code=False, dtype='auto', kv_cache_dtype='auto', quantization=None, context_length=None, device='cuda', served_model_name='Alibaba-NLP/gte-Qwen2-7B-instruct', chat_template=None, is_embedding=True, host='0.0.0.0', port=30010, mem_fraction_static=0.88, max_running_requests=None, max_total_tokens=None, chunked_prefill_size=8192, max_prefill_tokens=16384, schedule_policy='lpm', schedule_conservativeness=1.0, tp_size=1, stream_interval=1, random_seed=568040040, constrained_json_whitespace_pattern=None, log_level='info', log_level_http=None, log_requests=False, show_time_cost=False, api_key=None, file_storage_pth='SGLang_storage', enable_cache_report=False, watchdog_timeout=600, dp_size=1, load_balance_method='round_robin', dist_init_addr=None, nnodes=1, node_rank=0, json_model_override_args='{}', enable_double_sparsity=False, ds_channel_config_path=None, ds_heavy_channel_num=32, ds_heavy_token_num=256, ds_heavy_channel_type='qk', ds_sparse_decode_threshold=4096, lora_paths=None, max_loras_per_batch=8, attention_backend='flashinfer', sampling_backend='flashinfer', grammar_backend='outlines', disable_flashinfer=False, disable_flashinfer_sampling=False, disable_radix_cache=False, disable_regex_jump_forward=False, disable_cuda_graph=False, disable_cuda_graph_padding=False, disable_disk_cache=False, disable_custom_all_reduce=False, disable_mla=False, disable_penalizer=False, disable_nan_detection=False, enable_overlap_schedule=False, enable_mixed_chunk=False, enable_torch_compile=False, torch_compile_max_bs=32, cuda_graph_max_bs=160, torchao_config='', enable_p2p_check=False, triton_attention_reduce_in_fp32=False, num_continuous_decode_steps=1)\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-31 19:47:37] server_args=ServerArgs(model_path='Alibaba-NLP/gte-Qwen2-7B-instruct', tokenizer_path='Alibaba-NLP/gte-Qwen2-7B-instruct', tokenizer_mode='auto', skip_tokenizer_init=False, load_format='auto', trust_remote_code=False, dtype='auto', kv_cache_dtype='auto', quantization=None, context_length=None, device='cuda', served_model_name='Alibaba-NLP/gte-Qwen2-7B-instruct', chat_template=None, is_embedding=True, host='0.0.0.0', port=30010, mem_fraction_static=0.88, max_running_requests=None, max_total_tokens=None, chunked_prefill_size=8192, max_prefill_tokens=16384, schedule_policy='lpm', schedule_conservativeness=1.0, tp_size=1, stream_interval=1, random_seed=314021918, constrained_json_whitespace_pattern=None, decode_log_interval=40, log_level='info', log_level_http=None, log_requests=False, show_time_cost=False, api_key=None, file_storage_pth='SGLang_storage', enable_cache_report=False, watchdog_timeout=600, dp_size=1, load_balance_method='round_robin', dist_init_addr=None, nnodes=1, node_rank=0, json_model_override_args='{}', enable_double_sparsity=False, ds_channel_config_path=None, ds_heavy_channel_num=32, ds_heavy_token_num=256, ds_heavy_channel_type='qk', ds_sparse_decode_threshold=4096, lora_paths=None, max_loras_per_batch=8, attention_backend='flashinfer', sampling_backend='flashinfer', grammar_backend='outlines', disable_flashinfer=False, disable_flashinfer_sampling=False, disable_radix_cache=False, disable_regex_jump_forward=False, disable_cuda_graph=False, disable_cuda_graph_padding=False, disable_disk_cache=False, disable_custom_all_reduce=False, disable_mla=False, disable_penalizer=False, disable_nan_detection=False, enable_overlap_schedule=False, enable_mixed_chunk=False, enable_torch_compile=False, torch_compile_max_bs=32, cuda_graph_max_bs=160, torchao_config='', enable_p2p_check=False, triton_attention_reduce_in_fp32=False, num_continuous_decode_steps=1)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
" warnings.warn(\n",
"/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
" warnings.warn(\n",
"[2024-10-29 21:07:20 TP0] Init torch distributed begin.\n",
"[2024-10-29 21:07:20 TP0] Load weight begin. avail mem=47.27 GB\n",
"[2024-10-29 21:07:21 TP0] lm_eval is not installed, GPTQ may not be usable\n",
"INFO 10-29 21:07:22 weight_utils.py:243] Using model weights format ['*.safetensors']\n",
"Loading safetensors checkpoint shards: 0% Completed | 0/7 [00:00<?, ?it/s]\n",
"Loading safetensors checkpoint shards: 14% Completed | 1/7 [00:00<00:03, 1.65it/s]\n",
"Loading safetensors checkpoint shards: 29% Completed | 2/7 [00:01<00:04, 1.02it/s]\n",
"Loading safetensors checkpoint shards: 43% Completed | 3/7 [00:03<00:04, 1.24s/it]\n",
"Loading safetensors checkpoint shards: 57% Completed | 4/7 [00:05<00:04, 1.47s/it]\n",
"Loading safetensors checkpoint shards: 71% Completed | 5/7 [00:07<00:03, 1.62s/it]\n",
"Loading safetensors checkpoint shards: 86% Completed | 6/7 [00:08<00:01, 1.64s/it]\n",
"Loading safetensors checkpoint shards: 100% Completed | 7/7 [00:10<00:00, 1.63s/it]\n",
"Loading safetensors checkpoint shards: 100% Completed | 7/7 [00:10<00:00, 1.49s/it]\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-31 19:47:43 TP0] Init torch distributed begin.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-31 19:47:44 TP0] Load weight begin. avail mem=47.27 GB\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-31 19:47:44 TP0] lm_eval is not installed, GPTQ may not be usable\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO 10-31 19:47:45 weight_utils.py:243] Using model weights format ['*.safetensors']\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Loading safetensors checkpoint shards: 0% Completed | 0/7 [00:00<?, ?it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Loading safetensors checkpoint shards: 14% Completed | 1/7 [00:00<00:03, 1.96it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Loading safetensors checkpoint shards: 29% Completed | 2/7 [00:01<00:03, 1.39it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Loading safetensors checkpoint shards: 43% Completed | 3/7 [00:02<00:03, 1.13it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Loading safetensors checkpoint shards: 57% Completed | 4/7 [00:03<00:02, 1.00it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Loading safetensors checkpoint shards: 71% Completed | 5/7 [00:04<00:02, 1.05s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Loading safetensors checkpoint shards: 86% Completed | 6/7 [00:05<00:01, 1.09s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\r",
"Loading safetensors checkpoint shards: 100% Completed | 7/7 [00:07<00:00, 1.11s/it]\n",
"\r",
"Loading safetensors checkpoint shards: 100% Completed | 7/7 [00:07<00:00, 1.01s/it]\n",
"\n",
"[2024-10-29 21:07:32 TP0] Load weight end. type=Qwen2ForCausalLM, dtype=torch.float16, avail mem=32.91 GB\n",
"[2024-10-29 21:07:33 TP0] Memory pool end. avail mem=4.56 GB\n",
"[2024-10-29 21:07:33 TP0] max_total_num_tokens=509971, max_prefill_tokens=16384, max_running_requests=2049, context_len=131072\n",
"[2024-10-29 21:07:33] INFO: Started server process [2650986]\n",
"[2024-10-29 21:07:33] INFO: Waiting for application startup.\n",
"[2024-10-29 21:07:33] INFO: Application startup complete.\n",
"[2024-10-29 21:07:33] INFO: Uvicorn running on http://0.0.0.0:30010 (Press CTRL+C to quit)\n",
"[2024-10-29 21:07:34] INFO: 127.0.0.1:47812 - \"GET /v1/models HTTP/1.1\" 200 OK\n"
"[2024-10-31 19:47:53 TP0] Load weight end. type=Qwen2ForCausalLM, dtype=torch.float16, avail mem=32.91 GB\n",
"[2024-10-31 19:47:53 TP0] Memory pool end. avail mem=4.56 GB\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-31 19:47:53 TP0] max_total_num_tokens=509971, max_prefill_tokens=16384, max_running_requests=2049, context_len=131072\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-31 19:47:54] INFO: Started server process [1552642]\n",
"[2024-10-31 19:47:54] INFO: Waiting for application startup.\n",
"[2024-10-31 19:47:54] INFO: Application startup complete.\n",
"[2024-10-31 19:47:54] INFO: Uvicorn running on http://0.0.0.0:30010 (Press CTRL+C to quit)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-31 19:47:54] INFO: 127.0.0.1:47776 - \"GET /v1/models HTTP/1.1\" 200 OK\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-31 19:47:55] INFO: 127.0.0.1:50344 - \"GET /get_model_info HTTP/1.1\" 200 OK\n",
"[2024-10-31 19:47:55 TP0] Prefill batch. #new-seq: 1, #new-token: 6, #cached-token: 0, cache hit rate: 0.00%, token usage: 0.00, #running-req: 0, #queue-req: 0\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-31 19:47:55] INFO: 127.0.0.1:50352 - \"POST /encode HTTP/1.1\" 200 OK\n",
"[2024-10-31 19:47:55] The server is fired up and ready to roll!\n"
]
},
{
"data": {
"text/html": [
"<strong style='color: #00008B;'><br> This cell combines server and notebook output. <br> <br> Typically, the server runs in a separate terminal, <br> but we combine the output of server and notebook to demonstrate the usage better.<br> <br> In our documentation, server output is in gray, notebook output is highlighted.<br> </strong>"
"<strong style='color: #00008B;'><br><br> NOTE: Typically, the server runs in a separate terminal.<br> In this notebook, we run the server and notebook code together, so their outputs are combined.<br> To improve clarity, the server logs are displayed in the original black color, while the notebook outputs are highlighted in blue.<br> </strong>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
......@@ -78,16 +212,6 @@
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-29 21:07:34] INFO: 127.0.0.1:41780 - \"GET /get_model_info HTTP/1.1\" 200 OK\n",
"[2024-10-29 21:07:34 TP0] Prefill batch. #new-seq: 1, #new-token: 6, #cached-token: 0, cache hit rate: 0.00%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-29 21:07:35] INFO: 127.0.0.1:41792 - \"POST /encode HTTP/1.1\" 200 OK\n",
"[2024-10-29 21:07:35] The server is fired up and ready to roll!\n"
]
}
],
"source": [
......@@ -118,20 +242,21 @@
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-11-01T02:47:59.543958Z",
"iopub.status.busy": "2024-11-01T02:47:59.543670Z",
"iopub.status.idle": "2024-11-01T02:47:59.591699Z",
"shell.execute_reply": "2024-11-01T02:47:59.590809Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-28 02:10:30 TP0] Prefill batch. #new-seq: 1, #new-token: 4, #cached-token: 0, cache hit rate: 0.00%, token usage: 0.00, #running-req: 0, #queue-req: 0\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-28 02:10:31] INFO: 127.0.0.1:48094 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
"[2024-10-31 19:47:59 TP0] Prefill batch. #new-seq: 1, #new-token: 4, #cached-token: 0, cache hit rate: 0.00%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-31 19:47:59] INFO: 127.0.0.1:50358 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
]
},
{
......@@ -174,18 +299,21 @@
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-11-01T02:47:59.594229Z",
"iopub.status.busy": "2024-11-01T02:47:59.594049Z",
"iopub.status.idle": "2024-11-01T02:48:00.006233Z",
"shell.execute_reply": "2024-11-01T02:48:00.005255Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-28 02:10:31] INFO: 127.0.0.1:48110 - \"GET /get_model_info HTTP/1.1\" 200 OK\n",
"[2024-10-28 02:10:31 TP0] Prefill batch. #new-seq: 1, #new-token: 6, #cached-token: 0, cache hit rate: 0.00%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-28 02:10:31] INFO: 127.0.0.1:48114 - \"POST /encode HTTP/1.1\" 200 OK\n",
"[2024-10-28 02:10:31] The server is fired up and ready to roll!\n",
"[2024-10-28 02:10:31 TP0] Prefill batch. #new-seq: 1, #new-token: 1, #cached-token: 3, cache hit rate: 21.43%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-28 02:10:31] INFO: 127.0.0.1:48118 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
"[2024-10-31 19:47:59 TP0] Prefill batch. #new-seq: 1, #new-token: 1, #cached-token: 3, cache hit rate: 21.43%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-31 19:47:59] INFO: 127.0.0.1:50362 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
]
},
{
......@@ -228,13 +356,20 @@
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"metadata": {
"execution": {
"iopub.execute_input": "2024-11-01T02:48:00.008858Z",
"iopub.status.busy": "2024-11-01T02:48:00.008689Z",
"iopub.status.idle": "2024-11-01T02:48:01.872542Z",
"shell.execute_reply": "2024-11-01T02:48:01.871573Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:127: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
"/home/chenyang/miniconda3/envs/AlphaMeemory/lib/python3.11/site-packages/transformers/utils/hub.py:128: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.\n",
" warnings.warn(\n"
]
},
......@@ -242,8 +377,8 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-28 02:10:32 TP0] Prefill batch. #new-seq: 1, #new-token: 1, #cached-token: 3, cache hit rate: 33.33%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-28 02:10:32] INFO: 127.0.0.1:48124 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
"[2024-10-31 19:48:01 TP0] Prefill batch. #new-seq: 1, #new-token: 1, #cached-token: 3, cache hit rate: 33.33%, token usage: 0.00, #running-req: 0, #queue-req: 0\n",
"[2024-10-31 19:48:01] INFO: 127.0.0.1:50366 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
]
},
{
......@@ -284,20 +419,15 @@
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[2024-10-28 02:10:32] INFO: Shutting down\n",
"[2024-10-28 02:10:32] INFO: Waiting for application shutdown.\n",
"[2024-10-28 02:10:32] INFO: Application shutdown complete.\n",
"[2024-10-28 02:10:32] INFO: Finished server process [1188896]\n",
"W1028 02:10:32.490000 140389363193408 torch/_inductor/compile_worker/subproc_pool.py:126] SubprocPool unclean exit\n"
]
"metadata": {
"execution": {
"iopub.execute_input": "2024-11-01T02:48:01.875204Z",
"iopub.status.busy": "2024-11-01T02:48:01.874915Z",
"iopub.status.idle": "2024-11-01T02:48:02.193734Z",
"shell.execute_reply": "2024-11-01T02:48:02.192158Z"
}
],
},
"outputs": [],
"source": [
"terminate_process(embedding_process)"
]
......
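The notebook above launches `Alibaba-NLP/gte-Qwen2-7B-instruct` as an embedding server on port 30010 and, per the logs, queries the OpenAI-compatible `/v1/embeddings` route. A request of that shape could be issued from the shell as follows; only the route, port, and model name come from the logs above, while the input text is made up:

```bash
# Hypothetical embedding request against the server started in the notebook.
curl -s http://127.0.0.1:30010/v1/embeddings \
  -H "Content-Type: application/json" \
  -d '{"model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "input": "Once upon a time"}'
```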
# Deploy the documents
import os
from datetime import datetime


def run_cmd(cmd):
    print(cmd)
    os.system(cmd)


run_cmd("cd $DOC_SITE_PATH; git pull")

# (Optional) Remove old files
# run_cmd("rm -rf $ALPA_SITE_PATH/*")

run_cmd("cp -r _build/html/* $DOC_SITE_PATH")

cmd_message = f"Update {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
run_cmd(
    f"cd $DOC_SITE_PATH; git add .; git commit -m '{cmd_message}'; git push origin main"
)
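This helper copies the built HTML into a separate site repository and pushes it; `DOC_SITE_PATH` must point at a checkout of that repository. A hypothetical invocation (the script filename and site-repo location are assumptions, not given in the diff):

```bash
# Hypothetical usage of the deploy helper above.
export DOC_SITE_PATH=$HOME/sgl-project.github.io    # assumed checkout of the docs site repo
cd docs && make clean && make compile && make html   # build first (targets from the Makefile above)
python deploy.py                                     # assumed filename for the script above
```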
......@@ -15,35 +15,35 @@ The core features include:
:maxdepth: 1
:caption: Getting Started
install.md
send_request.ipynb
starts/install.md
starts/send_request.ipynb
.. toctree::
:maxdepth: 1
:caption: Backend Tutorial
openai_api.ipynb
backend.md
backend/openai_api.ipynb
backend/backend.md
.. toctree::
:maxdepth: 1
:caption: Frontend Tutorial
frontend.md
frontend/frontend.md
.. toctree::
:maxdepth: 1
:caption: References
sampling_params.md
hyperparameter_tuning.md
model_support.md
contributor_guide.md
choices_methods.md
benchmark_and_profiling.md
troubleshooting.md
embedding_model.ipynb
learn_more.md
references/sampling_params.md
references/hyperparameter_tuning.md
references/model_support.md
references/contributor_guide.md
references/choices_methods.md
references/benchmark_and_profiling.md
references/troubleshooting.md
references/embedding_model.ipynb
references/learn_more.md
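Taken together, the new toctree paths imply the reorganized `docs/` layout behind the commit title "change file tree"; a rough sketch reconstructed from the entries above (not an exhaustive listing of the directory):

```bash
# Implied docs/ layout after this commit (reconstructed from the toctree entries above).
# docs/
# ├── starts/      install.md, send_request.ipynb
# ├── backend/     openai_api.ipynb, backend.md
# ├── frontend/    frontend.md
# └── references/  sampling_params.md, hyperparameter_tuning.md, model_support.md,
#                  contributor_guide.md, choices_methods.md, benchmark_and_profiling.md,
#                  troubleshooting.md, embedding_model.ipynb, learn_more.md
```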