Unverified Commit 715b16c1 authored by Chayenne's avatar Chayenne Committed by GitHub
Browse files

Add support for ipynb (#1786)

parent 9ce8e1a9
# CI workflow: execute the docs notebooks on PRs / manual runs, and build +
# publish the Sphinx site to sgl-project.github.io on pushes to main.
# NOTE(review): the scraped source had all YAML indentation stripped;
# structure reconstructed here with conventional 2-space indentation.
name: Build Documentation

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
  workflow_dispatch:

jobs:
  # Runs the tutorial notebooks end-to-end so broken examples fail the PR.
  execute-notebooks:
    runs-on: 1-gpu-runner
    if: github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch'
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'

      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install -e "python[all]"
          pip install -r docs/requirements.txt
          pip install nbconvert jupyter_client ipykernel ipywidgets matplotlib
          pip install transformers==4.45.2
          pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ --force-reinstall

      # The notebooks' kernelspec name is "python3"; register a matching kernel.
      - name: Setup Jupyter Kernel
        run: |
          python -m ipykernel install --user --name python3 --display-name "Python 3"

      - name: Execute notebooks
        env:
          HF_HOME: /hf_home
          # Quoted so consumers receive the literal strings, not YAML bool/int.
          SGLANG_IS_IN_CI: "true"
          CUDA_VISIBLE_DEVICES: "0"
        run: |
          cd docs/en
          for nb in *.ipynb; do
            if [ -f "$nb" ]; then
              echo "Executing $nb"
              jupyter nbconvert --to notebook --execute --inplace "$nb" \
                --ExecutePreprocessor.timeout=600 \
                --ExecutePreprocessor.kernel_name=python3
            fi
          done

  # Builds the HTML docs and pushes them to the GitHub Pages repository.
  build-and-deploy:
    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
    runs-on: 1-gpu-runner
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'

      - name: Cache Python dependencies
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install -e "python[all]"
          pip install -r docs/requirements.txt
          pip install nbconvert jupyter_client ipykernel ipywidgets matplotlib
          pip install transformers==4.45.2
          pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ --force-reinstall

      # NOTE(review): no sudo — assumes the self-hosted runner executes as root;
      # confirm on the 1-gpu-runner image.
      - name: Install Pandoc
        run: |
          apt-get update
          apt-get install -y pandoc

      - name: Build documentation
        run: |
          cd docs/en
          make html

      - name: Push to sgl-project.github.io
        env:
          GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }}
        run: |
          cd docs/en/_build/html
          # NOTE(review): token embedded in the remote URL is visible in the
          # process list on the runner; consider a credential helper instead.
          git clone https://$GITHUB_TOKEN@github.com/sgl-project/sgl-project.github.io.git ../sgl-project.github.io
          # NOTE(review): `cp -r *` skips dotfiles (e.g. .nojekyll) — confirm
          # none are produced by the build.
          cp -r * ../sgl-project.github.io
          cd ../sgl-project.github.io
          git config user.name "zhaochenyang20"
          git config user.email "zhaochenyang20@gmail.com"
          git add .
          git commit -m "$(date +'%Y-%m-%d %H:%M:%S') - Update documentation"
          git push https://$GITHUB_TOKEN@github.com/sgl-project/sgl-project.github.io.git main
          cd ..
          rm -rf sgl-project.github.io
...@@ -186,3 +186,6 @@ work_dirs/ ...@@ -186,3 +186,6 @@ work_dirs/
*.csv *.csv
!logo.png !logo.png
# docs
/docs/en/_build
\ No newline at end of file
...@@ -7,11 +7,11 @@ repos: ...@@ -7,11 +7,11 @@ repos:
hooks: hooks:
- id: isort - id: isort
- repo: https://github.com/psf/black - repo: https://github.com/psf/black
rev: 24.4.2 rev: 24.10.0
hooks: hooks:
- id: black - id: black
- repo: https://github.com/pre-commit/pre-commit-hooks - repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0 rev: v5.0.0
hooks: hooks:
- id: no-commit-to-branch - id: no-commit-to-branch
\ No newline at end of file
...@@ -26,6 +26,9 @@ extensions = [ ...@@ -26,6 +26,9 @@ extensions = [
"myst_parser", "myst_parser",
"sphinx_copybutton", "sphinx_copybutton",
"sphinxcontrib.mermaid", "sphinxcontrib.mermaid",
"nbsphinx",
"sphinx.ext.mathjax",
"sphinx.ext.autodoc",
] ]
autosectionlabel_prefix_document = True autosectionlabel_prefix_document = True
...@@ -123,3 +126,5 @@ intersphinx_mapping = { ...@@ -123,3 +126,5 @@ intersphinx_mapping = {
"numpy": ("https://numpy.org/doc/stable", None), "numpy": ("https://numpy.org/doc/stable", None),
"torch": ("https://pytorch.org/docs/stable", None), "torch": ("https://pytorch.org/docs/stable", None),
} }
html_theme = "sphinx_book_theme"
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Embedding Model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Launch A Server"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Embedding server is ready. Proceeding with the next steps.\n"
]
}
],
"source": [
"import subprocess\n",
"import time\n",
"import requests\n",
"\n",
"# Equivalent to running this in the shell:\n",
"# python -m sglang.launch_server --model-path Alibaba-NLP/gte-Qwen2-7B-instruct --port 30010 --host 0.0.0.0 --is-embedding --log-level error\n",
"embedding_process = subprocess.Popen(\n",
" [\n",
" \"python\",\n",
" \"-m\",\n",
" \"sglang.launch_server\",\n",
" \"--model-path\",\n",
" \"Alibaba-NLP/gte-Qwen2-7B-instruct\",\n",
" \"--port\",\n",
" \"30010\",\n",
" \"--host\",\n",
" \"0.0.0.0\",\n",
" \"--is-embedding\",\n",
" \"--log-level\",\n",
" \"error\",\n",
" ],\n",
" text=True,\n",
" stdout=subprocess.DEVNULL,\n",
" stderr=subprocess.DEVNULL,\n",
")\n",
"\n",
"while True:\n",
" try:\n",
" response = requests.get(\n",
" \"http://localhost:30010/v1/models\",\n",
" headers={\"Authorization\": \"Bearer None\"},\n",
" )\n",
" if response.status_code == 200:\n",
" break\n",
" except requests.exceptions.RequestException:\n",
" time.sleep(1)\n",
"\n",
"print(\"Embedding server is ready. Proceeding with the next steps.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Use Curl"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0.0083160400390625, 0.0006804466247558594, -0.00809478759765625, -0.0006995201110839844, 0.0143890380859375, -0.0090179443359375, 0.01238250732421875, 0.00209808349609375, 0.0062103271484375, -0.003047943115234375]\n"
]
}
],
"source": [
"# Get the first 10 elements of the embedding\n",
"\n",
"! curl -s http://localhost:30010/v1/embeddings \\\n",
" -H \"Content-Type: application/json\" \\\n",
" -H \"Authorization: Bearer None\" \\\n",
" -d '{\"model\": \"Alibaba-NLP/gte-Qwen2-7B-instruct\", \"input\": \"Once upon a time\"}' \\\n",
" | python3 -c \"import sys, json; print(json.load(sys.stdin)['data'][0]['embedding'][:10])\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using OpenAI Compatible API"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0.00603485107421875, -0.0190582275390625, -0.01273345947265625, 0.01552581787109375, 0.0066680908203125, -0.0135955810546875, 0.01131439208984375, 0.0013713836669921875, -0.0089874267578125, 0.021759033203125]\n"
]
}
],
"source": [
"import openai\n",
"\n",
"client = openai.Client(\n",
" base_url=\"http://127.0.0.1:30010/v1\", api_key=\"None\"\n",
")\n",
"\n",
"# Text embedding example\n",
"response = client.embeddings.create(\n",
" model=\"Alibaba-NLP/gte-Qwen2-7B-instruct\",\n",
" input=\"How are you today\",\n",
")\n",
"\n",
"embedding = response.data[0].embedding[:10]\n",
"print(embedding)"
]
}
],
"metadata": {
"kernelspec": {
   "display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
...@@ -15,7 +15,17 @@ The core features include: ...@@ -15,7 +15,17 @@ The core features include:
:caption: Getting Started :caption: Getting Started
install.md install.md
send_request.ipynb
.. toctree::
:maxdepth: 1
:caption: Backend Tutorial
backend.md backend.md
.. toctree::
:maxdepth: 1
:caption: Frontend Tutorial
frontend.md frontend.md
.. toctree:: .. toctree::
...@@ -29,3 +39,4 @@ The core features include: ...@@ -29,3 +39,4 @@ The core features include:
choices_methods.md choices_methods.md
benchmark_and_profiling.md benchmark_and_profiling.md
troubleshooting.md troubleshooting.md
embedding_model.ipynb
\ No newline at end of file
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Quick Start"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Launch a server\n",
"\n",
"This code uses `subprocess.Popen` to start an SGLang server process, equivalent to executing \n",
"\n",
"```bash\n",
"python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3.1-8B-Instruct \\\n",
    "--port 30000 --host 0.0.0.0 --log-level error\n",
"```\n",
"in your command line and wait for the server to be ready."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Server is ready. Proceeding with the next steps.\n"
]
}
],
"source": [
"import subprocess\n",
"import time\n",
"import requests\n",
"import os\n",
"\n",
"server_process = subprocess.Popen(\n",
" [\n",
" \"python\",\n",
" \"-m\",\n",
" \"sglang.launch_server\",\n",
" \"--model-path\",\n",
" \"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
" \"--port\",\n",
" \"30000\",\n",
" \"--host\",\n",
" \"0.0.0.0\",\n",
" \"--log-level\",\n",
" \"error\",\n",
" ],\n",
" text=True,\n",
" stdout=subprocess.DEVNULL,\n",
" stderr=subprocess.DEVNULL,\n",
")\n",
"\n",
"while True:\n",
" try:\n",
" response = requests.get(\n",
" \"http://localhost:30000/v1/models\",\n",
" headers={\"Authorization\": \"Bearer None\"},\n",
" )\n",
" if response.status_code == 200:\n",
" break\n",
" except requests.exceptions.RequestException:\n",
" time.sleep(1)\n",
"\n",
"print(\"Server is ready. Proceeding with the next steps.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Send a Request\n",
"\n",
"Once the server is running, you can send test requests using curl."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"id\":\"1449c9c20d4448299431a57facc68d7a\",\"object\":\"chat.completion\",\"created\":1729816891,\"model\":\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\"choices\":[{\"index\":0,\"message\":{\"role\":\"assistant\",\"content\":\"LLM stands for Large Language Model. It's a type of artificial intelligence (AI) designed to process and generate human-like language. LLMs are trained on vast amounts of text data, which enables them to learn patterns, relationships, and nuances of language.\\n\\nLarge Language Models are typically trained using a technique called deep learning, where multiple layers of artificial neural networks are used to analyze and understand the input data. This training process involves feeding the model massive amounts of text data, which it uses to learn and improve its language understanding and generation capabilities.\\n\\nSome key characteristics of LLMs include:\\n\\n1. **Language understanding**: LLMs can comprehend natural language, including its syntax, semantics, and context.\\n2. **Language generation**: LLMs can generate text, including responses to user input, articles, stories, and more.\\n3. **Contextual understanding**: LLMs can understand the context in which language is being used, including the topic, tone, and intent.\\n4. **Self-supervised learning**: LLMs can learn from large datasets without explicit supervision or labeling.\\n\\nLLMs have a wide range of applications, including:\\n\\n1. **Virtual assistants**: LLMs power virtual assistants like Siri, Alexa, and Google Assistant.\\n2. **Language translation**: LLMs can translate text from one language to another.\\n3. **Text summarization**: LLMs can summarize long pieces of text into shorter, more digestible versions.\\n4. **Content generation**: LLMs can generate content, such as news articles, product descriptions, and social media posts.\\n5. 
**Chatbots**: LLMs can power chatbots that can have human-like conversations with users.\\n\\nThe Large Language Model I am, is a type of LLM that has been trained on a massive dataset of text and can answer a wide range of questions and engage in conversation.\"},\"logprobs\":null,\"finish_reason\":\"stop\",\"matched_stop\":128009}],\"usage\":{\"prompt_tokens\":47,\"total_tokens\":426,\"completion_tokens\":379,\"prompt_tokens_details\":null}}"
]
}
],
"source": [
"!curl http://localhost:30000/v1/chat/completions \\\n",
" -H \"Content-Type: application/json\" \\\n",
" -H \"Authorization: Bearer None\" \\\n",
" -d '{\"model\": \"meta-llama/Meta-Llama-3.1-8B-Instruct\", \"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}, {\"role\": \"user\", \"content\": \"What is a LLM?\"}]}'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using OpenAI Compatible API\n",
"\n",
"SGLang supports OpenAI-compatible APIs. Here are Python examples:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ChatCompletion(id='16757c3dd6e14a6e9bafd1122f84e4c5', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Here are 3 countries and their capitals:\\n\\n1. **Country:** Japan\\n**Capital:** Tokyo\\n\\n2. **Country:** Australia\\n**Capital:** Canberra\\n\\n3. **Country:** Brazil\\n**Capital:** Brasília', refusal=None, role='assistant', function_call=None, tool_calls=None), matched_stop=128009)], created=1729816893, model='meta-llama/Meta-Llama-3.1-8B-Instruct', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=46, prompt_tokens=49, total_tokens=95, prompt_tokens_details=None))\n"
]
}
],
"source": [
"import openai\n",
"\n",
"# Always assign an api_key, even if not specified during server initialization.\n",
"# Setting an API key during server initialization is strongly recommended.\n",
"\n",
"client = openai.Client(\n",
" base_url=\"http://127.0.0.1:30000/v1\", api_key=\"None\"\n",
")\n",
"\n",
"# Chat completion example\n",
"\n",
"response = client.chat.completions.create(\n",
" model=\"meta-llama/Meta-Llama-3.1-8B-Instruct\",\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": \"You are a helpful AI assistant\"},\n",
" {\"role\": \"user\", \"content\": \"List 3 countries and their capitals.\"},\n",
" ],\n",
" temperature=0,\n",
" max_tokens=64,\n",
")\n",
"print(response)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"import signal\n",
"import gc\n",
"import torch\n",
"\n",
"def terminate_process(process):\n",
" try:\n",
" process.terminate()\n",
" try:\n",
" process.wait(timeout=5)\n",
" except subprocess.TimeoutExpired:\n",
" if os.name != 'nt':\n",
" try:\n",
" pgid = os.getpgid(process.pid)\n",
" os.killpg(pgid, signal.SIGTERM)\n",
" time.sleep(1)\n",
" if process.poll() is None:\n",
" os.killpg(pgid, signal.SIGKILL)\n",
" except ProcessLookupError:\n",
" pass\n",
" else:\n",
" process.kill()\n",
" process.wait()\n",
" except Exception as e:\n",
" print(f\"Warning: {e}\")\n",
" finally:\n",
" gc.collect()\n",
" if torch.cuda.is_available():\n",
" torch.cuda.empty_cache()\n",
" torch.cuda.ipc_collect()\n",
"\n",
"terminate_process(server_process)\n",
"time.sleep(2)"
]
}
],
"metadata": {
"kernelspec": {
   "display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
...@@ -8,3 +8,5 @@ sphinxcontrib-mermaid ...@@ -8,3 +8,5 @@ sphinxcontrib-mermaid
pillow pillow
pydantic pydantic
urllib3<2.0.0 urllib3<2.0.0
nbsphinx
pandoc
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment