Commit 118f1fc7 authored by maxiao1

sglang v0.5.2 & support Qwen3-Next-80B-A3B-Instruct

"""
Usage:
export TOGETHER_API_KEY=sk-******
python3 together_example_chat.py
"""
import os
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
def single():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
def stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter():
print(out, end="", flush=True)
print()
def batch():
states = multi_turn_question.run_batch(
[
{
"question_1": "What is the capital of the United States?",
"question_2": "List two local attractions.",
},
{
"question_1": "What is the capital of France?",
"question_2": "What is the population of this city?",
},
]
)
for s in states:
print(s.messages())
if __name__ == "__main__":
backend = sgl.OpenAI(
model_name="mistralai/Mixtral-8x7B-Instruct-v0.1",
base_url="https://api.together.xyz/v1",
api_key=os.environ.get("TOGETHER_API_KEY"),
)
sgl.set_default_backend(backend)
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()
"""
Usage:
export TOGETHER_API_KEY=sk-******
python3 together_example_complete.py
"""
import os
import sglang as sgl
@sgl.function
def few_shot_qa(s, question):
s += """The following are questions with answers.
Q: What is the capital of France?
A: Paris
Q: What is the capital of Germany?
A: Berlin
Q: What is the capital of Italy?
A: Rome
"""
s += "Q: " + question + "\n"
s += "A:" + sgl.gen("answer", stop="\n", temperature=0)
def single():
state = few_shot_qa.run(question="What is the capital of the United States?")
answer = state["answer"].strip().lower()
assert "washington" in answer, f"answer: {state['answer']}"
print(state.text())
def stream():
state = few_shot_qa.run(
question="What is the capital of the United States?", stream=True
)
for out in state.text_iter("answer"):
print(out, end="", flush=True)
print()
def batch():
states = few_shot_qa.run_batch(
[
{"question": "What is the capital of the United States?"},
{"question": "What is the capital of China?"},
]
)
for s in states:
print(s["answer"])
if __name__ == "__main__":
backend = sgl.OpenAI(
model_name="mistralai/Mixtral-8x7B-Instruct-v0.1",
is_chat_model=False,
base_url="https://api.together.xyz/v1",
api_key=os.environ.get("TOGETHER_API_KEY"),
)
sgl.set_default_backend(backend)
# Run a single request
print("\n========== single ==========\n")
single()
# Stream output
print("\n========== stream ==========\n")
stream()
# Run a batch of requests
print("\n========== batch ==========\n")
batch()
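"""
Chinese-language variant of the Harry Potter character JSON example: the regex below constrains
the output to a JSON object with Chinese field names. Usage is analogous to the other examples in
this commit; the launch command and script name here are illustrative placeholders, not part of
the original example:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python chinese_character_gen.py
"""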
import sglang as sgl
character_regex = (
r"""\{\n"""
+ r""" "姓名": "[^"]{1,32}",\n"""
+ r""" "学院": "(格兰芬多|赫奇帕奇|拉文克劳|斯莱特林)",\n"""
+ r""" "血型": "(纯血|混血|麻瓜)",\n"""
+ r""" "职业": "(学生|教师|傲罗|魔法部|食死徒|凤凰社成员)",\n"""
+ r""" "魔杖": \{\n"""
+ r""" "材质": "[^"]{1,32}",\n"""
+ r""" "杖芯": "[^"]{1,32}",\n"""
+ r""" "长度": [0-9]{1,2}\.[0-9]{0,2}\n"""
+ r""" \},\n"""
+ r""" "存活": "(存活|死亡)",\n"""
+ r""" "守护神": "[^"]{1,32}",\n"""
+ r""" "博格特": "[^"]{1,32}"\n"""
+ r"""\}"""
)
@sgl.function
def character_gen(s, name):
s += name + " 是一名哈利波特系列小说中的角色。请填写以下关于这个角色的信息。"
s += """\
这是一个例子
{
"姓名": "哈利波特",
"学院": "格兰芬多",
"血型": "混血",
"职业": "学生",
"魔杖": {
"材质": "冬青木",
"杖芯": "凤凰尾羽",
"长度": 11.0
},
"存活": "存活",
"守护神": "麋鹿",
"博格特": "摄魂怪"
}
"""
s += f"现在请你填写{name}的信息:\n"
s += sgl.gen("json_output", max_tokens=256, regex=character_regex)
def main():
backend = sgl.RuntimeEndpoint("http://localhost:30000")
sgl.set_default_backend(backend)
ret = character_gen.run(name="赫敏格兰杰", temperature=0)
print(ret.text())
if __name__ == "__main__":
main()
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python choices_logprob.py
"""
import sglang as sgl
@sgl.function
def tool_use(s, question):
s += "To answer this question: " + question + ", "
s += "I need to use a " + sgl.gen("tool", choices=["calculator", "search engine"])
def main():
# Run one case
question = "What is 5 + 5?"
state = tool_use.run(question)
print("questions:", question)
print("choice:", state["tool"])
meta_info = state.get_meta_info("tool")
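    # input_token_logprobs holds one entry per choice (index 0 = "calculator", index 1 = "search engine").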
print("logprobs of choice 1", meta_info["input_token_logprobs"][0])
print("logprobs of choice 2", meta_info["input_token_logprobs"][1])
print("-" * 50)
# Run a batch
questions = [
"What is 5 + 6?",
"Who is Michael Jordan?",
]
states = tool_use.run_batch([{"question": q} for q in questions])
for question, state in zip(questions, states):
print("questions:", question)
print("choice:", state["tool"])
meta_info = state.get_meta_info("tool")
print("logprobs of choice 1", meta_info["input_token_logprobs"][0])
print("logprobs of choice 2", meta_info["input_token_logprobs"][1])
print("-" * 50)
if __name__ == "__main__":
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
main()
from math import exp
from pprint import pformat
import sglang as sgl
YELLOW = "\033[1;33m"
GREEN = "\033[1;32m"
BLUE = "\033[1;34m"
CLEAR = "\033[1;0m"
@sgl.function
def cot_decoding(s, question, get_top_k, is_chat_model, verbose):
"""CoT Decoding: http://arxiv.org/abs/2402.10200"""
if is_chat_model:
s += sgl.user("Question: " + question + "\nAnswer:")
s += sgl.assistant_begin()
else:
s += "Question: " + question + "\nAnswer:"
step_0 = s.fork(1)[0]
forks = s.fork(get_top_k)
answer_forks = s.fork(get_top_k)
# decoding step 0
step_0 += sgl.gen(
"get_top_k",
max_tokens=0,
return_logprob=True,
top_logprobs_num=get_top_k,
return_text_in_logprobs=True,
)
logprobs = step_0.get_meta_info("get_top_k")["output_top_logprobs"][0]
print("Decoding step 0:", ", ".join(pformat(token[2]) for token in logprobs))
for idx, (f, token) in enumerate(zip(forks, logprobs)):
logprob, token_id, text = token
f += text
if text == "<|end_of_text|>":
print(
f"{YELLOW}Path #{idx} {pformat(text)}[{exp(logprob):.3f}] (score=nan, answer=nan){CLEAR}"
)
continue
# continue greedy decoding
f += sgl.gen(
"answer",
temperature=0,
max_tokens=1024,
return_logprob=True,
top_logprobs_num=2,
return_text_in_logprobs=True,
)
# calculate probability disparity between the top and secondary tokens
x1s = [exp(xt[0][0]) for xt in f.get_meta_info("answer")["output_top_logprobs"]]
x2s = [exp(xt[1][0]) for xt in f.get_meta_info("answer")["output_top_logprobs"]]
tokens = [xt[0][2] for xt in f.get_meta_info("answer")["output_top_logprobs"]]
delta = (sum(x1s) - sum(x2s)) / len(x1s)
# extract the answer span (without the '<|end_of_text|>' token)
answer_forks[idx] += text + f["answer"] + "\nSo the answer is"
answer_forks[idx] += sgl.gen(
"answer_span",
temperature=0,
max_tokens=64,
return_logprob=True,
top_logprobs_num=2,
return_text_in_logprobs=True,
)
answer = answer_forks[idx]["answer_span"].replace("\n", " ").strip(":")
print(
f"{YELLOW}Path #{idx} {pformat(text)}[{exp(logprob):.3f}] (score={delta}, answer={answer}){CLEAR}"
)
generated_text = str(answer_forks[idx])[len("ProgramState(") : -1]
print(f"{BLUE}{pformat(generated_text)}{CLEAR}")
if verbose:
answer_tokens = [
xt[0][2]
for xt in answer_forks[idx].get_meta_info("answer_span")[
"output_top_logprobs"
]
]
answer_x1s = [
exp(xt[0][0])
for xt in answer_forks[idx].get_meta_info("answer_span")[
"output_top_logprobs"
]
]
answer_x2s = [
exp(xt[1][0])
for xt in answer_forks[idx].get_meta_info("answer_span")[
"output_top_logprobs"
]
]
for token, x1, x2 in zip(tokens, x1s, x2s):
print(f" {GREEN}{pformat(token)}{CLEAR}({x1:.3f}-{x2:.3f})", end="")
print("\n===========")
for token, x1, x2 in zip(answer_tokens, answer_x1s, answer_x2s):
print(f" {GREEN}{pformat(token)}{CLEAR}({x1:.3f}-{x2:.3f})", end="")
print()
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
state = cot_decoding.run(
question=r"Claire makes a 3 egg omelet every morning for breakfast. How many dozens of eggs will she eat in 4 weeks?",
get_top_k=10,
is_chat_model=True,
verbose=False,
)
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python json_decode.py
"""
from enum import Enum
from pydantic import BaseModel
import sglang as sgl
from sglang.srt.constrained.outlines_backend import build_regex_from_object
character_regex = (
r"""\{\n"""
+ r""" "name": "[\w\d\s]{1,16}",\n"""
+ r""" "house": "(Gryffindor|Slytherin|Ravenclaw|Hufflepuff)",\n"""
+ r""" "blood status": "(Pure-blood|Half-blood|Muggle-born)",\n"""
+ r""" "occupation": "(student|teacher|auror|ministry of magic|death eater|order of the phoenix)",\n"""
+ r""" "wand": \{\n"""
+ r""" "wood": "[\w\d\s]{1,16}",\n"""
+ r""" "core": "[\w\d\s]{1,16}",\n"""
+ r""" "length": [0-9]{1,2}\.[0-9]{0,2}\n"""
+ r""" \},\n"""
+ r""" "alive": "(Alive|Deceased)",\n"""
+ r""" "patronus": "[\w\d\s]{1,16}",\n"""
+ r""" "bogart": "[\w\d\s]{1,16}"\n"""
+ r"""\}"""
)
@sgl.function
def character_gen(s, name):
s += (
name
+ " is a character in Harry Potter. Please fill in the following information about this character.\n"
)
s += "The constrained regex is:\n"
s += character_regex + "\n"
s += "The JSON output is:\n"
s += sgl.gen("json_output", max_tokens=256, regex=character_regex)
def driver_character_gen():
state = character_gen.run(name="Hermione Granger")
print(state.text())
class Weapon(str, Enum):
sword = "sword"
axe = "axe"
mace = "mace"
spear = "spear"
bow = "bow"
crossbow = "crossbow"
class Wizard(BaseModel):
name: str
age: int
weapon: Weapon
@sgl.function
def pydantic_wizard_gen(s):
s += "Give me a description about a wizard in the JSON format.\n"
s += sgl.gen(
"character",
max_tokens=128,
temperature=0,
regex=build_regex_from_object(Wizard), # Requires pydantic >= 2.0
)
def driver_pydantic_wizard_gen():
state = pydantic_wizard_gen.run()
print(state.text())
if __name__ == "__main__":
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
driver_character_gen()
# driver_pydantic_wizard_gen()
# NOTE: Currently this can only be run through HTTP requests.
from concurrent.futures import ThreadPoolExecutor
from json_decode import character_regex
from sglang.utils import http_request
character_names = ["Hermione Granger", "Ron Weasley", "Harry Potter"]
base_url = "http://localhost:30000"
prompt = "is a character in Harry Potter. Please fill in the following information about this character.\n"
def openai_api_request(name):
data = {
"model": "",
"prompt": name + prompt,
"temperature": 0,
"max_tokens": 128,
"regex": character_regex,
"logprobs": 3,
}
res = http_request(base_url + "/v1/completions", json=data).json()
# with open(f"json_logprobs_{name.replace(' ', '_')}_tmp.json", "w") as fout:
# fout.write(json.dumps(res, indent=4))
logprobs = res["choices"][0]["logprobs"]
usage = res["usage"]
assert len(logprobs["token_logprobs"]) == len(logprobs["tokens"])
assert len(logprobs["token_logprobs"]) == len(logprobs["top_logprobs"])
assert len(logprobs["token_logprobs"]) == usage["completion_tokens"] - 1
return res
def srt_api_request(name):
data = {
"text": name + prompt,
"sampling_params": {
"temperature": 0,
"max_new_tokens": 128,
"regex": character_regex,
},
"return_logprob": True,
"logprob_start_len": 0,
"top_logprobs_num": 3,
"return_text_in_logprobs": True,
}
res = http_request(base_url + "/generate", json=data).json()
# with open(f"json_logprobs_{name.replace(' ', '_')}_tmp.json", "w") as fout:
# fout.write(json.dumps(res, indent=4))
meta_info = res["meta_info"]
assert len(meta_info["input_token_logprobs"]) == len(
meta_info["input_top_logprobs"]
)
assert len(meta_info["output_token_logprobs"]) == len(
meta_info["output_top_logprobs"]
)
assert len(meta_info["input_token_logprobs"]) == meta_info["prompt_tokens"]
assert len(meta_info["output_token_logprobs"]) == meta_info["completion_tokens"] - 1
return res
def pretty_print(res):
meta_info = res["meta_info"]
print("\n\n", "=" * 30, "Prefill", "=" * 30)
for i in range(len(meta_info["input_token_logprobs"])):
print(f"{str(meta_info['input_token_logprobs'][i][2].encode()): <20}", end="")
top_ks = (
[str(t[2].encode()) for t in meta_info["input_top_logprobs"][i]]
if meta_info["input_top_logprobs"][i]
else []
)
for top_k in top_ks:
print(f"{top_k: <15}", end="")
print()
print("\n\n", "=" * 30, "Decode", "=" * 30)
for i in range(len(meta_info["output_token_logprobs"])):
print(f"{str(meta_info['output_token_logprobs'][i][2].encode()): <20}", end="")
top_ks = [str(t[2].encode()) for t in meta_info["output_top_logprobs"][i]]
for top_k in top_ks:
print(f"{top_k: <15}", end="")
print()
print(res["text"])
if __name__ == "__main__":
with ThreadPoolExecutor() as executor:
ress = executor.map(srt_api_request, character_names)
for res in ress:
pretty_print(res)
openai_api_request("Hermione Granger")
"""
Usage:
pip install opencv-python-headless
python3 srt_example_llava_v.py
"""
import argparse
import csv
import json
import os
import time
import requests
import sglang as sgl
@sgl.function
def video_qa(s, num_frames, video_path, question):
s += sgl.user(sgl.video(video_path, num_frames) + question)
s += sgl.assistant(sgl.gen("answer"))
def single(path, num_frames=16):
state = video_qa.run(
num_frames=num_frames,
video_path=path,
question="Please provide a detailed description of the video, focusing on the main subjects, their actions, the background scenes",
temperature=0.0,
max_new_tokens=1024,
)
print(state["answer"], "\n")
def split_into_chunks(lst, num_chunks):
"""Split a list into a specified number of chunks."""
    # Compute a base chunk size with integer division; fall back to a single chunk
    # when the list is shorter than num_chunks.
    chunk_size = len(lst) // num_chunks
    if chunk_size == 0:
        chunk_size = len(lst)
    # Slice the list into chunks of chunk_size. If the length is not evenly divisible,
    # this yields extra chunks carrying the remainder; no items are dropped.
    chunks = [lst[i : i + chunk_size] for i in range(0, len(lst), chunk_size)]
    # Pad with empty chunks so at least num_chunks chunks are returned.
    chunks.extend([[] for _ in range(num_chunks - len(chunks))])
return chunks
def save_batch_results(batch_video_files, states, cur_chunk, batch_idx, save_dir):
csv_filename = f"{save_dir}/chunk_{cur_chunk}_batch_{batch_idx}.csv"
with open(csv_filename, "w", newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["video_name", "answer"])
for video_path, state in zip(batch_video_files, states):
video_name = os.path.basename(video_path)
writer.writerow([video_name, state["answer"]])
def compile_and_cleanup_final_results(cur_chunk, num_batches, save_dir):
final_csv_filename = f"{save_dir}/final_results_chunk_{cur_chunk}.csv"
with open(final_csv_filename, "w", newline="") as final_csvfile:
writer = csv.writer(final_csvfile)
writer.writerow(["video_name", "answer"])
for batch_idx in range(num_batches):
batch_csv_filename = f"{save_dir}/chunk_{cur_chunk}_batch_{batch_idx}.csv"
with open(batch_csv_filename, "r") as batch_csvfile:
reader = csv.reader(batch_csvfile)
next(reader) # Skip header row
for row in reader:
writer.writerow(row)
os.remove(batch_csv_filename)
def find_video_files(video_dir):
# Check if the video_dir is actually a file
if os.path.isfile(video_dir):
# If it's a file, return it as a single-element list
return [video_dir]
# Original logic to find video files in a directory
video_files = []
for root, dirs, files in os.walk(video_dir):
for file in files:
if file.endswith((".mp4", ".avi", ".mov")):
video_files.append(os.path.join(root, file))
return video_files
def batch(video_dir, save_dir, cur_chunk, num_chunks, num_frames=16, batch_size=64):
video_files = find_video_files(video_dir)
chunked_video_files = split_into_chunks(video_files, num_chunks)[cur_chunk]
num_batches = 0
for i in range(0, len(chunked_video_files), batch_size):
batch_video_files = chunked_video_files[i : i + batch_size]
print(f"Processing batch of {len(batch_video_files)} video(s)...")
if not batch_video_files:
print("No video files found in the specified directory.")
return
batch_input = [
{
"num_frames": num_frames,
"video_path": video_path,
"question": "Please provide a detailed description of the video, focusing on the main subjects, their actions, the background scenes.",
}
for video_path in batch_video_files
]
start_time = time.perf_counter()
states = video_qa.run_batch(batch_input, max_new_tokens=512, temperature=0.2)
total_time = time.perf_counter() - start_time
average_time = total_time / len(batch_video_files)
print(
f"Number of videos in batch: {len(batch_video_files)}. Average processing time per video: {average_time:.2f} seconds. Total time for this batch: {total_time:.2f} seconds"
)
save_batch_results(batch_video_files, states, cur_chunk, num_batches, save_dir)
num_batches += 1
compile_and_cleanup_final_results(cur_chunk, num_batches, save_dir)
if __name__ == "__main__":
url = "https://raw.githubusercontent.com/EvolvingLMMs-Lab/sglang/dev/onevision_local/assets/jobs.mp4"
cache_dir = os.path.expanduser("~/.cache")
file_path = os.path.join(cache_dir, "jobs.mp4")
os.makedirs(cache_dir, exist_ok=True)
response = requests.get(url)
response.raise_for_status() # Raise an exception for bad responses
with open(file_path, "wb") as f:
f.write(response.content)
print(f"File downloaded and saved to: {file_path}")
# Create the parser
parser = argparse.ArgumentParser(
description="Run video processing with specified port."
)
# Add an argument for the port
parser.add_argument(
"--port",
type=int,
default=30000,
help="The master port for distributed serving.",
)
parser.add_argument(
"--chunk-idx", type=int, default=0, help="The index of the chunk to process."
)
parser.add_argument(
"--num-chunks", type=int, default=8, help="The number of chunks to process."
)
parser.add_argument(
"--save-dir",
type=str,
default="./work_dirs/llava_video",
help="The directory to save the processed video files.",
)
parser.add_argument(
"--video-dir",
type=str,
default=os.path.expanduser("~/.cache/jobs.mp4"),
help="The directory or path for the processed video files.",
)
parser.add_argument(
"--model-path",
type=str,
default="lmms-lab/LLaVA-NeXT-Video-7B",
help="The model path for the video processing.",
)
parser.add_argument(
"--num-frames",
type=int,
default=16,
help="The number of frames to process in each video.",
)
parser.add_argument("--mm_spatial_pool_stride", type=int, default=2)
# Parse the arguments
args = parser.parse_args()
cur_port = args.port
cur_chunk = args.chunk_idx
num_chunks = args.num_chunks
num_frames = args.num_frames
if "34b" in args.model_path.lower():
tokenizer_path = "liuhaotian/llava-v1.6-34b-tokenizer"
elif "7b" in args.model_path.lower():
tokenizer_path = "llava-hf/llava-1.5-7b-hf"
    else:
        print("Unsupported model path. Please specify a model path containing '7b' or '34b'.")
        exit()
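    # Override the HF model config so the server loads the LLaVA video architecture with the requested frame settings.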
model_override_args = {}
model_override_args["mm_spatial_pool_stride"] = args.mm_spatial_pool_stride
model_override_args["architectures"] = ["LlavaVidForCausalLM"]
model_override_args["num_frames"] = args.num_frames
model_override_args["model_type"] = "llava"
if "34b" in args.model_path.lower():
model_override_args["image_token_index"] = 64002
if args.num_frames == 32:
model_override_args["rope_scaling"] = {"factor": 2.0, "rope_type": "linear"}
model_override_args["max_sequence_length"] = 4096 * 2
model_override_args["tokenizer_model_max_length"] = 4096 * 2
elif args.num_frames < 32:
pass
else:
print(
"The maximum number of frames to process is 32. Please specify a valid number of frames."
)
exit()
runtime = sgl.Runtime(
model_path=args.model_path, # "liuhaotian/llava-v1.6-vicuna-7b",
tokenizer_path=tokenizer_path,
port=cur_port,
json_model_override_args=json.dumps(model_override_args),
tp_size=1,
)
sgl.set_default_backend(runtime)
print(f"chat template: {runtime.endpoint.chat_template.name}")
# Run a single request
print("\n========== single ==========\n")
root = args.video_dir
if os.path.isfile(root):
video_files = [root]
else:
video_files = [
os.path.join(root, f)
for f in os.listdir(root)
if f.endswith((".mp4", ".avi", ".mov"))
] # Add more extensions if needed
    start_time = time.perf_counter()  # Start time for processing a single video
    processed_videos = video_files[:1]  # Only the first video is processed in this single-request demo
    for cur_video in processed_videos:
        print(cur_video)
        single(cur_video, num_frames)
    end_time = time.perf_counter()  # End time for processing a single video
    total_time = end_time - start_time
    # Average over the videos actually processed, not the whole directory.
    average_time = total_time / len(processed_videos)
    print(f"Average processing time per video: {average_time:.2f} seconds")
runtime.shutdown()
# # Run a batch of requests
# print("\n========== batch ==========\n")
# if not os.path.exists(args.save_dir):
# os.makedirs(args.save_dir)
# batch(args.video_dir, args.save_dir, cur_chunk, num_chunks, num_frames, num_chunks)
# runtime.shutdown()
#!/bin/bash
##### USAGE #####
# - First node:
# ```sh
# bash examples/usage/llava_video/srt_example_llava_v.sh K 0 YOUR_VIDEO_PATH YOUR_MODEL_PATH FRAMES_PER_VIDEO
# ```
# - Second node:
# ```sh
# bash examples/usage/llava_video/srt_example_llava_v.sh K 1 YOUR_VIDEO_PATH YOUR_MODEL_PATH FRAMES_PER_VIDEO
# ```
# - The K-th node:
# ```sh
# bash examples/usage/llava_video/srt_example_llava_v.sh K K-1 YOUR_VIDEO_PATH YOUR_MODEL_PATH FRAMES_PER_VIDEO
# ```
# Replace `K`, `YOUR_VIDEO_PATH`, `YOUR_MODEL_PATH`, and `FRAMES_PER_VIDEO` with your specific details.
# CURRENT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CURRENT_ROOT=$(dirname "$0")
echo ${CURRENT_ROOT}
cd ${CURRENT_ROOT}
export PYTHONWARNINGS=ignore
START_TIME=$(date +%s) # Capture start time
NUM_NODES=$1
CUR_NODES_IDX=$2
VIDEO_DIR=$3
MODEL_PATH=$4
NUM_FRAMES=$5
# FRAME_FORMAT=$6
# FRAME_FORMAT=$(echo $FRAME_FORMAT | tr '[:lower:]' '[:upper:]')
# # Check if FRAME_FORMAT is either JPEG or PNG
# if [[ "$FRAME_FORMAT" != "JPEG" && "$FRAME_FORMAT" != "PNG" ]]; then
# echo "Error: FRAME_FORMAT must be either JPEG or PNG."
# exit 1
# fi
# export TARGET_FRAMES=$TARGET_FRAMES
echo "Each video you will sample $NUM_FRAMES frames"
# export FRAME_FORMAT=$FRAME_FORMAT
# echo "The frame format is $FRAME_FORMAT"
# Assuming GPULIST is a bash array containing your GPUs
GPULIST=(0 1 2 3 4 5 6 7)
LOCAL_CHUNKS=${#GPULIST[@]}
echo "Number of GPUs in GPULIST: $LOCAL_CHUNKS"
ALL_CHUNKS=$((NUM_NODES * LOCAL_CHUNKS))
# Calculate GPUs per chunk
GPUS_PER_CHUNK=1
echo $GPUS_PER_CHUNK
for IDX in $(seq 1 $LOCAL_CHUNKS); do
(
START=$(((IDX-1) * GPUS_PER_CHUNK))
LENGTH=$GPUS_PER_CHUNK # Length for slicing, not the end index
CHUNK_GPUS=(${GPULIST[@]:$START:$LENGTH})
# Convert the chunk GPUs array to a comma-separated string
CHUNK_GPUS_STR=$(IFS=,; echo "${CHUNK_GPUS[*]}")
LOCAL_IDX=$((CUR_NODES_IDX * LOCAL_CHUNKS + IDX))
echo "Chunk $(($LOCAL_IDX - 1)) will run on GPUs $CHUNK_GPUS_STR"
        # Pick a random port in the 10000+ range for this chunk's server to avoid collisions.
        PORT=$((10000 + RANDOM % 55536))
MAX_RETRIES=10
RETRY_COUNT=0
COMMAND_STATUS=1 # Initialize as failed
while [ $RETRY_COUNT -lt $MAX_RETRIES ] && [ $COMMAND_STATUS -ne 0 ]; do
echo "Running chunk $(($LOCAL_IDX - 1)) on GPUs $CHUNK_GPUS_STR with port $PORT. Attempt $(($RETRY_COUNT + 1))"
CUDA_VISIBLE_DEVICES=$CHUNK_GPUS_STR python3 srt_example_llava_v.py \
--port $PORT \
--num-chunks $ALL_CHUNKS \
--chunk-idx $(($LOCAL_IDX - 1)) \
--save-dir work_dirs/llava_next_video_inference_results \
--video-dir $VIDEO_DIR \
--model-path $MODEL_PATH \
--num-frames $NUM_FRAMES #&
            # The python command above runs in the foreground, so capture its exit status directly.
            COMMAND_STATUS=$?
if [ $COMMAND_STATUS -ne 0 ]; then
echo "Execution failed for chunk $(($LOCAL_IDX - 1)), attempt $(($RETRY_COUNT + 1)). Retrying..."
RETRY_COUNT=$(($RETRY_COUNT + 1))
sleep 180 # Wait a bit before retrying
else
echo "Execution succeeded for chunk $(($LOCAL_IDX - 1))."
fi
done
if [ $COMMAND_STATUS -ne 0 ]; then
echo "Execution failed for chunk $(($LOCAL_IDX - 1)) after $MAX_RETRIES attempts."
fi
) #&
sleep 2 # Slight delay to stagger the start times
done
wait
cat work_dirs/llava_next_video_inference_results/final_results_chunk_*.csv > work_dirs/llava_next_video_inference_results/final_results_node_${CUR_NODES_IDX}.csv
END_TIME=$(date +%s) # Capture end time
ELAPSED_TIME=$(($END_TIME - $START_TIME))
echo "Total execution time: $ELAPSED_TIME seconds."
"""
Usage:
***Note: for speculative execution to work, the user must put all "gen" calls inside "assistant"
and show the desired answer format in "assistant". Each "gen" call should have a stop token.
Stream mode is not supported in speculative execution.
E.g.
correct:
sgl.assistant("\nName:" + sgl.gen("name", stop="\n") + "\nBirthday:" + sgl.gen("birthday", stop="\n") + "\nJob:" + sgl.gen("job", stop="\n"))
incorrect:
s += sgl.assistant("\nName:" + sgl.gen("name", stop="\n"))
s += sgl.assistant("\nBirthday:" + sgl.gen("birthday", stop="\n"))
s += sgl.assistant("\nJob:" + sgl.gen("job", stop="\n"))
export OPENAI_API_KEY=sk-******
python3 openai_chat_speculative.py
"""
import sglang as sgl
from sglang import OpenAI, function, set_default_backend
@function(num_api_spec_tokens=256)
def gen_character_spec(s):
s += sgl.system("You are a helpful assistant.")
s += sgl.user("Construct a character within the following format:")
s += sgl.assistant(
"Name: Steve Jobs.\nBirthday: February 24, 1955.\nJob: Apple CEO.\n"
)
s += sgl.user("Please generate new Name, Birthday and Job.\n")
s += sgl.assistant(
"Name:"
+ sgl.gen("name", stop="\n")
+ "\nBirthday:"
+ sgl.gen("birthday", stop="\n")
+ "\nJob:"
+ sgl.gen("job", stop="\n")
)
@function(num_api_spec_tokens=256)
def gen_character_spec_no_few_shot(s):
s += sgl.user("Construct a character. For each field stop with a newline\n")
s += sgl.assistant(
"Name:"
+ sgl.gen("name", stop="\n")
+ "\nAge:"
+ sgl.gen("age", stop="\n")
+ "\nJob:"
+ sgl.gen("job", stop="\n")
)
@function
def gen_character_normal(s):
s += sgl.system("You are a helpful assistant.")
s += sgl.user("What's the answer of 23 + 8?")
s += sgl.assistant(sgl.gen("answer", max_tokens=64))
@function(num_api_spec_tokens=1024)
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user("Answer questions in the following format:")
s += sgl.user(
"Question 1: What is the capital of France?\nQuestion 2: What is the population of this city?\n"
)
s += sgl.assistant(
"Answer 1: The capital of France is Paris.\nAnswer 2: The population of Paris in 2024 is estimated to be around 2.1 million for the city proper.\n"
)
s += sgl.user("Question 1: " + question_1 + "\nQuestion 2: " + question_2)
s += sgl.assistant(
"Answer 1: "
+ sgl.gen("answer_1", stop="\n")
+ "\nAnswer 2: "
+ sgl.gen("answer_2", stop="\n")
)
def test_spec_single_turn():
backend.token_usage.reset()
state = gen_character_spec.run()
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- name:", state["name"])
print("-- birthday:", state["birthday"])
print("-- job:", state["job"])
print(backend.token_usage)
def test_inaccurate_spec_single_turn():
state = gen_character_spec_no_few_shot.run()
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- name:", state["name"])
print("\n-- age:", state["age"])
print("\n-- job:", state["job"])
def test_normal_single_turn():
state = gen_character_normal.run()
for m in state.messages():
print(m["role"], ":", m["content"])
def test_spec_multi_turn():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions in the capital of the United States.",
)
for m in state.messages():
print(m["role"], ":", m["content"])
print("\n-- answer_1 --\n", state["answer_1"])
print("\n-- answer_2 --\n", state["answer_2"])
def test_spec_multi_turn_stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
for out in state.text_iter():
print(out, end="", flush=True)
if __name__ == "__main__":
backend = OpenAI("gpt-4-turbo")
set_default_backend(backend)
print("\n========== test spec single turn ==========\n")
# expect reasonable answer for each field
test_spec_single_turn()
print("\n========== test inaccurate spec single turn ==========\n")
# expect incomplete or unreasonable answers
test_inaccurate_spec_single_turn()
print("\n========== test normal single turn ==========\n")
# expect reasonable answer
test_normal_single_turn()
print("\n========== test spec multi turn ==========\n")
# expect answer with same format as in the few shot
test_spec_multi_turn()
print("\n========== test spec multi turn stream ==========\n")
# expect error in stream_executor: stream is not supported...
test_spec_multi_turn_stream()
"""
Usage:
python3 openai_speculative.py
"""
from sglang import OpenAI, function, gen, set_default_backend
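# num_api_spec_tokens enables speculative execution: a single API call generates up to that many
# tokens at once, and the result is then split across the gen() calls below (which is why each gen
# has a stop string).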
@function(num_api_spec_tokens=64)
def gen_character_spec(s):
s += "Construct a character within the following format:\n"
s += "Name: Steve Jobs.\nBirthday: February 24, 1955.\nJob: Apple CEO.\n"
s += "\nPlease generate new Name, Birthday and Job.\n"
s += "Name:" + gen("name", stop="\n") + "\nBirthday:" + gen("birthday", stop="\n")
s += "\nJob:" + gen("job", stop="\n") + "\n"
@function
def gen_character_no_spec(s):
s += "Construct a character within the following format:\n"
s += "Name: Steve Jobs.\nBirthday: February 24, 1955.\nJob: Apple CEO.\n"
s += "\nPlease generate new Name, Birthday and Job.\n"
s += "Name:" + gen("name", stop="\n") + "\nBirthday:" + gen("birthday", stop="\n")
s += "\nJob:" + gen("job", stop="\n") + "\n"
@function(num_api_spec_tokens=64)
def gen_character_spec_no_few_shot(s):
# s += "Construct a character with name, birthday, and job:\n"
s += "Construct a character:\n"
s += "Name:" + gen("name", stop="\n") + "\nBirthday:" + gen("birthday", stop="\n")
s += "\nJob:" + gen("job", stop="\n") + "\n"
if __name__ == "__main__":
backend = OpenAI("gpt-3.5-turbo-instruct")
set_default_backend(backend)
    # Avoid shadowing the imported `function` decorator.
    for fn in [
        gen_character_spec,
        gen_character_no_spec,
        gen_character_spec_no_few_shot,
    ]:
        backend.token_usage.reset()
        print(f"function: {fn.func.__name__}")
        state = fn.run()
print("...name:", state["name"])
print("...birthday:", state["birthday"])
print("...job:", state["job"])
print(backend.token_usage)
print()
"""
Usage:
python3 parallel_sample.py
"""
import sglang as sgl
@sgl.function
def parallel_sample(s, question, n):
s += (
"Question: Compute 1 + 2 + 3\n"
"Reasoning: I need to use a calculator.\n"
"Tool: calculator\n"
"Answer: 6\n"
"Question: Compute 3 + 2 + 2\n"
"Reasoning: I will try a calculator.\n"
"Tool: calculator\n"
"Answer: 7\n"
)
s += "Question: " + question + "\n"
forks = s.fork(n)
forks += "Reasoning:" + sgl.gen("reasoning", stop="\n") + "\n"
forks += "Tool:" + sgl.gen("tool", choices=["calculator", "browser"]) + "\n"
forks += "Answer:" + sgl.gen("answer", stop="\n") + "\n"
forks.join()
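    # After join(), each generated variable is available on the parent state as a list with one entry per fork.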
sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo-instruct"))
# sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
state = parallel_sample.run(question="Compute 5 + 2 + 4.", n=5, temperature=1.0)
for i in range(5):
obj = {
"reasoning": state["reasoning"][i],
"tool": state["tool"][i],
"answer": state["answer"][i],
}
print(f"[{i}], {obj}")
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# RAG Powered by SGLang & Chroma Evaluated using Parea\n",
"\n",
"In this notebook, we will build a simple RAG pipeline using SGLang to execute our LLM calls, Chroma as vector database for retrieval and [Parea](https://www.parea.ai) for tracing and evaluation. We will then evaluate the performance of our RAG pipeline. The dataset we will use was created by [Virat](https://twitter.com/virattt) and contains 100 questions, contexts and answers from the Airbnb 2023 10k filing.\n",
"\n",
"The RAG pipeline consists of two steps:\n",
"1. Retrieval: Given a question, we retrieve the relevant context from all provided contexts.\n",
"2. Generation: Given the question and the retrieved context, we generate an answer.\n",
"\n",
"ℹ️ This notebook requires an OpenAI API key.\n",
"\n",
"ℹ️ This notebook requires a Parea API key, which can be created [here](https://docs.parea.ai/api-reference/authentication#parea-api-key)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setting up the environment\n",
"\n",
"We will first install the necessary packages: `sglang`, `parea-ai` and `chromadb`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# note, if you use a Mac M1 chip, you might need to install grpcio 1.59.0 first such that installing chromadb works\n",
"# !pip install grpcio==1.59.0\n",
"\n",
"!pip install sglang[openai] parea-ai chromadb"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Create a Parea API key as outlined [here](https://docs.parea.ai/api-reference/authentication#parea-api-key) and save it in a `.env` file as `PAREA_API_KEY=your-api-key`."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Indexing the data\n",
"\n",
"Now it's time to download the data & index it! For that, we create a collection called `contexts` in Chroma and add the contexts as documents."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"from typing import List\n",
"\n",
"import chromadb\n",
"\n",
"path_qca = \"airbnb-2023-10k-qca.json\"\n",
"\n",
"if not os.path.exists(path_qca):\n",
" !wget https://virattt.github.io/datasets/abnb-2023-10k.json -O airbnb-2023-10k-qca.json\n",
"\n",
"with open(path_qca, \"r\") as f:\n",
" question_context_answers = json.load(f)\n",
"\n",
"chroma_client = chromadb.PersistentClient()\n",
"collection = chroma_client.get_or_create_collection(name=\"contexts\")\n",
"if collection.count() == 0:\n",
" collection.add(\n",
" documents=[qca[\"context\"] for qca in question_context_answers],\n",
" ids=[str(i) for i in range(len(question_context_answers))],\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Defining the RAG pipeline\n",
"\n",
"We will start with importing the necessary packages, setting up tracing of OpenAI calls via Parea and setting OpenAI as the default backend for SGLang."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import time\n",
"\n",
"from dotenv import load_dotenv\n",
"\n",
"from sglang import function, user, assistant, gen, set_default_backend, OpenAI\n",
"from sglang.lang.interpreter import ProgramState\n",
"from parea import Parea, trace\n",
"\n",
"\n",
"load_dotenv()\n",
"\n",
"os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n",
"\n",
"p = Parea(api_key=os.getenv(\"PAREA_API_KEY\"), project_name=\"rag_sglang\")\n",
"p.integrate_with_sglang()\n",
"\n",
"set_default_backend(OpenAI(\"gpt-3.5-turbo\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we can define our retrieval step shown below. Notice, the `trace` decorator which will automatically trace inputs, output, latency, etc. of that call."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@trace\n",
"def retrieval(question: str) -> List[str]:\n",
" return collection.query(query_texts=[question], n_results=1)[\"documents\"][0]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Next we will define the generation step which uses SGLang to execute the LLM call."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@function\n",
"def generation_sglang(s, question: str, *context: str):\n",
" context = \"\\n\".join(context)\n",
" s += user(\n",
" f\"Given this question:\\n{question}\\n\\nAnd this context:\\n{context}\\n\\nAnswer the question.\"\n",
" )\n",
" s += assistant(gen(\"answer\"))\n",
"\n",
"\n",
"@trace\n",
"def generation(question: str, *context):\n",
" state: ProgramState = generation_sglang.run(question, *context)\n",
" while not state.stream_executor.is_finished:\n",
" time.sleep(1)\n",
" return state.stream_executor.variables[\"answer\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we can tie it together and execute a sample query."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@trace\n",
"def rag_pipeline(question: str) -> str:\n",
" contexts = retrieval(question)\n",
" return generation(question, *contexts)\n",
"\n",
"\n",
"rag_pipeline(\n",
" \"When did the World Health Organization formally declare an end to the COVID-19 global health emergency?\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Debug Trace\n",
"\n",
"The output is unfortunately wrong! Using the traced pipeline, we can see that\n",
"\n",
"- the context is relevant to the question and contains the correct information\n",
"- but, the generation step is cut off as max tokens is set to 16\n",
"\n",
"When opening the generation step in the playground and rerunning the prompt with max. tokens set to 1000, the correct answer is produced.\n",
"\n",
"![RAG Trace](https://drive.google.com/uc?id=1QI243ogGjzbO01tUrR72g9rFoGzUJqVH)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Evaluating RAG Pipelines\n",
"\n",
"Before we apply above's fix, let's dive into evaluating RAG pipelines.\n",
"\n",
"RAG pipelines consist of a retrieval step to fetch relevant information and a generation step to generate a response to a users question. A RAG pipeline can fail at either step. E.g. the retrieval step can fail to find relevant information which makes generating the correct impossible. Another failure mode is that the generation step doesn't leverage the retrieved information correctly. We will apply the following evaluation metrics to understand different failure modes:\n",
"\n",
"- `context_relevancy`: measures how relevant the context is given the question\n",
"- `percent_target_supported_by_context`: measures how much of the target answer is supported by the context; this will give an upper ceiling of how well the generation step can perform\n",
"- `answer_context_faithfulness`: measures how much the generated answer utilizes the context\n",
"- `answer_matches_target`: measures how well the generated answer matches the target answer judged by a LLM and gives a sense of accuracy of our entire pipeline\n",
"\n",
"To use these evaluation metrics, we can import them from `parea.evals.rag` and `parea.evals.general` and apply them to a function by specifying in the `trace` decorator which evaluation metrics to use. The `@trace` decorator will automatically log the results of the evaluation metrics to the Parea dashboard.\n",
"\n",
"Applying them to the retrieval step:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from parea.evals.rag import (\n",
" context_query_relevancy_factory,\n",
" percent_target_supported_by_context_factory,\n",
")\n",
"\n",
"\n",
"context_relevancy_eval = context_query_relevancy_factory()\n",
"percent_target_supported_by_context = percent_target_supported_by_context_factory()\n",
"\n",
"\n",
"@trace(eval_funcs=[context_relevancy_eval, percent_target_supported_by_context])\n",
"def retrieval(question: str) -> List[str]:\n",
" return collection.query(query_texts=[question], n_results=1)[\"documents\"][0]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we can apply `answer_context_faithfulness` and `answer_matches_target` to the generation step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from parea.evals.general import answer_matches_target_llm_grader_factory\n",
"from parea.evals.rag import answer_context_faithfulness_statement_level_factory\n",
"\n",
"\n",
"answer_context_faithfulness = answer_context_faithfulness_statement_level_factory()\n",
"answer_matches_target_llm_grader = answer_matches_target_llm_grader_factory()\n",
"\n",
"\n",
"@function\n",
"def generation_sglang(s, question: str, *context: str):\n",
" context = \"\\n\".join(context)\n",
" s += user(\n",
" f\"Given this question:\\n{question}\\n\\nAnd this context:\\n{context}\\n\\nAnswer the question.\"\n",
" )\n",
" s += assistant(gen(\"answer\", max_tokens=1_000))\n",
"\n",
"\n",
"@trace(eval_funcs=[answer_context_faithfulness, answer_matches_target_llm_grader])\n",
"def generation(question: str, *context):\n",
" state: ProgramState = generation_sglang.run(question, *context)\n",
" while not state.stream_executor.is_finished:\n",
" time.sleep(1)\n",
" return state.stream_executor.variables[\"answer\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, we tie them together & execute the original sample query."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@trace\n",
"def rag_pipeline(question: str) -> str:\n",
" contexts = retrieval(question)\n",
" return generation(question, *contexts)\n",
"\n",
"\n",
"rag_pipeline(\n",
" \"When did the World Health Organization formally declare an end to the COVID-19 global health emergency?\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Great, the answer is correct! Can you spot the line where we fixed the output truncation issue?\n",
"\n",
"The evaluation scores appear in the bottom right of the logs (screenshot below). Note, that there is no score for `answer_matches_target_llm_grader` and `percent_target_supported_by_context` as these evals are automatically skipped if the target answer is not provided.\n",
"\n",
"![Fixed Max. Tokens](max-tokens-fixed-rag-trace.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Running an experiment\n",
"\n",
"Now we are (almost) ready to evaluate the performance of our RAG pipeline on the entire dataset. First, we will need to apply the `nest_asyncio` package to avoid issues with the Jupyter notebook event loop."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install nest-asyncio\n",
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Running the actual experiment is straight-forward. For that we use `p.experiment` to initialize the experiment with a name, the data (list of key-value pairs fed into our entry function) and the entry function. We then call `run` on the experiment to execute it. Note, that `target` is a reserved key in the data dictionary and will be used as the target answer for evaluation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e = p.experiment(\n",
" \"RAG\",\n",
" data=[\n",
" {\n",
" \"question\": qca[\"question\"],\n",
" \"target\": qca[\"answer\"],\n",
" }\n",
" for qca in question_context_answers\n",
" ],\n",
" func=rag_pipeline,\n",
").run()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Analyzing the results\n",
"\n",
"When opening above experiment, we will see an overview of the experiment as shown below. The upper half shows a summary of the statistics on the left and charts to investigate the distribution and relationships of scores on the right. The lower half is a table with the individual traces which we can use to debug individual samples.\n",
"\n",
"When looking at the statistics, we can see that the accuracy of our RAG pipeline is 22% as measured by `answer_matches_target_llm_grader`. Though when checking the quality of our retrieval step (`context_query_relevancy`), we can see that our retrieval step is fetching relevant information in only 27% of all samples. As shown in the GIF, we investigate the relationship between the two and see the two scores have 95% agreement. This confirms that the retrieval step is a major bottleneck for our RAG pipeline. So, now it's your turn to improve the retrieval step!\n",
"\n",
"Note, above link isn't publicly accessible but the experiment can be accessed through [here](https://app.parea.ai/public-experiments/parea/rag_sglang/30f0244a-d56c-44ff-bdfb-8f47626304b6).\n",
"\n",
"![Experiment Results](https://drive.google.com/uc?id=1KMtJBU47nPB02Pvv3SPPTK7RnHRh5YdA)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
"""
Usage:
python -m sglang.launch_server --model-path meta-llama/Llama-2-7b-chat-hf --port 30000
python readme_examples.py
"""
import sglang as sgl
@sgl.function
def tool_use(s, question):
s += "To answer this question: " + question + ". "
s += (
"I need to use a "
+ sgl.gen("tool", choices=["calculator", "search engine"])
+ ". "
)
if s["tool"] == "calculator":
s += "The math expression is" + sgl.gen("expression")
elif s["tool"] == "search engine":
s += "The key word to search is" + sgl.gen("word")
@sgl.function
def tip_suggestion(s):
s += (
"Here are two tips for staying healthy: "
"1. Balanced Diet. 2. Regular Exercise.\n\n"
)
forks = s.fork(2)
for i, f in enumerate(forks):
f += f"Now, expand tip {i+1} into a paragraph:\n"
f += sgl.gen(f"detailed_tip", max_tokens=256, stop="\n\n")
s += "Tip 1:" + forks[0]["detailed_tip"] + "\n"
s += "Tip 2:" + forks[1]["detailed_tip"] + "\n"
s += "In summary" + sgl.gen("summary")
@sgl.function
def regular_expression_gen(s):
s += "Q: What is the IP address of the Google DNS servers?\n"
s += "A: " + sgl.gen(
"answer",
temperature=0,
regex=r"((25[0-5]|2[0-4]\d|[01]?\d\d?).){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)",
)
@sgl.function
def text_qa(s, question):
s += "Q: " + question + "\n"
s += "A:" + sgl.gen("answer", stop="\n")
def driver_tool_use():
state = tool_use.run(question="What is the capital of the United States?")
print(state.text())
print("\n")
def driver_tip_suggestion():
state = tip_suggestion.run()
print(state.text())
print("\n")
def driver_regex():
state = regular_expression_gen.run()
print(state.text())
print("\n")
def driver_batching():
states = text_qa.run_batch(
[
{"question": "What is the capital of the United Kingdom?"},
{"question": "What is the capital of France?"},
{"question": "What is the capital of Japan?"},
],
progress_bar=True,
)
for s in states:
print(s.text())
print("\n")
def driver_stream():
state = text_qa.run(
question="What is the capital of France?", temperature=0.1, stream=True
)
for out in state.text_iter():
print(out, end="", flush=True)
print("\n")
if __name__ == "__main__":
# sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo-instruct"))
sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
driver_tool_use()
driver_tip_suggestion()
driver_regex()
driver_batching()
driver_stream()
"""
This example demonstrates how to use `min_tokens` to force sgl.gen to generate a longer sequence.
Usage:
python3 sgl_gen_min_tokens.py
"""
import sglang as sgl
@sgl.function
def long_answer(s):
s += sgl.user("What is the capital of the United States?")
s += sgl.assistant(sgl.gen("answer", min_tokens=64, max_tokens=128))
@sgl.function
def short_answer(s):
s += sgl.user("What is the capital of the United States?")
s += sgl.assistant(sgl.gen("answer"))
if __name__ == "__main__":
runtime = sgl.Runtime(model_path="meta-llama/Meta-Llama-3.1-8B-Instruct")
sgl.set_default_backend(runtime)
state = long_answer.run()
print("=" * 20)
print("Longer Answer", state["answer"])
state = short_answer.run()
print("=" * 20)
print("Short Answer", state["answer"])
runtime.shutdown()
"""
Usage:
python3 streaming.py
"""
import asyncio
import sglang as sgl
@sgl.function
def multi_turn_question(s, question_1, question_2):
s += sgl.system("You are a helpful assistant.")
s += sgl.user(question_1)
s += sgl.assistant(sgl.gen("answer_1", max_tokens=256))
s += sgl.user(question_2)
s += sgl.assistant(sgl.gen("answer_2", max_tokens=256))
sgl.set_default_backend(sgl.OpenAI("gpt-3.5-turbo"))
def stream_a_variable():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
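    # Stream only the chunks generated for the "answer_2" variable.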
for out in state.text_iter(var_name="answer_2"):
print(out, end="", flush=True)
print("\n")
async def async_stream():
state = multi_turn_question.run(
question_1="What is the capital of the United States?",
question_2="List two local attractions.",
stream=True,
)
async for out in state.text_async_iter(var_name="answer_2"):
print(out, end="", flush=True)
print("\n")
if __name__ == "__main__":
stream_a_variable()
asyncio.run(async_stream())
FROM nvcr.io/nvidia/tritonserver:24.01-py3
WORKDIR /opt
RUN git clone https://github.com/sgl-project/sglang.git
WORKDIR /opt/sglang
RUN pip install --upgrade pip && \
pip install -e "python[all]" && \
pip install datasets
# sglang_triton
Build the docker image:
```
docker build -t sglang-triton .
```
Then do:
```
docker run -ti --gpus=all --network=host --name sglang-triton -v ./models:/mnt/models sglang-triton
```
Inside the Docker container, launch the SGLang server:
```
cd sglang
python3 -m sglang.launch_server --model-path mistralai/Mistral-7B-Instruct-v0.2 --port 30000 --mem-fraction-static 0.9
```
In another shell inside the Docker container, start the Triton server:
```
docker exec -ti sglang-triton /bin/bash
cd /mnt
tritonserver --model-repository=/mnt/models
```
Send a request to the server:
```
curl -X POST http://localhost:8000/v2/models/character_generation/generate \
-H "Content-Type: application/json" \
-d '{
"INPUT_TEXT": ["harry"]
}'
```
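The same request can also be sent from Python. Below is a minimal sketch using the `requests` library; it assumes the default Triton HTTP port 8000 and the `character_generation` model shown in the curl command above.
```
import requests

# Mirror the curl call above; adjust host/port if your Triton server is configured differently.
response = requests.post(
    "http://localhost:8000/v2/models/character_generation/generate",
    json={"INPUT_TEXT": ["harry"]},
)
response.raise_for_status()
print(response.json())
```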