Unverified Commit c07a4428 authored by Massimiliano Pronesti's avatar Massimiliano Pronesti Committed by GitHub
Browse files

chore(examples-docs): upgrade to OpenAI V1 (#1785)

parent cd3aa153
......@@ -157,11 +157,16 @@ Since this server is compatible with OpenAI API, you can use it as a drop-in rep
.. code-block:: python
import openai
from openai import OpenAI
# Modify OpenAI's API key and API base to use vLLM's API server.
openai.api_key = "EMPTY"
openai.api_base = "http://localhost:8000/v1"
completion = openai.Completion.create(model="facebook/opt-125m",
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"
client = OpenAI(
api_key=openai_api_key,
base_url=openai_api_base,
)
completion = client.completions.create(model="facebook/opt-125m",
prompt="San Francisco is a")
print("Completion result:", completion)
......@@ -194,11 +199,17 @@ Using the `openai` python package, you can also communicate with the model in a
.. code-block:: python
import openai
from openai import OpenAI
# Set OpenAI's API key and API base to use vLLM's API server.
openai.api_key = "EMPTY"
openai.api_base = "http://localhost:8000/v1"
chat_response = openai.ChatCompletion.create(
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"
client = OpenAI(
api_key=openai_api_key,
base_url=openai_api_base,
)
chat_response = client.chat.completions.create(
model="facebook/opt-125m",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
......
import openai
from openai import OpenAI
# Modify OpenAI's API key and API base to use vLLM's API server.
openai.api_key = "EMPTY"
openai.api_base = "http://localhost:8000/v1"
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"
# List models API
models = openai.Model.list()
print("Models:", models)
client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
api_key=openai_api_key,
base_url=openai_api_base,
)
model = models["data"][0]["id"]
models = client.models.list()
model = models.data[0].id
# Chat completion API
chat_completion = openai.ChatCompletion.create(
model=model,
chat_completion = client.chat.completions.create(
messages=[{
"role": "system",
"content": "You are a helpful assistant."
......@@ -27,7 +28,10 @@ chat_completion = openai.ChatCompletion.create(
}, {
"role": "user",
"content": "Where was it played?"
}])
}],
model=model,
)
print("Chat completion results:")
print(chat_completion)
import openai
from openai import OpenAI
# Modify OpenAI's API key and API base to use vLLM's API server.
openai.api_key = "EMPTY"
openai.api_base = "http://localhost:8000/v1"
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"
# List models API
models = openai.Model.list()
print("Models:", models)
client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
api_key=openai_api_key,
base_url=openai_api_base,
)
model = models["data"][0]["id"]
models = client.models.list()
model = models.data[0].id
# Completion API
stream = False
completion = openai.Completion.create(
completion = client.completions.create(
model=model,
prompt="A robot may not injure a human being",
echo=False,
n=2,
stream=stream,
logprobs=3)
logprobs=3
)
print("Completion results:")
if stream:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment