# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)
# OpenAI Chat completion
import os
from openai import AsyncOpenAI # importing openai for API usage
import chainlit as cl # importing chainlit for our app
from chainlit.prompt import Prompt, PromptMessage # importing prompt tools
from chainlit.playground.providers import ChatOpenAI # importing ChatOpenAI tools
from dotenv import load_dotenv
load_dotenv()
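
# NOTE: the .env file is assumed to provide OPENAI_API_KEY; load_dotenv() loads it
# into the environment, where the AsyncOpenAI client below picks it up automatically.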
# ChatOpenAI Templates
system_template = """You are a funny and unhelpful assistant. Every time you answer, you throw a joke or make fun of the question.
"""
user_template = """
Serious Question: {input}
Funny Response
"""
@cl.on_chat_start # marks a function that will be executed at the start of a user session
async def start_chat():
    settings = {
        "model": "gpt-3.5-turbo",
        "temperature": 0,
        "max_tokens": 500,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }

    cl.user_session.set("settings", settings)
@cl.on_message # marks a function that should be run each time the chatbot receives a message from a user
async def main(message: cl.Message):
    settings = cl.user_session.get("settings")

    client = AsyncOpenAI()

    print(message.content)
    prompt = Prompt(
        provider=ChatOpenAI.id,
        messages=[
            PromptMessage(
                role="system",
                template=system_template,
                formatted=system_template,
            ),
            PromptMessage(
                role="user",
                template=user_template,
                formatted=user_template.format(input=message.content),
            ),
        ],
        inputs={"input": message.content},
        settings=settings,
    )
    print([m.to_openai() for m in prompt.messages])

    msg = cl.Message(content="")
    # Call OpenAI and stream the completion token by token
    async for stream_resp in await client.chat.completions.create(
        messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
    ):
        token = stream_resp.choices[0].delta.content
        # Some chunks (e.g. the first and last) carry no content, so fall back to ""
        if not token:
            token = ""
        await msg.stream_token(token)
    # Update the prompt object with the completion
    prompt.completion = msg.content
    msg.prompt = prompt

    # Send and close the message stream
    await msg.send()
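

# To run the app locally (assuming this file is saved as app.py):
#   pip install chainlit openai python-dotenv
#   chainlit run app.py -w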