File size: 2,088 Bytes
b7b44ee
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9d11405
b7b44ee
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
# Import libraries.

import os
import openai
import chainlit as cl
from chainlit.prompt import Prompt, PromptMessage
from chainlit.playground.providers import ChatOpenAI
from dotenv import load_dotenv

# Load variables from a local .env file, then configure the OpenAI client.
# Raises KeyError at import time if OPENAI_API_KEY is not set.
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]

# Prompt templates.
# System message: fixed persona applied to every conversation.
system_template = """
You are a helpful assistant who always speaks in a pleasant tone!
"""

# User message: the raw user input plus a step-by-step reasoning nudge.
user_template = """{input}
Think through your response step by step.
"""

# Runs once when a new user session starts.
@cl.on_chat_start
async def start_chat():
    """Store the default OpenAI ChatCompletion settings for this session."""
    chat_settings = dict(
        model="gpt-3.5-turbo",  # model used for every completion
        temperature=0,          # minimise sampling randomness
        max_tokens=500,         # cap on tokens generated in the reply
        top_p=1,                # nucleus sampling effectively disabled
        frequency_penalty=0,    # no penalty for repeated tokens
        presence_penalty=0,     # no penalty for re-mentioning topics
    )
    cl.user_session.set("settings", chat_settings)

# Runs each time the bot receives a user message.
@cl.on_message
async def main(message: str):
    """Stream a ChatGPT reply to the incoming user message.

    Builds a Prompt (system persona + formatted user input), streams the
    completion token-by-token into a Chainlit message, then attaches the
    prompt to the message so the Prompt Playground shows the full context.
    """
    settings = cl.user_session.get("settings")

    prompt = Prompt(
        provider=ChatOpenAI.id,
        messages=[
            PromptMessage(
                role="system",
                template=system_template,
                formatted=system_template,
            ),
            PromptMessage(
                role="user",
                template=user_template,
                # NOTE(review): assumes `message` is a plain str here; newer
                # Chainlit versions pass a cl.Message (use message.content) —
                # confirm against the installed chainlit version.
                formatted=user_template.format(input=message),
            ),
        ],
        inputs={"input": message},
        settings=settings,
    )

    # Convert once and reuse for the API call.  (A leftover debug print()
    # previously recomputed this list and dumped it to stdout per message.)
    openai_messages = [m.to_openai() for m in prompt.messages]

    # Empty placeholder message that the tokens are streamed into.
    msg = cl.Message(content="")

    # Call OpenAI with streaming so tokens appear as they are generated;
    # chunks with no "content" delta (e.g. role headers) stream as "".
    async for stream_resp in await openai.ChatCompletion.acreate(
        messages=openai_messages, stream=True, **settings
    ):
        token = stream_resp.choices[0]["delta"].get("content", "")
        await msg.stream_token(token)

    # Record the final completion on the prompt, and attach the prompt to
    # the message so the playground has the full generation context.
    prompt.completion = msg.content
    msg.prompt = prompt

    await msg.send()