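"""Minimal chat client for a vLLM model served on Modal.

Looks up the deployed `VLLMModel` class on the `llm-server` app, sends a
single user message, and prints the assistant's reply, either streamed
chunk-by-chunk or as one blocking call, depending on ENABLE_STREAMING.
Assumes the server app has already been deployed (e.g. with `modal deploy`).
"""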
import modal

APP_NAME = "llm-server"
ENABLE_STREAMING = True
SYSTEM_PROMPT = "You are a friendly Chatbot. Please respond in the same language as the user."

# Get a handle to the `VLLMModel` class on the deployed `llm-server` app and
# instantiate it; method calls on `model` execute remotely on Modal.
VLLMModel = modal.Cls.from_name(APP_NAME, "VLLMModel")
model = VLLMModel()

# Seed the conversation with the system prompt. Messages use the OpenAI-style
# schema: a role plus a list of typed content parts.
chat_history = []
chat_history.append(
    {"role": "system", "content": [{"type": "text", "text": SYSTEM_PROMPT}]}
)

# User prompt
user_prompt = "Hi!"
print(f"USER: {user_prompt}\n")
chat_history.append(
    {"role": "user", "content": [{"type": "text", "text": user_prompt}]}
)
print("Calling chat function...")
# AI response
if ENABLE_STREAMING:
    # Streaming version: print chunks as they arrive and accumulate them
    # into the full response.
    print("AI: ", end="", flush=True)
    response = ""
    for chunk in model.generate_stream.remote_gen(chat_history):
        print(chunk, end="", flush=True)
        response += chunk
    print()
else:
    # Non-streaming version: block until the complete response is returned.
    response = model.generate.remote(chat_history)
    print("AI:", response)

# Record the assistant's reply so the history is ready for a follow-up turn.
chat_history.append(
    {"role": "assistant", "content": [{"type": "text", "text": response}]}
)
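
# For reference, a minimal sketch of the server-side interface this client
# assumes (the real implementation lives in the deployed `llm-server` app;
# the GPU type and method bodies below are illustrative only):
#
#   import modal
#
#   app = modal.App("llm-server")
#
#   @app.cls(gpu="A100")
#   class VLLMModel:
#       @modal.method()
#       def generate(self, chat_history: list) -> str:
#           ...  # run vLLM and return the full completion as a string
#
#       @modal.method()
#       def generate_stream(self, chat_history: list):
#           ...  # a generator yielding text chunks; invoked via .remote_gen()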