Taarun19's picture
Update app.py
fca976e verified
raw
history blame contribute delete
632 Bytes
from langchain.llms import OpenAI
# from dotenv import load_dotenv
import streamlit as st
# import os
# load_dotenv()
## Function to load OpenAI model and get response
def get_openai_response(question, temperature=0.5):
    """Send *question* to the OpenAI completion model and return its reply.

    Args:
        question: The user's prompt text.
        temperature: Sampling temperature (default 0.5, the original
            hard-coded value); higher values give more varied output.

    Returns:
        The model's response string.
    """
    # NOTE(review): a new client is built per call; acceptable for a demo,
    # but a module-level instance would avoid repeated construction.
    llm = OpenAI(temperature=temperature)
    response = llm(question)
    return response
## Initialize our streamlit app
st.set_page_config(page_title="Q&A Demo")
st.header("Langchain Application")

input1 = st.text_input("Input: ", key="input")
submit = st.button("Generate")

## If generate button is clicked
if submit:
    # Only hit the OpenAI API after an explicit click and with a
    # non-empty prompt — the original called the model on every
    # Streamlit rerun, wasting paid API calls before submission.
    if input1.strip():
        response = get_openai_response(input1)
        st.subheader("The response is")
        st.write(response)
    else:
        st.warning("Please enter a question first.")