Zeel committed on
Commit
339d432
·
verified ·
1 Parent(s): e3b7d4b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -8,6 +8,7 @@ from datetime import datetime
8
  from src import preprocess_and_load_df, load_agent, ask_agent, decorate_with_code, show_response, get_from_user, load_smart_df, ask_question
9
  from dotenv import load_dotenv
10
  from langchain_groq.chat_models import ChatGroq
 
11
  from streamlit_feedback import streamlit_feedback
12
  from huggingface_hub import HfApi
13
  st.set_page_config(layout="wide")
@@ -16,7 +17,7 @@ st.set_page_config(layout="wide")
16
  load_dotenv()
17
  Groq_Token = os.environ["GROQ_API_KEY"]
18
  hf_token = os.environ["HF_TOKEN"]
19
- models = {"llama3":"llama3-70b-8192","mixtral": "mixtral-8x7b-32768", "llama2": "llama2-70b-4096", "gemma": "gemma-7b-it"}
20
 
21
  self_path = os.path.dirname(os.path.abspath(__file__))
22
 
 
8
  from src import preprocess_and_load_df, load_agent, ask_agent, decorate_with_code, show_response, get_from_user, load_smart_df, ask_question
9
  from dotenv import load_dotenv
10
  from langchain_groq.chat_models import ChatGroq
11
+ from langchain_google_genai import GoogleGenerativeAI
12
  from streamlit_feedback import streamlit_feedback
13
  from huggingface_hub import HfApi
14
  st.set_page_config(layout="wide")
 
17
  load_dotenv()
18
  Groq_Token = os.environ["GROQ_API_KEY"]
19
  hf_token = os.environ["HF_TOKEN"]
20
+ models = {"llama3":"llama3-70b-8192","mixtral": "mixtral-8x7b-32768", "llama2": "llama2-70b-4096", "gemma": "gemma-7b-it", "gemini-pro": "gemini-pro"}
21
 
22
  self_path = os.path.dirname(os.path.abspath(__file__))
23