Update agents.py
Browse files
agents.py
CHANGED
@@ -55,14 +55,15 @@ def get_file_content_as_text(task_id: str) -> str:
|
|
55 |
|
56 |
def load_hf_model(modelName: str):
|
57 |
"""
|
58 |
-
|
59 |
-
:param modelName: Name
|
60 |
:return: model
|
61 |
"""
|
62 |
load_dotenv() # Lädt automatisch .env im Projektordner
|
63 |
|
64 |
-
|
65 |
-
|
|
|
66 |
# Modell initialisieren
|
67 |
model = HfApiModel(model_id=modelName)
|
68 |
return model
|
@@ -70,9 +71,9 @@ def load_hf_model(modelName: str):
|
|
70 |
|
71 |
def load_ollama_model(modelName: str):
|
72 |
"""
|
73 |
-
|
74 |
-
:param modelName: Name
|
75 |
-
:return: model
|
76 |
"""
|
77 |
# Modell initialisieren
|
78 |
model = OpenAIServerModel(model_id=modelName, api_base="http://localhost:11434/v1")
|
@@ -80,23 +81,23 @@ def load_ollama_model(modelName: str):
|
|
80 |
|
81 |
def load_lmStudio_model(modelName: str):
|
82 |
"""
|
83 |
-
|
84 |
-
:param modelName: Name
|
85 |
-
:return: model
|
86 |
"""
|
87 |
# Modell initialisieren
|
88 |
#model = LiteLLMModel(model_id=modelName, api_base="http://localhost:1234")
|
89 |
model = OpenAIServerModel(model_id=modelName, api_base="http://localhost:1234/v1")
|
90 |
return model
|
91 |
|
92 |
-
def load_gemini_model():
|
93 |
"""
|
94 |
-
|
95 |
:return: model
|
96 |
"""
|
97 |
try:
|
98 |
print(f"Gemini API Key: {os.getenv('GEMINI_API_KEY')}")
|
99 |
-
model = LiteLLMModel(model_id="gemini/
|
100 |
api_key=os.getenv("GEMINI_API_KEY"))
|
101 |
#model = GeminiModel(api_key=os.getenv("GEMINI_API_KEY"))
|
102 |
return model
|
@@ -115,7 +116,7 @@ def get_agent(model_name:str, model_type:str) -> Optional[CodeAgent]:
|
|
115 |
case "Ollama":
|
116 |
model = load_ollama_model(model_name)
|
117 |
case "Gemini":
|
118 |
-
model = load_gemini_model()
|
119 |
case "LMStudio":
|
120 |
model = load_lmStudio_model(model_name)
|
121 |
case _:
|
|
|
55 |
|
56 |
def load_hf_model(modelName: str):
    """
    Load a model from the Hugging Face Hub.

    :param modelName: name of the model on the Hub
    :return: model
    """
    # Automatically loads the .env file from the project folder.
    load_dotenv()

    # For local usage we might use an HF token to log in:
    # hf_token = os.getenv("hugging_face")
    # login(token=hf_token)  # authenticate with Hugging Face

    # Initialise the model handle via the Hub inference API.
    hub_model = HfApiModel(model_id=modelName)
    return hub_model
|
|
|
71 |
|
72 |
def load_ollama_model(modelName: str):
|
73 |
"""
|
74 |
+
Loads the requested model in ollama
|
75 |
+
:param modelName: Name of the model
|
76 |
+
:return: model (via OpenAI compatible API)
|
77 |
"""
|
78 |
# Modell initialisieren
|
79 |
model = OpenAIServerModel(model_id=modelName, api_base="http://localhost:11434/v1")
|
|
|
81 |
|
82 |
def load_lmStudio_model(modelName: str):
    """
    Load the requested model through LM Studio's local server.

    :param modelName: name of the model loaded in LM Studio
    :return: model, accessible through the OpenAI-compatible API
    """
    # LM Studio exposes an OpenAI-compatible endpoint on localhost:1234;
    # the "/v1" suffix is required for the OpenAI client routing.
    model = OpenAIServerModel(model_id=modelName, api_base="http://localhost:1234/v1")
    return model
|
92 |
|
93 |
+
def load_gemini_model(model_name: str):
|
94 |
"""
|
95 |
+
Loads
|
96 |
:return: model
|
97 |
"""
|
98 |
try:
|
99 |
print(f"Gemini API Key: {os.getenv('GEMINI_API_KEY')}")
|
100 |
+
model = LiteLLMModel(model_id=f"gemini/{model_name}",
|
101 |
api_key=os.getenv("GEMINI_API_KEY"))
|
102 |
#model = GeminiModel(api_key=os.getenv("GEMINI_API_KEY"))
|
103 |
return model
|
|
|
116 |
case "Ollama":
|
117 |
model = load_ollama_model(model_name)
|
118 |
case "Gemini":
|
119 |
+
model = load_gemini_model(model_name)
|
120 |
case "LMStudio":
|
121 |
model = load_lmStudio_model(model_name)
|
122 |
case _:
|