marluwe committed
Commit 142599f · verified · 1 parent: b175db9

Update agents.py

Files changed (1)
  1. agents.py  +15  -14
agents.py CHANGED
@@ -55,14 +55,15 @@ def get_file_content_as_text(task_id: str) -> str:
 
 def load_hf_model(modelName: str):
     """
-    Lädt ein Hugging Face Modell und gibt den Agenten zurück.
-    :param modelName: Name des Modells
+    Loads a model from the hugging face hub
+    :param modelName: Name of the model
     :return: model
     """
     load_dotenv() # Lädt automatisch .env im Projektordner
 
-    hf_token = os.getenv("hugging_face")
-    login(token=hf_token) # Authentifizierung bei Hugging Face
+    # for local usage, we might use a hf token to log in
+    # hf_token = os.getenv("hugging_face")
+    # login(token=hf_token) # Authentifizierung bei Hugging Face
     # Modell initialisieren
     model = HfApiModel(model_id=modelName)
     return model
@@ -70,9 +71,9 @@ def load_hf_model(modelName: str):
 
 def load_ollama_model(modelName: str):
     """
-    Lädt ein Ollama Modell und gibt den Agenten zurück.
-    :param modelName: Name des Modells
-    :return: model
+    Loads the requested model in ollama
+    :param modelName: Name of the model
+    :return: model (via OpenAI compatible API)
     """
     # Modell initialisieren
     model = OpenAIServerModel(model_id=modelName, api_base="http://localhost:11434/v1")
@@ -80,23 +81,23 @@ def load_ollama_model(modelName: str):
 
 def load_lmStudio_model(modelName: str):
     """
-    Lädt ein LM Studio Modell und gibt den Agenten zurück.
-    :param modelName: Name des Modells
-    :return: model
+    Loads the requested model into lm studio
+    :param modelName: Name of the model
+    :return: model, accessible through the OpenAI compatible API
     """
     # Modell initialisieren
     #model = LiteLLMModel(model_id=modelName, api_base="http://localhost:1234")
     model = OpenAIServerModel(model_id=modelName, api_base="http://localhost:1234/v1")
     return model
 
-def load_gemini_model():
+def load_gemini_model(model_name: str):
     """
-    Lädt ein Gemini Modell und gibt den Agenten zurück.
+    Loads
     :return: model
     """
     try:
         print(f"Gemini API Key: {os.getenv('GEMINI_API_KEY')}")
-        model = LiteLLMModel(model_id="gemini/gemini-2.0-flash-exp",
+        model = LiteLLMModel(model_id=f"gemini/{model_name}",
                              api_key=os.getenv("GEMINI_API_KEY"))
         #model = GeminiModel(api_key=os.getenv("GEMINI_API_KEY"))
         return model
@@ -115,7 +116,7 @@ def get_agent(model_name:str, model_type:str) -> Optional[CodeAgent]:
         case "Ollama":
             model = load_ollama_model(model_name)
         case "Gemini":
-            model = load_gemini_model()
+            model = load_gemini_model(model_name)
         case "LMStudio":
            model = load_lmStudio_model(model_name)
         case _:
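
For context, a minimal usage sketch of the updated entry point. It assumes agents.py is importable, that GEMINI_API_KEY (and optionally the "hugging_face" token) are set in the project's .env, and that the parts of get_agent() outside this diff wrap the returned model in a CodeAgent; the Ollama model name below is only a placeholder.

# Minimal usage sketch for the updated get_agent() dispatch (assumptions as above).
from dotenv import load_dotenv

from agents import get_agent

load_dotenv()  # picks up GEMINI_API_KEY from the project's .env

# After this commit the Gemini branch forwards the requested name instead of
# hard-coding it, so load_gemini_model() builds model_id=f"gemini/{model_name}".
gemini_agent = get_agent(model_name="gemini-2.0-flash-exp", model_type="Gemini")

# Local backends are unchanged and go through OpenAI-compatible endpoints:
# Ollama at http://localhost:11434/v1, LM Studio at http://localhost:1234/v1.
ollama_agent = get_agent(model_name="llama3", model_type="Ollama")  # placeholder model name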