chizhikchi committed on
Commit dfcee3d · verified · 1 Parent(s): fff7deb

Update app.py

Files changed (1)
  1. app.py +22 -32
app.py CHANGED
@@ -8,34 +8,6 @@ import pandas as pd
 
 from Gradio_UI import GradioUI
 
-# Below is an example of a tool that does nothing. Amaze us with your creativity !
-@tool
-def f1_tack_getter(track_name: str) -> str:  # it's important to specify the return type
-    # Keep this format for the description / args / args description but feel free to modify the tool
-    """
-    Returns data for a specified race
-    Args:
-        track_name: A string representing a valid track name from the 2024 F1 calendar
-    """
-    df = pd.read_csv('./Formula1_2024season_raceResults.csv')
-    return df.groupby('Track').get_group(track_name)
-
-@tool
-def get_current_time_in_timezone(timezone: str) -> str:
-    """A tool that fetches the current local time in a specified timezone.
-    Args:
-        timezone: A string representing a valid timezone (e.g., 'America/New_York').
-    """
-    try:
-        # Create timezone object
-        tz = pytz.timezone(timezone)
-        # Get current time in that timezone
-        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
-        return f"The current local time in {timezone} is: {local_time}"
-    except Exception as e:
-        return f"Error fetching time for timezone '{timezone}': {str(e)}"
-
-
 final_answer = FinalAnswerTool()
 
 # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
@@ -48,16 +20,34 @@ model_id='Qwen/Qwen2.5-Coder-32B-Instruct', # it is possible that this model may
     custom_role_conversions=None,
 )
 
-
-# Import tool from Hub
-image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+@tool
+def f1_tackinfo_getter(track_name: str) -> str:  # it's important to specify the return type
+    # Keep this format for the description / args / args description but feel free to modify the tool
+    """
+    Returns data for a specified race
+    Args:
+        track_name: A string representing a valid track name from the 2024 F1 calendar
+    Returns:
+        A string with information about the given race
+    """
+    df = pd.read_csv('./Formula1_2024season_raceResults.csv')
+    info = str(df.groupby('Track').get_group(track_name))
+    client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")
+    system_prompt = "You are an expert in F1 race analysis. You will be given data about a race and your goal is to provide a concise analysis of these results"
+    output = client.chat.completions.create(
+        messages=[
+            {'role': 'system', 'content': system_prompt},
+            {'role': 'user', 'content': f'Here is the data about the race: {info}'}
+        ]
+    )
+    return output.choices[0].message.content
 
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
 
 agent = CodeAgent(
     model=model,
-    tools=[f1_tack_getter, final_answer],  ## add your tools here (don't remove final answer)
+    tools=[f1_tackinfo_getter, final_answer],  ## add your tools here (don't remove final answer)
     max_steps=6,
     verbosity_level=1,
     grammar=None,
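
For context, here is a minimal standalone sketch of the CSV lookup the new tool performs before it calls the LLM, useful for sanity-checking the data half on its own. It assumes Formula1_2024season_raceResults.csv sits next to app.py and has a 'Track' column, as the committed code does; 'Bahrain' is a hypothetical example value, not confirmed by the commit.

    # Standalone sanity check for the lookup used by f1_tackinfo_getter.
    # Assumes the CSV ships with the Space and contains a 'Track' column;
    # 'Bahrain' is a hypothetical example track name.
    import pandas as pd

    df = pd.read_csv("./Formula1_2024season_raceResults.csv")
    print(sorted(df["Track"].unique()))               # list the valid track names first
    race = df.groupby("Track").get_group("Bahrain")   # raises KeyError for unknown tracks
    print(race.to_string())

Since get_group raises KeyError for an unrecognized track, wrapping that call in try/except (as the removed timezone tool did for its lookup) would let the tool return a readable error string to the agent instead of failing the step.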