Commit 9bb36bd
cyberosa committed
1 Parent(s): 290b25a

updating daily data
Files changed:
- active_traders.parquet +2 -2
- all_trades_profitability.parquet.gz +2 -2
- closed_market_metrics.parquet +2 -2
- closed_markets_div.parquet +2 -2
- daily_info.parquet +2 -2
- daily_mech_requests.parquet +2 -2
- daily_mech_requests_by_pearl_agents.parquet +2 -2
- error_by_markets.parquet +2 -2
- errors_by_mech.parquet +2 -2
- invalid_trades.parquet +2 -2
- latest_result_DAA_Pearl.parquet +2 -2
- latest_result_DAA_QS.parquet +2 -2
- old_tools_accuracy.csv +0 -13
- pearl_agents.parquet +2 -2
- retention_activity.parquet.gz +2 -2
- scripts/get_mech_info.py +0 -4
- scripts/mech_request_utils.py +1 -0
- scripts/pull_data.py +2 -2
- scripts/update_tools_accuracy.py +181 -79
- service_map.pkl +2 -2
- tools_accuracy.csv +12 -12
- tools_accuracy_version3_0.csv +13 -0
- traders_weekly_metrics.parquet +2 -2
- two_weeks_avg_roi_pearl_agents.parquet +1 -1
- unknown_traders.parquet +2 -2
- weekly_avg_roi_pearl_agents.parquet +2 -2
- weekly_mech_calls.parquet +2 -2
- winning_df.parquet +2 -2
active_traders.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e8ebf6f31796904a9dced9d32d78ce78e527e5e12c52681a099f0485d84dd98b
+size 17691664
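Most of the files in this commit are Git LFS pointers like the one above: the diff only rewrites the `oid sha256:` digest and the `size` line whenever the underlying data file is regenerated (the old digests were truncated by the page). As an aside, a downloaded file can be checked against its pointer with a short sketch like the following; the helper name and path are ours, but the oid is simply the sha256 of the file contents:

```python
import hashlib

def lfs_oid(path: str) -> str:
    """sha256 of the file contents, which is what an LFS pointer records as its oid."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # stream in 1 MiB chunks so large parquet files don't load into memory
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Compare against the new pointer for active_traders.parquet shown above
expected = "e8ebf6f31796904a9dced9d32d78ce78e527e5e12c52681a099f0485d84dd98b"
assert lfs_oid("active_traders.parquet") == expected
```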
all_trades_profitability.parquet.gz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f347e49277ab3009e97bcc6d9cf96c49a3e622e0c58385f9df0c708594a0431e
+size 17823081
closed_market_metrics.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e77231f1ab081d6d56e7e370b29e4c2ce896fa81c7def1384eeafebe4e960e08
+size 149870
closed_markets_div.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2a792c95c5dc7ee5fca1114a4ecd5e62e83b758bfda60c441d107ecc1d9db2ec
+size 89735
daily_info.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:07b4e629fda57dd4704c3ffcd0c034842f3d8f1b9ae029b26376fe4dcabbde00
+size 3678287
daily_mech_requests.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e5da96c306d6255ec5f6062e515ecadcb0c55f3bf106ef98ddf7a04031b9d2f1
+size 8037
daily_mech_requests_by_pearl_agents.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:fc3959d55d3cd0b0ff28875cac276a858c473c0f93e1417017bc4170b9a88d31
+size 4551
error_by_markets.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:90829b29bd74d6b4333e4b0ef49e627f68e2a14a9d46e1d330223d8243aef403
+size 11736
errors_by_mech.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c4604e6043191bad48b966531574aea05039e50aab27eb75bdaba5bd057916c1
+size 6116
invalid_trades.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a2b5b469ee799e8c4555fe7fd07b3d0f74448401a6c8c048eacaf223070ae222
+size 334820
latest_result_DAA_Pearl.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ecffc3e903e6068d330fb5109c6f7a5497f5a41d0b216a2f85f13ed94f8fca9e
+size 5520
latest_result_DAA_QS.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3ab2f0716f02958a98132e469347af21cf5675aac510f35063f09b5116880b6b
+size 6141
old_tools_accuracy.csv DELETED
@@ -1,13 +0,0 @@
-tool,tool_accuracy,total_requests,min,max
-claude-prediction-offline,72.53057384760113,383346,2025-04-02 05:16:45,2025-06-06 00:13:05
-claude-prediction-online,62.65060240963856,166266,2025-04-02 00:01:00,2025-05-21 18:47:35
-prediction-offline,62.179688199982074,2767227,2025-04-02 00:00:05,2025-06-09 07:28:55
-prediction-offline-sme,61.504424778761056,20986,2025-04-02 00:57:25,2025-06-07 08:53:25
-prediction-online,55.565397106584,153914,2025-04-02 07:32:50,2025-06-08 22:40:55
-prediction-online-sme,54.13848631239936,89155,2025-04-02 07:33:25,2025-06-09 04:59:30
-prediction-request-rag,33.33333333333333,1547,2025-04-02 14:01:55,2025-06-03 18:59:40
-prediction-request-rag-claude,25.0,1776,2025-04-02 12:05:10,2025-06-03 17:51:10
-prediction-request-reasoning,51.56896642045157,1200639,2025-04-02 07:03:25,2025-06-08 23:46:20
-prediction-request-reasoning-claude,66.66666666666666,1341,2025-04-02 12:25:50,2025-05-28 09:25:40
-prediction-url-cot-claude,55.55555555555556,5230,2025-04-02 05:46:40,2025-05-20 15:21:20
-superforcaster,60.26234567901234,119300,2025-04-02 00:02:10,2025-06-08 21:00:40
pearl_agents.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:aee09298538c45ac060ed1a1f7ed7f66b81706845cecccd16f4e3a010e2fa3a1
+size 47490
retention_activity.parquet.gz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c09129294d0d09f5e9e7b8e615a0dd6b32837f1cc5c14f37bf87d38dfb47d2c0
+size 4354408
scripts/get_mech_info.py CHANGED
@@ -324,10 +324,6 @@ def get_mech_events_since_last_run(logger, mech_sandbox: bool = False):
     try:
        all_trades = read_all_trades_profitability()
        latest_timestamp = max(all_trades.creation_timestamp)
-        # cutoff_date = "2025-01-13"
-        # latest_timestamp = pd.Timestamp(
-        #     datetime.strptime(cutoff_date, "%Y-%m-%d")
-        # ).tz_localize("UTC")
        print(f"Updating data since {latest_timestamp}")
     except Exception:
        print("Error while reading the profitability parquet file")
scripts/mech_request_utils.py CHANGED
@@ -624,6 +624,7 @@ def get_ipfs_data(input_filename: str, output_filename: str, logger):
        updated_mech_requests.update(partial_dict)
 
     save_json_file(updated_mech_requests, output_filename)
+
     logger.info(f"NUMBER OF MECH REQUEST IPFS ERRORS={nr_errors}")
 
     # delivers
scripts/pull_data.py CHANGED
@@ -136,12 +136,12 @@ def only_new_weekly_analysis():
 
     save_historical_data()
     try:
-        clean_old_data_from_parquet_files("2025-06-
+        clean_old_data_from_parquet_files("2025-06-06")
         clean_old_data_from_json_files()
     except Exception as e:
         print("Error cleaning the oldest information from parquet files")
         print(f"reason = {e}")
-    compute_tools_accuracy()
+    # compute_tools_accuracy()
     compute_tools_based_datasets()
     # move to tmp folder the new generated files
     move_files()
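The weekly pipeline now pins the parquet cleanup cutoff to "2025-06-06" and skips `compute_tools_accuracy()`. The cutoff semantics are not shown in this diff; a minimal sketch of what a date-based cleanup like `clean_old_data_from_parquet_files` plausibly does per file (the column name, the tz-naive assumption, and the placeholder path are ours, not taken from this repo):

```python
import pandas as pd

def drop_rows_before(path: str, cutoff: str, ts_col: str = "creation_timestamp") -> None:
    """Hypothetical helper: keep only rows at or after the cutoff date, then rewrite the file."""
    df = pd.read_parquet(path)
    # assumes ts_col is already a tz-naive datetime column
    df = df[df[ts_col] >= pd.Timestamp(cutoff)]
    df.to_parquet(path, index=False)

# e.g. mirroring the pinned cutoff used in only_new_weekly_analysis()
drop_rows_before("some_metrics.parquet", "2025-06-06")
```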
scripts/update_tools_accuracy.py CHANGED
@@ -17,12 +17,19 @@ import time
 ACCURACY_FILENAME = "tools_accuracy.csv"
 IPFS_SERVER = "/dns/registry.autonolas.tech/tcp/443/https"
 GCP_IPFS_SERVER = "/dns/registry.gcp.autonolas.tech/tcp/443/https"
-SAMPLING_POPULATION_SIZE =
+SAMPLING_POPULATION_SIZE = 500
 RECENTS_SAMPLES_SIZE = 5000
 NR_SUBSETS = 100
 SAMPLES_THRESHOLD = 50
 DEFAULT_ACCURACY = 51.0
 LAST_MODEL_UPDATE = "2025-06-03"
+CLAUDE_TOOLS = [
+    "claude-prediction-online",
+    "claude-prediction-offline",
+    "prediction-request-rag-claude",
+    "prediction-request-reasoning-claude",
+    "prediction-url-cot-claude",
+]
 
 
 def mean_and_std(numbers):
@@ -270,6 +277,70 @@ def update_global_accuracy(
     return global_accuracies
 
 
+def check_upgrade_dates(
+    tools_df,
+    tools_list,
+    new_tools,
+    claude_upgrade_date="30-07-2025",
+    gpt_upgrade_date="03-06-2025",
+) -> None:
+    # Convert upgrade dates to datetime
+    claude_upgrade_date = pd.to_datetime(claude_upgrade_date, format="%d-%m-%Y").date()
+    gpt_upgrade_date = pd.to_datetime(gpt_upgrade_date, format="%d-%m-%Y").date()
+    for tool in tools_list.keys():
+        print(f"checking tool {tool}")
+        # take the RECENT_SAMPLES from tools_df
+        tool_data = tools_df[tools_df["tool"] == tool]
+        # sort tool_data by request date in ascending order
+        tool_data = tool_data.sort_values(by="request_date", ascending=True)
+        if len(tool_data) < RECENTS_SAMPLES_SIZE:
+            new_tools.append(tool)
+            continue
+        recent_samples = get_recent_samples(
+            tool_data, recent_samples_size=RECENTS_SAMPLES_SIZE
+        )
+        recent_samples = recent_samples.sort_values(by="request_date", ascending=True)
+        print(recent_samples.head())
+        oldest_sample_date = recent_samples.iloc[0].request_date
+        if isinstance(oldest_sample_date, str):
+            oldest_sample_date = pd.to_datetime(oldest_sample_date).date()
+        print(f"tool {tool}: oldest sample date {oldest_sample_date}")
+        if tool in CLAUDE_TOOLS:
+            # if oldest_sample_date is before claude_upgrade_date then remove the tool
+            # from valid_tools and add it to the list of other_tools
+            if oldest_sample_date < claude_upgrade_date:
+                print(f"the oldest sample found is older than {claude_upgrade_date}")
+                new_tools.append(tool)
+        elif oldest_sample_date < gpt_upgrade_date:
+            print(f"the oldest sample found is older than {gpt_upgrade_date}")
+            new_tools.append(tool)
+    return
+
+
+def check_upgraded_tools(
+    tools_df,
+    valid_tools,
+    other_tools,
+):
+    """
+    Function to update the input lists and remove from valid tools any tools whose oldest date is before the upgrade dates
+    """
+    new_tools = []
+    # Check and remove tools from valid_tools
+    check_upgrade_dates(tools_df, valid_tools, new_tools)
+    for tool in new_tools:
+        if tool in valid_tools.keys():
+            print(f"removing tool {tool} from valid tools")
+            del valid_tools[tool]
+    # Check and remove tools from other_tools
+    check_upgrade_dates(tools_df, other_tools, new_tools)
+    for tool in new_tools:
+        if tool in other_tools.keys():
+            print(f"removing tool {tool} from other tools")
+            del other_tools[tool]
+    return new_tools
+
+
 def add_historical_data(
     tools_historical_file: str,
     tools_df: pd.DataFrame,
@@ -317,6 +388,7 @@ def add_historical_data(
         recent_samples = get_recent_samples(historical_tool_data, needed_samples)
         # Combine the current tools with the historical ones
         tools_df = pd.concat([tools_df, recent_samples], ignore_index=True)
+        tools_df = tools_df.sort_values(by="request_date", ascending=True)
         valid_tools[tool] = count + needed_samples
         completed_tools.append(tool)
         # Remove the tool from more_sample_tools
@@ -326,34 +398,9 @@
     return tools_df
 
 
-def compute_global_accuracy_same_population(
-    tools_df: pd.DataFrame,
-    sample_size: int = SAMPLING_POPULATION_SIZE,
-    n_subsets: int = NR_SUBSETS,
-) -> Tuple[Dict, Dict]:
-    """
-    For the tools in the dataset, it creates different subsets of the same size (using downsampling or upsampling) and
-    computes the accuracy for each subset. Finally it averages the accuracies across all subsets.
-
-    Args:
-        tools_df: DataFrame containing the tools data
-        sample_size: Target number of samples per tool
-        n_subsets: Number of balanced datasets to create
-
-    Returns:
-        List of global accuracies for the tools
-    """
-    valid_tools, more_sample_tools = classify_tools(
-        tools_df, recent_samples_size=RECENTS_SAMPLES_SIZE
-    )
-    global_accuracies = {}
-    if len(valid_tools) > 0:
-        # Compute the accuracy for tools in valid_tools
-        update_global_accuracy(
-            valid_tools, tools_df, global_accuracies, sample_size, n_subsets
-        )
-
-    # Check historical files for tools that need more samples
+def check_historical_samples(
+    global_accuracies, more_sample_tools, valid_tools, sample_size, n_subsets
+):
     client = initialize_client()
     # first attempt: historical file download
     tool_names = list(more_sample_tools.keys())
@@ -361,7 +408,7 @@ def compute_global_accuracy_same_population(
     completed_tools = []
     if len(tool_names) > 0:
         print("First attempt to complete the population size")
-        # first attempt: historical file from
+        # first attempt: historical file from 4 months ago
         tools_historical_file = download_tools_historical_files(
             client, skip_files_count=FILES_IN_TWO_MONTHS
         )
@@ -375,7 +422,7 @@ def compute_global_accuracy_same_population(
             completed_tools,
         )
         print(more_sample_tools)
-        # second attempt: historical file from
+        # second attempt: historical file from 6 months ago
        if len(more_sample_tools) > 0:
            print("Second attempt to complete the population size")
            # second historical file download
@@ -403,25 +450,101 @@
             n_subsets,
             one_tool=tool,
         )
-
-
-
-
-
-
+    return
+
+
+def compute_global_accuracy_same_population(
+    tools_df: pd.DataFrame,
+    recent_samples_size: int = RECENTS_SAMPLES_SIZE,
+    sample_size: int = SAMPLING_POPULATION_SIZE,
+    n_subsets: int = NR_SUBSETS,
+) -> Tuple[Dict, Dict]:
+    """
+    For the tools in the dataset, it creates different subsets of the same size (using downsampling or upsampling) and
+    computes the accuracy for each subset. Finally it averages the accuracies across all subsets.
+
+    Args:
+        tools_df: DataFrame containing the tools data
+        sample_size: Target number of samples per tool
+        n_subsets: Number of balanced datasets to create
+
+    Returns:
+        List of global accuracies for the tools
+    """
+
+    valid_tools, more_sample_tools = classify_tools(tools_df, recent_samples_size)
+    # check tools that were upgraded recently
+    # otherwise they will be moved as new_tools
+    new_tools = check_upgraded_tools(tools_df, valid_tools, more_sample_tools)
+    print(f"new tools {new_tools} after checking the upgraded tools")
+    global_accuracies = {}
+    if len(valid_tools) > 0:
+        # Compute the accuracy for tools in valid_tools
+        update_global_accuracy(
+            valid_tools, tools_df, global_accuracies, sample_size, n_subsets
+        )
+
+    # Check historical files for tools that need more samples
+    if len(more_sample_tools) > 0:
+        new_tools.extend(more_sample_tools.keys())
+        # check_historical_samples(
+        #     global_accuracies, more_sample_tools, valid_tools, sample_size, n_subsets
+        # )
+        # for tool in more_sample_tools.keys():
+        #     # tool but not reaching yet the population size so treated as a new tool
+        #     new_tools.append(tool)
     return global_accuracies, new_tools
 
 
-def
+def compute_global_weekly_accuracy(clean_tools_df):
+    """
+    Compute accuracy following version 5.0 of spec"""
+    # get the information in clean_tools_df from last two weeks only, timestamp column is request_time
+
+    three_weeks_ago = pd.Timestamp.now(tz="UTC") - pd.Timedelta(days=21)
+    recent_df = clean_tools_df[clean_tools_df["request_time"] >= three_weeks_ago]
+
+    # compute at the tool level (using "tool" column) the volume of requests per tool
+    tool_volumes = (
+        recent_df.groupby("tool")["request_id"].count().reset_index(name="volume")
+    )
+    min_volume = tool_volumes["volume"].min()
+    max_volume = tool_volumes["volume"].max()
+
+    # compute the average volume of tool requests in the last two weeks, excluding min and max
+    filtered_volumes = tool_volumes[
+        (tool_volumes["volume"] != min_volume) & (tool_volumes["volume"] != max_volume)
+    ]
+    avg_volume = filtered_volumes["volume"].mean()
+
+    print("Tool volumes in last three weeks:")
+    print(tool_volumes)
+    print(
+        f"Average volume of tool requests in last three weeks excluding min and max: {avg_volume}"
+    )
+    sampling_size = int(avg_volume / 2)
+    print(f"Sampling size = {sampling_size}")
+
+    return compute_global_accuracy_same_population(
+        tools_df=recent_df,
+        recent_samples_size=avg_volume,
+        sample_size=sampling_size,
+        n_subsets=50,
+    )
+
+
+def get_accuracy_info(clean_tools_df: pd.DataFrame) -> [pd.DataFrame, bool, List]:
     """
     Extracts accuracy information from the tools DataFrame.
     """
 
     # compute global accuracy information for the tools
-    global_accuracies, new_tools = compute_global_accuracy_same_population(
-        tools_df=clean_tools_df,
+    # global_accuracies, new_tools = compute_global_accuracy_same_population(
+    #     tools_df=clean_tools_df,
+    # )
+    global_accuracies, new_tools = compute_global_weekly_accuracy(
+        clean_tools_df=clean_tools_df
     )
-
     # transform the dictionary global_accuracies into a DataFrame
     wins = pd.DataFrame(
         [
@@ -472,18 +595,17 @@ def update_tools_accuracy_same_model(
     acc_info["max"] = acc_info["max"].dt.strftime("%Y-%m-%d %H:%M:%S")
     all_accuracies = []
     final_acc_df = pd.DataFrame(columns=tools_acc.columns)
+    # accuracy has been computed over the same population size
+    new_volume = SAMPLING_POPULATION_SIZE
     for tool in tools_to_update:
-        if tool in new_tools:
-            continue
-        if tool not in existing_tools:
+        if tool in new_tools or tool not in existing_tools:
            new_tools.append(tool)
            continue
        new_accuracy = round(
            acc_info[acc_info["tool"] == tool]["tool_accuracy"].values[0], 2
        )
        all_accuracies.append(new_accuracy)
-
-        new_volume = SAMPLING_POPULATION_SIZE
+
        if no_timeline_info:
            new_min_timeline = None
            new_max_timeline = None
@@ -548,49 +670,29 @@
     return final_acc_df
 
 
-def update_tools_accuracy(
-    tools_acc: pd.DataFrame,
-    tools_df: pd.DataFrame,
-    inc_tools: List[str],
-) -> pd.DataFrame:
-    """To compute/update the latest accuracy information for the different mech tools
-    but splitting by date 3rd of June when the gpt 4.1 update happened 2025"""
-
-    tools_df["request_time"] = pd.to_datetime(tools_df["request_time"])
-    tools_df["request_date"] = tools_df["request_time"].dt.date
-    tools_df["request_date"] = pd.to_datetime(tools_df["request_date"])
-    tools_df["request_date"] = tools_df["request_date"].dt.strftime("%Y-%m-%d")
-
-    # split the data into two parts: before and after the 3rd of June
-    split_date = pd.to_datetime(LAST_MODEL_UPDATE).tz_localize("UTC")
-    before_split = tools_df[tools_df["request_time"] < split_date]
-    after_split = tools_df[tools_df["request_time"] >= split_date]
-    print(f"Number of requests before {split_date}: {len(before_split)}")
-    print(f"Number of requests after {split_date}: {len(after_split)}")
-
-    acc_info_after = update_tools_accuracy_same_model(tools_acc, after_split, inc_tools)
-    # return the two different dataframes
-    return acc_info_after
-
-
 def compute_tools_accuracy():
+    """To compute/update the latest accuracy information. Relevant dates
+    -- 3rd of June 2025 when the gpt 4.1 update happened
+    -- 30th of July 2025 when the Claude 4 update happened"""
     print("Computing accuracy of tools")
     print("Reading tools parquet file")
-
-
-    print("Computing tool accuracy information")
+    tools_df = pd.read_parquet(TMP_DIR / "tools.parquet")
+
     # Check if the file exists
-    acc_data =
+    acc_data = None
     if os.path.exists(ROOT_DIR / ACCURACY_FILENAME):
        acc_data = pd.read_csv(ROOT_DIR / ACCURACY_FILENAME)
 
-
+    tools_df["request_time"] = pd.to_datetime(tools_df["request_time"])
+    tools_df["request_date"] = tools_df["request_time"].dt.date
+    tools_df["request_date"] = pd.to_datetime(tools_df["request_date"])
+    tools_df["request_date"] = tools_df["request_date"].dt.strftime("%Y-%m-%d")
+    new_acc_data = update_tools_accuracy_same_model(acc_data, tools_df, INC_TOOLS)
 
-
-    print("Saving into a csv files")
+    print("Saving into a csv file")
     new_acc_data.to_csv(ROOT_DIR / ACCURACY_FILENAME, index=False)
     # save the data into IPFS
-    push_csv_file_to_ipfs()
+    # push_csv_file_to_ipfs()
 
 
 def push_csv_file_to_ipfs(filename: str = ACCURACY_FILENAME) -> str:
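The core behavioral change in `update_tools_accuracy.py` is the new `compute_global_weekly_accuracy`: the sampling size is now derived from per-tool request volumes over the last three weeks, with the single lowest- and highest-volume tools excluded before averaging, and half of that trimmed average used as the per-subset sample size. A worked toy example of just that volume arithmetic (the tool names and counts below are made up for illustration, not real data):

```python
import pandas as pd

# Made-up per-tool request volumes over a three-week window
tool_volumes = pd.DataFrame(
    {
        "tool": ["prediction-offline", "prediction-online", "superforcaster", "prediction-request-rag"],
        "volume": [1200, 800, 950, 150],
    }
)

min_volume = tool_volumes["volume"].min()  # 150
max_volume = tool_volumes["volume"].max()  # 1200

# Exclude the extremes, as in compute_global_weekly_accuracy
filtered = tool_volumes[
    (tool_volumes["volume"] != min_volume) & (tool_volumes["volume"] != max_volume)
]
avg_volume = filtered["volume"].mean()  # (800 + 950) / 2 = 875.0

# Sampling size is half the trimmed average volume
sampling_size = int(avg_volume / 2)  # 437
print(f"avg volume = {avg_volume}, sampling size = {sampling_size}")
```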
service_map.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:789d1027897cd87ab4ffd46c3cc995e7a96771ed77575264a6b663e3046893f1
+size 172245
tools_accuracy.csv CHANGED
@@ -1,13 +1,13 @@
 tool,tool_accuracy,total_requests,min,max
-prediction-offline,61.
-prediction-
-prediction-online,
-prediction-
-
-claude-prediction-
-
-
-prediction-
-prediction-
-prediction-
-prediction-
+prediction-offline,61.97,500,2025-06-03 00:00:05,2025-08-03 23:44:55
+prediction-online-sme,52.33,500,2025-06-03 00:04:30,2025-08-03 22:49:45
+prediction-online,59.38,500,2025-06-03 00:00:05,2025-08-03 22:31:35
+prediction-request-reasoning,58.02,500,2025-06-03 00:00:30,2025-08-03 23:44:40
+claude-prediction-offline,57.92,500,2025-06-06 00:13:05,2025-08-03 21:46:10
+claude-prediction-online,57.92,500,2025-06-11 07:23:05,2025-08-03 23:02:15
+superforcaster,57.92,500,2025-06-03 01:15:10,2025-08-03 22:50:05
+prediction-request-reasoning-claude,57.92,500,2025-06-16 11:02:15,2025-08-03 22:59:10
+prediction-request-rag-claude,57.92,500,2025-06-03 17:51:10,2025-08-03 22:52:10
+prediction-offline-sme,57.92,500,2025-06-03 11:55:10,2025-08-02 07:55:50
+prediction-url-cot-claude,57.92,500,2025-06-12 20:36:25,2025-07-27 08:15:15
+prediction-request-rag,57.92,500,2025-06-03 18:59:40,2025-08-03 21:23:40
tools_accuracy_version3_0.csv ADDED
@@ -0,0 +1,13 @@
+tool,tool_accuracy,total_requests,min,max
+prediction-offline,61.52,300,2025-06-03 00:00:05,2025-07-30 23:44:15
+prediction-request-reasoning,55.59,300,2025-06-03 00:00:30,2025-07-30 23:43:05
+prediction-online,60.18,300,2025-06-03 00:00:05,2025-07-30 23:23:30
+prediction-online-sme,54.13,300,2025-06-03 00:04:30,2025-07-30 23:43:35
+superforcaster,63.67,300,2025-06-03 01:15:10,2025-07-30 23:41:00
+claude-prediction-offline,59.72,300,2025-06-06 00:13:05,2025-07-30 23:24:35
+prediction-request-rag,49.97,300,2025-06-03 18:59:40,2025-07-30 23:41:15
+claude-prediction-online,51.46,300,2025-06-11 07:23:05,2025-07-30 23:41:30
+prediction-offline-sme,54.5,300,2025-06-03 11:55:10,2025-07-29 11:56:35
+prediction-request-rag-claude,56.75,300,2025-06-03 17:51:10,2025-07-30 22:26:00
+prediction-request-reasoning-claude,56.75,300,2025-06-16 11:02:15,2025-07-30 20:25:20
+prediction-url-cot-claude,56.75,300,2025-06-12 20:36:25,2025-07-27 08:15:15
traders_weekly_metrics.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3e433c7ac52c2770887833b8dadb9f013e2b49d15430cddbc32e11cd1706466f
+size 190345
two_weeks_avg_roi_pearl_agents.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:fc1ff47e9188cb0de7f9faacdb14c00fb9834c85f4c0b8adb9dfaaa58725f851
 size 3045
unknown_traders.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:071fa24aa479b61a4941a9f8a209e8b7a970d8b2e5babf9600699a9db07d6b98
+size 1482486
weekly_avg_roi_pearl_agents.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:dae3501daee304809183d2b6a8814b447e6efebfe2e6f803998c9d1752d6c65c
+size 2413
weekly_mech_calls.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:26de376fe9c84db0a9fd152e098f6c5d87d00cc11b2e13788c208ad3e336dce1
+size 52263
winning_df.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e70921e5f249d72bf8267eb8313e9d736b188a2830f8d8fc7fc988ec8df76e80
+size 12272