woletee committed
Commit fa5a9b6 · 1 Parent(s): 6fb64b4

this is the final version I think

Files changed (1)
  1. app.py +2 -19
app.py CHANGED
@@ -38,7 +38,6 @@ def upload():
     for sample in data.get("train", []):
         input_grid = sample["input"]
         output_grid = sample["output"]
-
         concept_label, _ = run_inference(model, input_grid, output_grid)
         predicted_HLCs.append(concept_label)
         input_output_pairs.append((tolist_safe(input_grid), tolist_safe(output_grid)))
@@ -55,17 +54,6 @@ def upload():
         predicted_HLCs=predicted_HLCs
     )
 
-    # Last grid for optional evaluation
-    last_input = input_output_pairs[-1][0] if input_output_pairs else []
-    last_ground_truth = input_output_pairs[-1][1] if input_output_pairs else []
-
-    try:
-        predicted_output = tolist_safe(best_program.evaluate(last_input))
-    except Exception as e:
-        print("Error during best_program evaluation:", e)
-        predicted_output = [["ERROR"]]
-
-    # Evaluate program on test pairs
     test_pairs = []
     predicted_test_outputs = []
 
@@ -73,13 +61,11 @@ def upload():
         test_input = tolist_safe(sample["input"])
         test_output = tolist_safe(sample["output"])
         test_pairs.append((test_input, test_output))
-
         try:
             predicted = tolist_safe(best_program.evaluate(test_input))
         except Exception as e:
             print("Error evaluating test input:", e)
             predicted = [["ERROR"]]
-
         predicted_test_outputs.append(predicted)
 
     return render_template("results.html",
@@ -87,10 +73,7 @@ def upload():
                            input_output_pairs=input_output_pairs,
                            test_pairs=test_pairs,
                            predicted_test_outputs=predicted_test_outputs,
-                           best_program=str(best_program),
-                           last_input=last_input,
-                           last_ground_truth=last_ground_truth,
-                           predicted_output=predicted_output)
+                           best_program=str(best_program))
 
 if __name__ == '__main__':
-    app.run(host="0.0.0.0", port=7860)
+    app.run(debug=True)
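For reference, the removed entry point bound the server to 0.0.0.0:7860 (the binding a hosted Space typically needs), while the new app.run(debug=True) falls back to Flask's defaults (127.0.0.1:5000). A minimal sketch that keeps the external binding while still enabling debug mode is shown below; the PORT environment variable and the stand-in app object are assumptions for illustration, not part of app.py:

    import os
    from flask import Flask

    app = Flask(__name__)  # stand-in; the real app object is created earlier in app.py

    if __name__ == "__main__":
        # Old behaviour: app.run(host="0.0.0.0", port=7860)
        # New behaviour: app.run(debug=True)
        # This sketch keeps the 0.0.0.0:7860 binding and adds debug mode;
        # the PORT environment variable is an assumption, not defined in this repo.
        app.run(host="0.0.0.0",
                port=int(os.environ.get("PORT", "7860")),
                debug=True)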