seanpedrickcase committed on
Commit
6da6ac6
·
1 Parent(s): aba68bf

Removed unnecessary print statements

Browse files
Files changed (3) hide show
  1. app.py +2 -1
  2. tools/config.py +0 -5
  3. tools/llm_api_call.py +1 -5
app.py CHANGED
@@ -22,7 +22,8 @@ def ensure_folder_exists(output_folder:str):
22
  os.makedirs(output_folder, exist_ok=True)
23
  print(f"Created the {output_folder} folder.")
24
  else:
25
- print(f"The {output_folder} folder already exists.")
 
26
 
27
  ensure_folder_exists(CONFIG_FOLDER)
28
  ensure_folder_exists(OUTPUT_FOLDER)
 
22
  os.makedirs(output_folder, exist_ok=True)
23
  print(f"Created the {output_folder} folder.")
24
  else:
25
+ pass
26
+ #print(f"The {output_folder} folder already exists.")
27
 
28
  ensure_folder_exists(CONFIG_FOLDER)
29
  ensure_folder_exists(OUTPUT_FOLDER)
tools/config.py CHANGED
@@ -284,11 +284,6 @@ elif CHOSEN_LOCAL_MODEL_TYPE == "gpt-oss-20b":
284
  LOCAL_MODEL_FILE = GPT_OSS_MODEL_FILE
285
  LOCAL_MODEL_FOLDER = GPT_OSS_MODEL_FOLDER
286
 
287
- print("CHOSEN_LOCAL_MODEL_TYPE:", CHOSEN_LOCAL_MODEL_TYPE)
288
- print("LOCAL_REPO_ID:", LOCAL_REPO_ID)
289
- print("LOCAL_MODEL_FILE:", LOCAL_MODEL_FILE)
290
- print("LOCAL_MODEL_FOLDER:", LOCAL_MODEL_FOLDER)
291
-
292
  LLM_MAX_GPU_LAYERS = int(get_or_create_env_var('LLM_MAX_GPU_LAYERS','-1')) # Maximum possible
293
  LLM_TEMPERATURE = float(get_or_create_env_var('LLM_TEMPERATURE', '0.1'))
294
  LLM_TOP_K = int(get_or_create_env_var('LLM_TOP_K','96')) # https://docs.unsloth.ai/basics/gemma-3-how-to-run-and-fine-tune
 
284
  LOCAL_MODEL_FILE = GPT_OSS_MODEL_FILE
285
  LOCAL_MODEL_FOLDER = GPT_OSS_MODEL_FOLDER
286
 
 
 
 
 
 
287
  LLM_MAX_GPU_LAYERS = int(get_or_create_env_var('LLM_MAX_GPU_LAYERS','-1')) # Maximum possible
288
  LLM_TEMPERATURE = float(get_or_create_env_var('LLM_TEMPERATURE', '0.1'))
289
  LLM_TOP_K = int(get_or_create_env_var('LLM_TOP_K','96')) # https://docs.unsloth.ai/basics/gemma-3-how-to-run-and-fine-tune
tools/llm_api_call.py CHANGED
@@ -361,17 +361,13 @@ def write_llm_output_and_logs(response_text: str,
361
  whole_conversation_metadata_str = '\n'.join(whole_conversation_metadata)
362
  start_row_reported = start_row + 1
363
 
364
- print("model_choice_clean in write_llm_output_and_logs:", model_choice_clean)
365
-
366
  batch_file_path_details = create_batch_file_path_details(file_name)
367
 
368
  # Need to reduce output file names as full length files may be too long
369
  model_choice_clean_short = clean_column_name(model_choice_clean, max_length=20, front_characters=False)
370
  # in_column_cleaned = clean_column_name(in_column, max_length=20)
371
- # file_name_clean = clean_column_name(file_name, max_length=20, front_characters=True)
372
- #
373
 
374
- print("model_choice_clean_short in write_llm_output_and_logs:", model_choice_clean_short)
375
 
376
  # # Save outputs for each batch. If master file created, label file as master
377
  # batch_file_path_details = f"{file_name_clean}_batch_{latest_batch_completed + 1}_size_{batch_size_number}_col_{in_column_cleaned}"
 
361
  whole_conversation_metadata_str = '\n'.join(whole_conversation_metadata)
362
  start_row_reported = start_row + 1
363
 
 
 
364
  batch_file_path_details = create_batch_file_path_details(file_name)
365
 
366
  # Need to reduce output file names as full length files may be too long
367
  model_choice_clean_short = clean_column_name(model_choice_clean, max_length=20, front_characters=False)
368
  # in_column_cleaned = clean_column_name(in_column, max_length=20)
369
+ # file_name_clean = clean_column_name(file_name, max_length=20, front_characters=True)
 
370
 
 
371
 
372
  # # Save outputs for each batch. If master file created, label file as master
373
  # batch_file_path_details = f"{file_name_clean}_batch_{latest_batch_completed + 1}_size_{batch_size_number}_col_{in_column_cleaned}"