import os
import json
import gzip
import csv
from multiprocessing import Pool, cpu_count
import time
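
# Read a gzipped JSON file and return its "meta" dict (empty if the key is absent).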
def process_json_file(file_path):
    with gzip.open(file_path, 'rt', encoding='utf-8') as gz_file:
        data = json.load(gz_file)
    return data.get('meta', {})
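
# File size in megabytes, rounded to two decimal places.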
def get_file_size_mb(file_path):
    return round(os.path.getsize(file_path) / (1024 * 1024), 2)
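
# Write the collected rows twice: once as CSV, once as a markdown pipe table.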
def write_to_csv_and_md(output_csv, output_md, headers, data):
    with open(output_csv, 'w', newline='', encoding='utf-8') as csv_file:
        # extrasaction='ignore' skips meta keys that are not listed in headers
        # instead of raising ValueError; missing keys default to "".
        writer = csv.DictWriter(csv_file, fieldnames=headers, extrasaction='ignore', restval="")
        writer.writeheader()
        writer.writerows(data)
    with open(output_md, 'w', encoding='utf-8') as md_file:
        md_file.write("| " + " | ".join(headers) + " |\n")
        md_file.write("|" + "|".join([" --- " for _ in headers]) + "|\n")
        for row in data:
            # row.get() avoids a KeyError when a file's meta block lacks a header field.
            md_file.write("| " + " | ".join([str(row.get(header, "")) for header in headers]) + " |\n")
def process_file(file_name, input_directory, base_url):
    file_path = os.path.join(input_directory, file_name)
    meta_data = process_json_file(file_path)
    file_size_mb = get_file_size_mb(file_path)
    row_data = {
        "filesize": file_size_mb,
        "filename": file_name,
        "URL": f"{base_url}{file_name.replace('.json.gz', '')}",
        **meta_data
    }
    return row_data
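
# Scan input_directory for .json.gz files, extract their metadata in parallel
# (one worker process per CPU core), and write the combined table to CSV and markdown.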
def main(input_directory, output_csv, output_md, base_url="https://do-me.github.io/SemanticFinder/?hf="):
    headers = [
        "filesize", "textTitle", "textAuthor", "textYear", "textLanguage", "URL",
        "modelName", "quantized", "splitParam", "splitType", "characters", "chunks",
        "wordsToAvoidAll", "wordsToCheckAll", "wordsToAvoidAny", "wordsToCheckAny",
        "exportDecimals", "lines", "textNotes", "textSourceURL", "filename"
    ]
    start_time = time.time()
    file_list = [file_name for file_name in os.listdir(input_directory) if file_name.endswith('.json.gz')]
    with Pool(cpu_count()) as pool:
        all_data = pool.starmap(process_file, [(file_name, input_directory, base_url) for file_name in file_list])
    write_to_csv_and_md(output_csv, output_md, headers, all_data)
    print(f"Processing time: {round(time.time() - start_time, 2)} seconds")
if __name__ == "__main__":
    input_directory = "."
    output_csv = "meta_data.csv"
    output_md = "meta_data.md"
    main(input_directory, output_csv, output_md)