Update file_loader.py
file_loader.py CHANGED (+7 -5)
@@ -19,11 +19,18 @@ def get_vectorstore():
     docx_files = list_docx_files(folder_path)
 
     all_splits = []  # Initialize the list that collects all splits
+    print("Feeding relevant websites' contents")
+    base_urls = ['https://fda.neu.edu.vn/hoi-nghi-khoa-hoc-cong-nghe-dai-hoc-kinh-te-quoc-dan-nam-2025/']
+    # ['https://nct.neu.edu.vn/', 'https://fsf.neu.edu.vn/', 'https://mfe.neu.edu.vn/', 'https://mis.neu.edu.vn/', 'https://fda.neu.edu.vn/', 'https://khoathongke.neu.edu.vn/', 'https://fit.neu.edu.vn/']
+    website_contents = scrape_website(base_urls=base_urls)
+    all_splits += website_contents
+
     print('Feeding .docx files')
     for i, file_path in enumerate(tqdm(docx_files, desc="Processing", unit="file")):
         output_json_path = f"output_{i}.json"
         splits = get_splits(file_path, output_json_path)
         all_splits += splits
+
     print('Feeding .json files')
     # Process the FAQ
     FAQ_path = "syllabus_nct_word_format/FAQ.json"
@@ -33,11 +40,6 @@
     FAQ_path = "syllabus_nct_word_format/FAQ2.json"
     FAQ_splits = get_json_splits_only(FAQ_path)
     all_splits += FAQ_splits
-    print("Feeding relevant websites' contents")
-    base_urls = ['https://fda.neu.edu.vn/hoi-nghi-khoa-hoc-cong-nghe-dai-hoc-kinh-te-quoc-dan-nam-2025/']
-    # ['https://nct.neu.edu.vn/', 'https://fsf.neu.edu.vn/', 'https://mfe.neu.edu.vn/', 'https://mis.neu.edu.vn/', 'https://fda.neu.edu.vn/', 'https://khoathongke.neu.edu.vn/', 'https://fit.neu.edu.vn/']
-    website_contents = scrape_website(base_urls=base_urls)
-    all_splits += website_contents
 
     # Save to the vectorstore using Google GenAI embeddings
     # embedding = GoogleGenerativeAIEmbeddings(model="models/text-embedding-004")
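
The moved block depends on the scrape_website helper, which this diff does not show. A minimal sketch of what such a helper might look like, assuming it fetches each URL with requests, extracts the visible text with BeautifulSoup, and returns LangChain Document chunks ready to append to all_splits; every name here other than scrape_website and base_urls is an assumption, not the repo's actual code.

# Hypothetical sketch of the scrape_website helper assumed by this diff;
# the repo's real implementation may differ.
import requests
from bs4 import BeautifulSoup
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter

def scrape_website(base_urls):
    docs = []
    for url in base_urls:
        resp = requests.get(url, timeout=30)
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, "html.parser")
        # Keep only the visible text of the page
        text = soup.get_text(separator="\n", strip=True)
        docs.append(Document(page_content=text, metadata={"source": url}))
    # Chunk pages so they are comparable in size to the .docx splits
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    return splitter.split_documents(docs)

Called as in the diff, its output concatenates directly onto the shared list: all_splits += scrape_website(base_urls=base_urls).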
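get_json_splits_only is likewise not shown in the diff. A hypothetical sketch, assuming each FAQ file holds a JSON list of question/answer records; the actual schema is not visible here.

# Hypothetical sketch of get_json_splits_only; assumes each FAQ file is a
# JSON list of {"question": ..., "answer": ...} records, which the diff
# does not confirm.
import json
from langchain_core.documents import Document

def get_json_splits_only(path):
    with open(path, encoding="utf-8") as f:
        records = json.load(f)
    return [
        Document(
            page_content=f"Q: {r['question']}\nA: {r['answer']}",
            metadata={"source": path},
        )
        for r in records
    ]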
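Finally, the embedding line stays commented out in both versions of the file. A minimal sketch of how the collected splits could be turned into a vectorstore, assuming the LangChain GoogleGenerativeAIEmbeddings wrapper named in the comment and FAISS as the store; the store choice is an assumption, since the diff ends before that code.

# Hypothetical completion of the vectorstore step; FAISS is an assumed
# store choice, and build_vectorstore is an illustrative name.
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_community.vectorstores import FAISS

def build_vectorstore(all_splits):
    # Requires GOOGLE_API_KEY to be set for the Google GenAI embeddings.
    embedding = GoogleGenerativeAIEmbeddings(model="models/text-embedding-004")
    return FAISS.from_documents(documents=all_splits, embedding=embedding)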