import os.path

from transformers import (
    BertTokenizer,
    BertForSequenceClassification,
    TextClassificationPipeline,
    AutoModelForSequenceClassification,
)

# Load tokenizer and model from the fine-tuned directory
model_path = './intent_classification/TinyBERT_106_V2'  # can try other checkpoints
tokenizer = BertTokenizer.from_pretrained('KhairulAmirinUM/Advisor_AI_BERT')
# model = BertForSequenceClassification.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path, local_files_only=True)

print(os.path.exists(model_path))
print("TinyBERT model is ready to use")

# Build the classification pipeline
text_pipeline = TextClassificationPipeline(model=model, tokenizer=tokenizer)


# Function to generate a response: returns the predicted topic label and its confidence score
def generate_response(user_query):
    response = text_pipeline(user_query)
    # Example response: [{'label': 'LABEL_4', 'score': 0.9997817873954773}]
    label_name = response[0].get('label')
    score = response[0].get('score')

    # Map the raw label to a topic name; unmapped labels fall back to 'NA'
    topic_label = 'NA'
    match label_name:
        case "LABEL_0":
            topic_label = 'RAG'
        case "LABEL_1":
            topic_label = 'Neo4j'

    return topic_label, score


print(generate_response("Procedure to withdraw"))
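
# Optional sanity check -- a minimal sketch, assuming the fine-tuned checkpoint saved
# its label mapping in the model config (standard for transformers sequence
# classification models). Printing id2label lets you verify that the
# "LABEL_0" -> 'RAG' and "LABEL_1" -> 'Neo4j' mapping in generate_response matches
# the labels the checkpoint was actually trained with.
print(model.config.id2label)  # e.g. {0: 'LABEL_0', 1: 'LABEL_1', ...} if no custom names were stored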