{ "cells": [ { "cell_type": "code", "execution_count": 28, "id": "a7a23542", "metadata": {}, "outputs": [], "source": [ "import pandas as pd \n", "dataset = pd.read_csv(\"data/hackerank/combined_data.csv\")\n", "import ast\n", "\n", "def clean_tags(tag_string):\n", " # Convert the string to a list\n", " tag_list = ast.literal_eval(tag_string)\n", " # Join the list into a comma-separated string\n", " return ', '.join(tag_list)\n", "\n", "dataset['Tags']=dataset['Tags'].apply(clean_tags)" ] }, { "cell_type": "code", "execution_count": 29, "id": "da4491c7", "metadata": {}, "outputs": [], "source": [ "df = dataset" ] }, { "cell_type": "code", "execution_count": 30, "id": "362c543a", "metadata": {}, "outputs": [], "source": [ "from sklearn.feature_extraction.text import TfidfVectorizer\n", "from sklearn.preprocessing import LabelEncoder\n", "\n", "def vectorirse_text(text):\n", " \"\"\" Recieves text as input and returns TF-IDF vectors\"\"\"\n", " tfidf = TfidfVectorizer(max_features=500000)\n", " X = tfidf.fit_transform(text)\n", " return X\n", "\n", "def label_encoding(input):\n", " label_encoder = LabelEncoder()\n", " return label_encoder.fit_transform(input)\n", "\n", "\n", "X = vectorirse_text(df['Input'])\n", "y = label_encoding(df['Tags'])" ] }, { "cell_type": "code", "execution_count": 31, "id": "b86c2a03", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✅ Accuracy on Test Set: 0.9124\n", "\n", "✅ Classification Report on Test Set:\n", "\n", " precision recall f1-score support\n", "\n", " 0 0.95 0.90 0.92 792\n", " 1 0.86 0.85 0.85 709\n", " 2 0.92 0.93 0.92 729\n", " 3 0.94 0.94 0.94 830\n", " 4 0.92 0.86 0.89 486\n", " 5 0.94 0.94 0.94 680\n", " 6 0.94 0.99 0.96 803\n", " 7 0.85 0.88 0.86 634\n", " 8 0.83 0.85 0.84 704\n", " 9 0.97 0.95 0.96 684\n", "\n", " accuracy 0.91 7051\n", " macro avg 0.91 0.91 0.91 7051\n", "weighted avg 0.91 0.91 0.91 7051\n", "\n", "✅ Confusion Matrix on Test Set:\n", "\n", "[[711 22 8 4 0 7 4 22 12 2]\n", " [ 12 604 8 3 2 10 5 12 51 2]\n", " [ 7 2 679 5 7 7 8 8 6 0]\n", " [ 1 6 4 783 16 5 2 5 6 2]\n", " [ 1 6 10 21 418 1 4 4 17 4]\n", " [ 5 10 8 1 0 639 11 3 3 0]\n", " [ 0 0 6 0 0 3 793 1 0 0]\n", " [ 6 26 7 4 1 0 11 557 19 3]\n", " [ 7 27 10 11 4 4 4 36 597 4]\n", " [ 0 1 1 4 4 3 1 11 7 652]]\n", "✅ Saved probabilities to Phase1/TFIDF.csv successfully!\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
y_trueclass_0class_1class_2class_3class_4class_5class_6class_7class_8class_9
020.0048560.0098640.8644220.0089400.0091830.0166390.0392770.0265710.0108240.009424
120.0052970.0065580.9134010.0062620.0115970.0189830.0189090.0067440.0093020.002946
220.0300780.0353630.6811360.0389870.0254770.0609350.0247960.0331980.0456270.024402
320.0144400.0154720.7909730.0239520.0235290.0203450.0376770.0270910.0305670.015954
420.0126460.0197500.6123340.1162180.0365320.0279830.0459180.0606000.0361020.031918
\n", "
" ], "text/plain": [ " y_true class_0 class_1 class_2 class_3 class_4 class_5 \\\n", "0 2 0.004856 0.009864 0.864422 0.008940 0.009183 0.016639 \n", "1 2 0.005297 0.006558 0.913401 0.006262 0.011597 0.018983 \n", "2 2 0.030078 0.035363 0.681136 0.038987 0.025477 0.060935 \n", "3 2 0.014440 0.015472 0.790973 0.023952 0.023529 0.020345 \n", "4 2 0.012646 0.019750 0.612334 0.116218 0.036532 0.027983 \n", "\n", " class_6 class_7 class_8 class_9 \n", "0 0.039277 0.026571 0.010824 0.009424 \n", "1 0.018909 0.006744 0.009302 0.002946 \n", "2 0.024796 0.033198 0.045627 0.024402 \n", "3 0.037677 0.027091 0.030567 0.015954 \n", "4 0.045918 0.060600 0.036102 0.031918 " ] }, "execution_count": 31, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import pandas as pd\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.linear_model import LogisticRegression\n", "from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n", "\n", "# Assuming df is already loaded\n", "# And vectorise_text and label_encoding are already available\n", "\n", "# Step 1: Preprocess\n", "# X = vectorise_text(df['text'])\n", "# y = label_encoding(df['label'])\n", "\n", "# Step 2: Train-test split\n", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n", "\n", "# Step 3: Train Logistic Regression\n", "model = LogisticRegression(max_iter=1000)\n", "model.fit(X_train, y_train)\n", "\n", "# Step 4: Predict on the test set\n", "y_preds = model.predict(X_test)\n", "\n", "# Step 5: Evaluate\n", "acc = accuracy_score(y_test, y_preds)\n", "print(f\"✅ Accuracy on Test Set: {acc:.4f}\\n\")\n", "\n", "print(\"✅ Classification Report on Test Set:\\n\")\n", "print(classification_report(y_test, y_preds))\n", "\n", "print(\"✅ Confusion Matrix on Test Set:\\n\")\n", "print(confusion_matrix(y_test, y_preds))\n", "\n", "# ----------------------------\n", "# Step 6: Predict probabilities for FULL fX\n", "# ----------------------------\n", "\n", "# Assuming fX and fy are available (full dataset for probability generation)\n", "# fX = vectorise_text(df_full['text'])\n", "# fy = label_encoding(df_full['label'])\n", "\n", "y_proba = model.predict_proba(X)\n", "\n", "# Step 7: Save probabilities\n", "proba_df = pd.DataFrame(y_proba, columns=[f'class_{i}' for i in range(y_proba.shape[1])])\n", "proba_df.insert(0, 'y_true', y)\n", "\n", "proba_df.to_csv('Phase1/TFIDF.csv', index=False)\n", "\n", "print(\"✅ Saved probabilities to Phase1/TFIDF.csv successfully!\")\n", "proba_df.head()\n" ] }, { "cell_type": "code", "execution_count": null, "id": "4bdabea9", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "35251" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [] }, { "cell_type": "code", "execution_count": 32, "id": "7114ae44", "metadata": {}, "outputs": [], "source": [ "import gensim\n", "import numpy as np\n", "from sklearn.ensemble import RandomForestClassifier\n", "\n", "df['tokens'] = df['Input'].apply(lambda x: x.split())\n", "\n", "w2v_model = gensim.models.Word2Vec(sentences=df['tokens'], vector_size=10000, window=5, min_count=2, workers=10)\n", "\n", "def get_sentence_embedding(tokens):\n", " vectors = [w2v_model.wv[word] for word in tokens if word in w2v_model.wv]\n", " return np.mean(vectors, axis=0) if vectors else np.zeros(100)\n", "\n", "X_w2v = np.array(df['Input'].apply(get_sentence_embedding).tolist())\n", "y_encoded = label_encoding(df['Tags'])\n" ] }, { "cell_type": "code", 
"execution_count": 33, "id": "de12ab7c", "metadata": {}, "outputs": [], "source": [ "X = X_w2v\n", "y = y_encoded" ] }, { "cell_type": "code", "execution_count": 34, "id": "c5777437", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/darth/.pyenv/versions/major02/lib/python3.10/site-packages/xgboost/training.py:183: UserWarning: [20:55:48] WARNING: /workspace/src/learner.cc:738: \n", "Parameters: { \"use_label_encoder\" } are not used.\n", "\n", " bst.update(dtrain, iteration=i, fobj=obj)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "✅ Accuracy on Test Set: 0.3624\n", "\n", "✅ Classification Report on Test Set:\n", "\n", " precision recall f1-score support\n", "\n", " 0 0.36 0.37 0.36 792\n", " 1 0.23 0.21 0.22 709\n", " 2 0.35 0.31 0.33 729\n", " 3 0.36 0.42 0.39 830\n", " 4 0.46 0.36 0.40 486\n", " 5 0.37 0.33 0.35 680\n", " 6 0.46 0.62 0.53 803\n", " 7 0.29 0.27 0.28 634\n", " 8 0.31 0.24 0.27 704\n", " 9 0.40 0.44 0.42 684\n", "\n", " accuracy 0.36 7051\n", " macro avg 0.36 0.36 0.35 7051\n", "weighted avg 0.36 0.36 0.36 7051\n", "\n", "✅ Confusion Matrix on Test Set:\n", "\n", "[[293 69 53 87 15 55 61 51 49 59]\n", " [119 150 63 103 22 43 52 58 54 45]\n", " [ 74 80 225 73 20 55 64 53 50 35]\n", " [ 62 65 54 349 37 40 70 41 47 65]\n", " [ 16 30 28 61 173 21 36 30 31 60]\n", " [ 58 46 44 52 17 224 123 47 36 33]\n", " [ 26 38 37 53 13 52 495 36 22 31]\n", " [ 52 60 44 77 18 45 80 173 32 53]\n", " [ 79 62 65 69 36 47 42 52 170 82]\n", " [ 39 48 31 56 29 24 46 46 62 303]]\n", "\n", "✅ Also saved class probabilities to Phase1/Word2Vec_Proba.csv!\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
y_trueclass_0class_1class_2class_3class_4class_5class_6class_7class_8class_9
020.0018490.0256840.8770310.0034760.0353810.0014870.0059560.0284490.0090450.011643
120.0154270.0649570.7666130.0132900.0037480.0150200.0710540.0312610.0151510.003479
220.0029200.0288540.8957880.0038700.0012940.0271670.0165740.0029370.0157330.004863
320.0421820.0808830.6109610.0179620.0139550.0628330.0944890.0485410.0087790.019415
420.0473610.0354540.2549780.5781280.0002580.0026490.0035030.0447630.0302600.002646
\n", "
" ], "text/plain": [ " y_true class_0 class_1 class_2 class_3 class_4 class_5 \\\n", "0 2 0.001849 0.025684 0.877031 0.003476 0.035381 0.001487 \n", "1 2 0.015427 0.064957 0.766613 0.013290 0.003748 0.015020 \n", "2 2 0.002920 0.028854 0.895788 0.003870 0.001294 0.027167 \n", "3 2 0.042182 0.080883 0.610961 0.017962 0.013955 0.062833 \n", "4 2 0.047361 0.035454 0.254978 0.578128 0.000258 0.002649 \n", "\n", " class_6 class_7 class_8 class_9 \n", "0 0.005956 0.028449 0.009045 0.011643 \n", "1 0.071054 0.031261 0.015151 0.003479 \n", "2 0.016574 0.002937 0.015733 0.004863 \n", "3 0.094489 0.048541 0.008779 0.019415 \n", "4 0.003503 0.044763 0.030260 0.002646 " ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import xgboost as xgb\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n", "import pandas as pd\n", "import os\n", "\n", "# Step 1: Ensure Phase1 folder exists\n", "os.makedirs('Phase1', exist_ok=True)\n", "\n", "# Step 2: Train-test split\n", "X_train, X_test, y_train, y_test = train_test_split(X_w2v, y_encoded, test_size=0.2, random_state=42)\n", "\n", "# Step 3: Train XGBoost model\n", "model = xgb.XGBClassifier(use_label_encoder=False, eval_metric='mlogloss')\n", "model.fit(X_train, y_train)\n", "\n", "# Step 4: Predict on the test set\n", "y_preds = model.predict(X_test)\n", "\n", "# Step 5: Evaluate\n", "acc = accuracy_score(y_test, y_preds)\n", "print(f\"✅ Accuracy on Test Set: {acc:.4f}\\n\")\n", "\n", "print(\"✅ Classification Report on Test Set:\\n\")\n", "print(classification_report(y_test, y_preds))\n", "\n", "print(\"✅ Confusion Matrix on Test Set:\\n\")\n", "print(confusion_matrix(y_test, y_preds))\n", "\n", "# ---------------------------------\n", "# Step 6: Predict probabilities on full data\n", "# ---------------------------------\n", "y_proba = model.predict_proba(X_w2v)\n", "\n", "proba_df = pd.DataFrame(y_proba, columns=[f'class_{i}' for i in range(y_proba.shape[1])])\n", "proba_df.insert(0, 'y_true', y_encoded)\n", "\n", "proba_df.to_csv('Phase1/Word2Vec_Proba.csv', index=False)\n", "\n", "print(\"\\n✅ Also saved class probabilities to Phase1/Word2Vec_Proba.csv!\")\n", "proba_df.head()\n" ] }, { "cell_type": "code", "execution_count": 13, "id": "22efac38", "metadata": {}, "outputs": [], "source": [ "proba_df.to_csv('Phase1/Word2Vec.csv', index=False)" ] }, { "cell_type": "code", "execution_count": 35, "id": "73fff7e2", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Generating GloVe Embeddings: 100%|██████████| 35251/35251 [00:01<00:00, 22919.42it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "(35251, 300)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "import numpy as np\n", "import gensim.downloader as api\n", "\n", "# Load Pretrained GloVe Model\n", "glove_model = api.load(\"glove-wiki-gigaword-300\") \n", "\n", "# Convert a sentence to a vector by averaging word embeddings\n", "def get_sentence_embedding(sentence, model, dim=300):\n", " words = sentence.split()\n", " word_vectors = [model[word] for word in words if word in model]\n", " return np.mean(word_vectors, axis=0) if word_vectors else np.zeros(dim)\n", "\n", "text_samples = [\"GloVe embeddings capture word semantics\"]\n", "embeddings = np.array([get_sentence_embedding(text, glove_model) for text in text_samples])\n", "\n", "\n", "from tqdm import tqdm\n", "\n", "# 
"execution_count": 35, "id": "73fff7e2", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Generating GloVe Embeddings: 100%|██████████| 35251/35251 [00:01<00:00, 22919.42it/s]" ] }, { "name": "stdout", "output_type": "stream", "text": [ "(35251, 300)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n" ] } ], "source": [ "import numpy as np\n",
 "import gensim.downloader as api\n",
 "from tqdm import tqdm\n",
 "\n",
 "# Load pretrained GloVe model\n",
 "glove_model = api.load(\"glove-wiki-gigaword-300\")\n",
 "\n",
 "# Convert a sentence to a vector by averaging word embeddings\n",
 "def get_sentence_embedding(sentence, model, dim=300):\n",
 "    words = sentence.split()\n",
 "    word_vectors = [model[word] for word in words if word in model]\n",
 "    return np.mean(word_vectors, axis=0) if word_vectors else np.zeros(dim)\n",
 "\n",
 "# Generate GloVe embeddings for the entire dataset in a single pass, with a progress bar\n",
 "X_glove = np.array([get_sentence_embedding(text, glove_model) for text in tqdm(df['Input'], desc=\"Generating GloVe Embeddings\")])\n",
 "\n",
 "# Store the embeddings in the dataframe for later use\n",
 "df['glove_embeddings'] = list(X_glove)\n",
 "\n",
 "# Check the shape of the generated embeddings\n",
 "print(X_glove.shape)" ] }, { "cell_type": "code", "execution_count": 36, "id": "05754d5c", "metadata": {}, "outputs": [], "source": [ "X = df['glove_embeddings']\n",
 "y = label_encoding(df['Tags'])\n" ] }, { "cell_type": "code", "execution_count": 37, "id": "3092399e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✅ Accuracy on Test Set: 0.7103\n", "\n", "✅ Classification Report on Test Set:\n", "\n", " precision recall f1-score support\n", "\n", " 0 0.66 0.64 0.65 792\n", " 1 0.45 0.50 0.47 709\n", " 2 0.80 0.79 0.79 729\n", " 3 0.70 0.70 0.70 830\n", " 4 0.72 0.63 0.67 486\n", " 5 0.81 0.83 0.82 680\n", " 6 0.91 0.94 0.93 803\n", " 7 0.68 0.74 0.70 634\n", " 8 0.59 0.53 0.56 704\n", " 9 0.78 0.76 0.77 684\n", "\n", " accuracy 0.71 7051\n", " macro avg 0.71 0.71 0.71 7051\n", "weighted avg 0.71 0.71 0.71 7051\n", "\n", "✅ Confusion Matrix on Test Set:\n", "\n", "[[506 137 14 16 5 24 5 31 42 12]\n", " [123 354 16 20 7 31 12 37 88 21]\n", " [ 17 22 575 28 22 25 9 14 14 3]\n", " [ 19 35 15 581 45 16 5 27 47 40]\n", " [ 4 26 24 63 308 10 11 8 18 14]\n", " [ 19 29 17 11 5 565 13 7 2 12]\n", " [ 1 4 10 5 4 7 758 9 4 1]\n", " [ 24 37 19 19 7 5 11 466 24 22]\n", " [ 39 108 28 48 12 5 6 59 376 23]\n", " [ 9 30 5 43 13 8 3 30 24 519]]\n", "\n", "✅ Saved probabilities to Phase1/GloVe.csv!\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
y_trueclass_0class_1class_2class_3class_4class_5class_6class_7class_8class_9
023.351565e-083.210468e-080.9997350.0000020.0000070.0000020.0000290.0000170.0002040.000004
125.428256e-045.070415e-050.9989220.0000060.0000390.0002330.0001010.0000280.0000660.000011
221.161756e-031.901476e-030.9927950.0003410.0004290.0007180.0000080.0000790.0008420.001723
325.475805e-021.380654e-010.1128600.0742550.0133190.0101170.1162540.1052380.2719000.103235
421.400376e-027.604539e-020.1151920.2510550.0872610.0829120.0305330.1850330.0742750.083690
\n", "
" ], "text/plain": [ " y_true class_0 class_1 class_2 class_3 class_4 class_5 \\\n", "0 2 3.351565e-08 3.210468e-08 0.999735 0.000002 0.000007 0.000002 \n", "1 2 5.428256e-04 5.070415e-05 0.998922 0.000006 0.000039 0.000233 \n", "2 2 1.161756e-03 1.901476e-03 0.992795 0.000341 0.000429 0.000718 \n", "3 2 5.475805e-02 1.380654e-01 0.112860 0.074255 0.013319 0.010117 \n", "4 2 1.400376e-02 7.604539e-02 0.115192 0.251055 0.087261 0.082912 \n", "\n", " class_6 class_7 class_8 class_9 \n", "0 0.000029 0.000017 0.000204 0.000004 \n", "1 0.000101 0.000028 0.000066 0.000011 \n", "2 0.000008 0.000079 0.000842 0.001723 \n", "3 0.116254 0.105238 0.271900 0.103235 \n", "4 0.030533 0.185033 0.074275 0.083690 " ] }, "execution_count": 37, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from sklearn.svm import SVC\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n", "import pandas as pd\n", "import os\n", "import numpy as np\n", "\n", "# Step 0: Ensure Phase1 folder exists\n", "os.makedirs('Phase1', exist_ok=True)\n", "\n", "# Step 1: Fix X if needed\n", "if isinstance(X.iloc[0], np.ndarray):\n", " X = np.vstack(X.values)\n", "\n", "# Step 2: Train-test split\n", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n", "\n", "# Step 3: Train SVM model\n", "model = SVC(kernel='linear', probability=True)\n", "model.fit(X_train, y_train)\n", "\n", "# Step 4: Predict on the test set\n", "y_preds = model.predict(X_test)\n", "\n", "# Step 5: Evaluate\n", "acc = accuracy_score(y_test, y_preds)\n", "print(f\"✅ Accuracy on Test Set: {acc:.4f}\\n\")\n", "\n", "print(\"✅ Classification Report on Test Set:\\n\")\n", "print(classification_report(y_test, y_preds))\n", "\n", "print(\"✅ Confusion Matrix on Test Set:\\n\")\n", "print(confusion_matrix(y_test, y_preds))\n", "\n", "# ---------------------------------\n", "# Step 6: Predict probabilities on full data\n", "# ---------------------------------\n", "y_proba = model.predict_proba(X)\n", "\n", "# Step 7: Save probability scores\n", "proba_df = pd.DataFrame(y_proba, columns=[f'class_{i}' for i in range(y_proba.shape[1])])\n", "proba_df.insert(0, 'y_true', y)\n", "\n", "proba_df.to_csv('Phase1/GloVe.csv', index=False)\n", "\n", "print(\"\\n✅ Saved probabilities to Phase1/GloVe.csv!\")\n", "proba_df.head()\n" ] }, { "cell_type": "code", "execution_count": 25, "id": "046eeb56", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Collecting transformers\n", " Using cached transformers-4.51.3-py3-none-any.whl (10.4 MB)\n", "Requirement already satisfied: filelock in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from transformers) (3.18.0)\n", "Collecting requests\n", " Using cached requests-2.32.3-py3-none-any.whl (64 kB)\n", "Collecting pyyaml>=5.1\n", " Using cached PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (751 kB)\n", "Collecting huggingface-hub<1.0,>=0.30.0\n", " Using cached huggingface_hub-0.30.2-py3-none-any.whl (481 kB)\n", "Collecting tokenizers<0.22,>=0.21\n", " Using cached tokenizers-0.21.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.0 MB)\n", "Collecting safetensors>=0.4.3\n", " Using cached safetensors-0.5.3-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (471 kB)\n", "Requirement already satisfied: numpy>=1.17 in 
/home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from transformers) (1.26.4)\n", "Requirement already satisfied: tqdm>=4.27 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from transformers) (4.67.1)\n", "Requirement already satisfied: packaging>=20.0 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from transformers) (25.0)\n", "Requirement already satisfied: regex!=2019.12.17 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from transformers) (2024.11.6)\n", "Requirement already satisfied: fsspec>=2023.5.0 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.30.0->transformers) (2025.3.2)\n", "Requirement already satisfied: typing-extensions>=3.7.4.3 in /home/darth/.pyenv/versions/3.10.12/envs/major02/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.30.0->transformers) (4.13.2)\n", "Collecting idna<4,>=2.5\n", " Using cached idna-3.10-py3-none-any.whl (70 kB)\n", "Collecting certifi>=2017.4.17\n", " Using cached certifi-2025.4.26-py3-none-any.whl (159 kB)\n", "Collecting urllib3<3,>=1.21.1\n", " Using cached urllib3-2.4.0-py3-none-any.whl (128 kB)\n", "Collecting charset-normalizer<4,>=2\n", " Using cached charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (146 kB)\n", "Installing collected packages: urllib3, safetensors, pyyaml, idna, charset-normalizer, certifi, requests, huggingface-hub, tokenizers, transformers\n", "Successfully installed certifi-2025.4.26 charset-normalizer-3.4.1 huggingface-hub-0.30.2 idna-3.10 pyyaml-6.0.2 requests-2.32.3 safetensors-0.5.3 tokenizers-0.21.1 transformers-4.51.3 urllib3-2.4.0\n", "\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m25.1\u001b[0m\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n", "Note: you may need to restart the kernel to use updated packages.\n" ] } ], "source": [ "%pip install transformers" ] }, { "cell_type": "code", "execution_count": 38, "id": "335daa95", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Processing: 100%|██████████| 2204/2204 [02:07<00:00, 17.25it/s]\n" ] } ], "source": [ "import torch\n",
 "import pandas as pd\n",
 "import numpy as np\n",
 "from tqdm import tqdm\n",
 "from transformers import AlbertTokenizer, AlbertModel\n",
 "\n",
 "# Load ALBERT tokenizer and model\n",
 "tokenizer = AlbertTokenizer.from_pretrained(\"albert-base-v2\")\n",
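"# Note: AlbertTokenizer is SentencePiece-based, so the 'sentencepiece'\n",
 "# package must be installed alongside transformers for this load to work.\n",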
"model = AlbertModel.from_pretrained(\"albert-base-v2\")\n", "\n", "# Set device (GPU if available, otherwise CPU)\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "model.to(device)\n", "\n", "# Load your dataset (Ensure df['text'] contains the text data)\n", "# df = pd.read_csv(\"your_dataset.csv\") # Replace with your dataset file\n", "texts = df[\"Input\"].tolist()\n", "\n", "# Function to generate embeddings\n", "def get_albert_embeddings(texts, batch_size=16):\n", " model.eval() # Set model to evaluation mode\n", " embeddings_list = []\n", "\n", " for i in tqdm(range(0, len(texts), batch_size), desc=\"Processing\"):\n", " batch_texts = texts[i : i + batch_size]\n", " \n", " # Tokenize the batch\n", " inputs = tokenizer(batch_texts, return_tensors=\"pt\", padding=True, truncation=True, max_length=512)\n", " inputs = {key: val.to(device) for key, val in inputs.items()} # Move to GPU/CPU\n", " \n", " # Get embeddings\n", " with torch.no_grad():\n", " outputs = model(**inputs)\n", " \n", " # Extract [CLS] token representation (sentence embedding)\n", " cls_embeddings = outputs.last_hidden_state[:, 0, :].cpu().numpy()\n", " embeddings_list.append(cls_embeddings)\n", "\n", " return np.vstack(embeddings_list) # Stack all embeddings\n", "\n", "# Generate embeddings for all texts\n", "embeddings = get_albert_embeddings(texts)" ] }, { "cell_type": "code", "execution_count": 39, "id": "4cca424b", "metadata": {}, "outputs": [], "source": [ "from sklearn.feature_extraction.text import TfidfVectorizer\n", "from sklearn.preprocessing import LabelEncoder\n", "\n", "def vectorirse_text(text):\n", " \"\"\" Recieves text as input and returns TF-IDF vectors\"\"\"\n", " tfidf = TfidfVectorizer(max_features=500000)\n", " X = tfidf.fit_transform(text)\n", " return X\n", "\n", "def label_encoding(input):\n", " label_encoder = LabelEncoder()\n", " return label_encoder.fit_transform(input)\n", "\n", "X = embeddings\n", "y = label_encoding(df['Tags'])" ] }, { "cell_type": "code", "execution_count": 40, "id": "c7f18d8e", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/darth/.pyenv/versions/major02/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py:465: ConvergenceWarning: lbfgs failed to converge (status=1):\n", "STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n", "\n", "Increase the number of iterations (max_iter) or scale the data as shown in:\n", " https://scikit-learn.org/stable/modules/preprocessing.html\n", "Please also refer to the documentation for alternative solver options:\n", " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", " n_iter_i = _check_optimize_result(\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "✅ Accuracy on Test Set: 0.7949\n", "\n", "✅ Classification Report on Test Set:\n", "\n", " precision recall f1-score support\n", "\n", " 0 0.76 0.73 0.74 792\n", " 1 0.66 0.66 0.66 709\n", " 2 0.84 0.82 0.83 729\n", " 3 0.79 0.82 0.80 830\n", " 4 0.79 0.76 0.77 486\n", " 5 0.90 0.92 0.91 680\n", " 6 0.98 0.99 0.98 803\n", " 7 0.73 0.75 0.74 634\n", " 8 0.65 0.63 0.64 704\n", " 9 0.82 0.86 0.84 684\n", "\n", " accuracy 0.79 7051\n", " macro avg 0.79 0.79 0.79 7051\n", "weighted avg 0.79 0.79 0.79 7051\n", "\n", "✅ Confusion Matrix on Test Set:\n", "\n", "[[575 95 18 10 3 12 0 33 28 18]\n", " [ 84 467 12 11 5 16 1 28 69 16]\n", " [ 13 9 595 26 26 21 3 12 23 1]\n", " [ 6 15 11 677 39 3 0 15 41 23]\n", " [ 5 10 11 40 370 1 6 8 21 14]\n", " [ 9 12 16 4 4 623 3 3 1 5]\n", " [ 1 1 3 1 1 0 791 4 0 1]\n", " [ 25 27 16 23 6 3 5 478 35 16]\n", " [ 34 56 24 38 13 6 1 58 441 33]\n", " [ 6 17 3 26 3 5 0 13 23 588]]\n", "\n", "✅ Saved class probabilities to Phase1/Albert.csv!\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
y_trueclass_0class_1class_2class_3class_4class_5class_6class_7class_8class_9
024.046189e-061.673384e-060.9967288.242731e-040.0021500.0000162.555716e-078.192644e-050.0001948.416093e-09
122.162968e-078.667423e-070.9999403.937645e-070.0000040.0000046.603543e-082.003625e-070.0000512.772808e-09
221.016335e-043.334410e-040.9982805.001128e-050.0000060.0007302.283566e-071.428000e-060.0004977.577643e-07
321.920308e-031.006953e-020.9554552.492423e-030.0066550.0001833.224371e-041.568429e-020.0066735.461269e-04
422.279302e-043.630354e-050.9445984.439209e-020.0024570.0000466.339463e-067.960528e-030.0002334.337906e-05
\n", "
" ], "text/plain": [ " y_true class_0 class_1 class_2 class_3 class_4 \\\n", "0 2 4.046189e-06 1.673384e-06 0.996728 8.242731e-04 0.002150 \n", "1 2 2.162968e-07 8.667423e-07 0.999940 3.937645e-07 0.000004 \n", "2 2 1.016335e-04 3.334410e-04 0.998280 5.001128e-05 0.000006 \n", "3 2 1.920308e-03 1.006953e-02 0.955455 2.492423e-03 0.006655 \n", "4 2 2.279302e-04 3.630354e-05 0.944598 4.439209e-02 0.002457 \n", "\n", " class_5 class_6 class_7 class_8 class_9 \n", "0 0.000016 2.555716e-07 8.192644e-05 0.000194 8.416093e-09 \n", "1 0.000004 6.603543e-08 2.003625e-07 0.000051 2.772808e-09 \n", "2 0.000730 2.283566e-07 1.428000e-06 0.000497 7.577643e-07 \n", "3 0.000183 3.224371e-04 1.568429e-02 0.006673 5.461269e-04 \n", "4 0.000046 6.339463e-06 7.960528e-03 0.000233 4.337906e-05 " ] }, "execution_count": 40, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from sklearn.linear_model import LogisticRegression\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n", "import pandas as pd\n", "import os\n", "\n", "# Step 0: Ensure Phase1 folder exists\n", "os.makedirs('Phase1', exist_ok=True)\n", "\n", "# Step 1: Train-test split\n", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n", "\n", "# Step 2: Train Logistic Regression on training set\n", "model = LogisticRegression(max_iter=1000)\n", "model.fit(X_train, y_train)\n", "\n", "# Step 3: Predict on test set\n", "y_preds = model.predict(X_test)\n", "\n", "# Step 4: Evaluate\n", "acc = accuracy_score(y_test, y_preds)\n", "print(f\"✅ Accuracy on Test Set: {acc:.4f}\\n\")\n", "\n", "print(\"✅ Classification Report on Test Set:\\n\")\n", "print(classification_report(y_test, y_preds))\n", "\n", "print(\"✅ Confusion Matrix on Test Set:\\n\")\n", "print(confusion_matrix(y_test, y_preds))\n", "\n", "# --------------------------------\n", "# Step 5: Predict probabilities on full dataset\n", "# --------------------------------\n", "y_proba = model.predict_proba(X)\n", "\n", "# Step 6: Save probabilities\n", "proba_df = pd.DataFrame(y_proba, columns=[f'class_{i}' for i in range(y_proba.shape[1])])\n", "proba_df.insert(0, 'y_true', y)\n", "\n", "proba_df.to_csv('Phase1/Albert.csv', index=False)\n", "\n", "print(\"\\n✅ Saved class probabilities to Phase1/Albert.csv!\")\n", "proba_df.head()\n" ] }, { "cell_type": "markdown", "id": "8b9c1929", "metadata": {}, "source": [ "## Ensemble" ] }, { "cell_type": "code", "execution_count": 41, "id": "4e6433ba", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
y_trueclass_0class_1class_2class_3class_4class_5class_6class_7class_8...class_0class_1class_2class_3class_4class_5class_6class_7class_8class_9
020.0048560.0098640.8644220.0089400.0091830.0166390.0392770.0265710.010824...4.046189e-061.673384e-060.9967288.242731e-040.0021500.0000162.555716e-078.192644e-050.0001948.416093e-09
120.0052970.0065580.9134010.0062620.0115970.0189830.0189090.0067440.009302...2.162968e-078.667423e-070.9999403.937645e-070.0000040.0000046.603543e-082.003625e-070.0000512.772808e-09
220.0300780.0353630.6811360.0389870.0254770.0609350.0247960.0331980.045627...1.016335e-043.334410e-040.9982805.001128e-050.0000060.0007302.283566e-071.428000e-060.0004977.577643e-07
320.0144400.0154720.7909730.0239520.0235290.0203450.0376770.0270910.030567...1.920308e-031.006953e-020.9554552.492423e-030.0066550.0001833.224371e-041.568429e-020.0066735.461269e-04
420.0126460.0197500.6123340.1162180.0365320.0279830.0459180.0606000.036102...2.279302e-043.630354e-050.9445984.439209e-020.0024570.0000466.339463e-067.960528e-030.0002334.337906e-05
\n", "

5 rows × 41 columns

\n", "
" ], "text/plain": [ " y_true class_0 class_1 class_2 class_3 class_4 class_5 \\\n", "0 2 0.004856 0.009864 0.864422 0.008940 0.009183 0.016639 \n", "1 2 0.005297 0.006558 0.913401 0.006262 0.011597 0.018983 \n", "2 2 0.030078 0.035363 0.681136 0.038987 0.025477 0.060935 \n", "3 2 0.014440 0.015472 0.790973 0.023952 0.023529 0.020345 \n", "4 2 0.012646 0.019750 0.612334 0.116218 0.036532 0.027983 \n", "\n", " class_6 class_7 class_8 ... class_0 class_1 class_2 \\\n", "0 0.039277 0.026571 0.010824 ... 4.046189e-06 1.673384e-06 0.996728 \n", "1 0.018909 0.006744 0.009302 ... 2.162968e-07 8.667423e-07 0.999940 \n", "2 0.024796 0.033198 0.045627 ... 1.016335e-04 3.334410e-04 0.998280 \n", "3 0.037677 0.027091 0.030567 ... 1.920308e-03 1.006953e-02 0.955455 \n", "4 0.045918 0.060600 0.036102 ... 2.279302e-04 3.630354e-05 0.944598 \n", "\n", " class_3 class_4 class_5 class_6 class_7 class_8 \\\n", "0 8.242731e-04 0.002150 0.000016 2.555716e-07 8.192644e-05 0.000194 \n", "1 3.937645e-07 0.000004 0.000004 6.603543e-08 2.003625e-07 0.000051 \n", "2 5.001128e-05 0.000006 0.000730 2.283566e-07 1.428000e-06 0.000497 \n", "3 2.492423e-03 0.006655 0.000183 3.224371e-04 1.568429e-02 0.006673 \n", "4 4.439209e-02 0.002457 0.000046 6.339463e-06 7.960528e-03 0.000233 \n", "\n", " class_9 \n", "0 8.416093e-09 \n", "1 2.772808e-09 \n", "2 7.577643e-07 \n", "3 5.461269e-04 \n", "4 4.337906e-05 \n", "\n", "[5 rows x 41 columns]" ] }, "execution_count": 41, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import pandas as pd\n", "\n", "# Read the CSV files\n", "tfidf_df = pd.read_csv('Phase1/TFIDF.csv')\n", "word2vec_df = pd.read_csv('Phase1/Word2Vec.csv')\n", "glove_df = pd.read_csv('Phase1/GloVe.csv')\n", "albert_df = pd.read_csv('Phase1/Albert.csv')\n", "\n", "# Concatenate the DataFrames along the columns (axis=1)\n", "combined_df = pd.concat([tfidf_df, word2vec_df.drop([\"y_true\"], axis=1), glove_df.drop([\"y_true\"], axis=1), albert_df.drop([\"y_true\"], axis=1)], axis=1)\n", "\n", "# Optionally, save the combined DataFrame to a new CSV file\n", "# combined_df.to_csv('Phase1/Combined.csv', index=False)\n", "\n", "# Display the combined DataFrame\n", "combined_df.head()\n" ] }, { "cell_type": "code", "execution_count": 42, "id": "cfd48211", "metadata": {}, "outputs": [], "source": [ "combined_df.to_csv('Phase1/Combined.csv', index=False)" ] }, { "cell_type": "code", "execution_count": 43, "id": "3a6d7971", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "35251" ] }, "execution_count": 43, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(combined_df)" ] }, { "cell_type": "code", "execution_count": 44, "id": "eaa4cbff", "metadata": {}, "outputs": [], "source": [ "X = combined_df.drop([\"y_true\"], axis =1 )\n", "y = combined_df[\"y_true\"]" ] }, { "cell_type": "code", "execution_count": 45, "id": "b7ed0d29", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Logistic Regression:\n", "Accuracy: 0.7984683023684583\n", " precision recall f1-score support\n", "\n", " 0 0.76 0.79 0.77 792\n", " 1 0.69 0.71 0.70 709\n", " 2 0.84 0.82 0.83 729\n", " 3 0.76 0.88 0.82 830\n", " 4 0.84 0.74 0.79 486\n", " 5 0.87 0.88 0.88 680\n", " 6 0.96 0.98 0.97 803\n", " 7 0.71 0.66 0.68 634\n", " 8 0.70 0.57 0.63 704\n", " 9 0.83 0.88 0.86 684\n", "\n", " accuracy 0.80 7051\n", " macro avg 0.80 0.79 0.79 7051\n", "weighted avg 0.80 0.80 0.80 7051\n", "\n", "\n", "Decision Tree Classifier:\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ 
"/home/darth/.pyenv/versions/major02/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py:1247: FutureWarning: 'multi_class' was deprecated in version 1.5 and will be removed in 1.7. From then on, it will always use 'multinomial'. Leave it to its default value to avoid this warning.\n", " warnings.warn(\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Accuracy: 0.4355410580059566\n", " precision recall f1-score support\n", "\n", " 0 0.56 0.52 0.54 792\n", " 1 0.43 0.39 0.41 709\n", " 2 0.34 0.33 0.34 729\n", " 3 0.44 0.50 0.47 830\n", " 4 0.52 0.31 0.39 486\n", " 5 0.33 0.46 0.39 680\n", " 6 0.54 0.68 0.60 803\n", " 7 0.33 0.34 0.34 634\n", " 8 0.29 0.27 0.28 704\n", " 9 0.68 0.46 0.55 684\n", "\n", " accuracy 0.44 7051\n", " macro avg 0.45 0.43 0.43 7051\n", "weighted avg 0.45 0.44 0.43 7051\n", "\n", "\n", "Random Forest Classifier:\n", "Accuracy: 0.5664444759608566\n", " precision recall f1-score support\n", "\n", " 0 0.51 0.57 0.54 792\n", " 1 0.37 0.35 0.36 709\n", " 2 0.56 0.56 0.56 729\n", " 3 0.57 0.65 0.60 830\n", " 4 0.63 0.48 0.55 486\n", " 5 0.65 0.63 0.64 680\n", " 6 0.83 0.97 0.90 803\n", " 7 0.47 0.42 0.44 634\n", " 8 0.40 0.30 0.34 704\n", " 9 0.58 0.62 0.60 684\n", "\n", " accuracy 0.57 7051\n", " macro avg 0.56 0.56 0.55 7051\n", "weighted avg 0.56 0.57 0.56 7051\n", "\n", "\n", "Support Vector Machine (SVM):\n", "Accuracy: 0.7730818323642037\n", " precision recall f1-score support\n", "\n", " 0 0.73 0.76 0.74 792\n", " 1 0.64 0.70 0.67 709\n", " 2 0.81 0.80 0.80 729\n", " 3 0.73 0.86 0.79 830\n", " 4 0.81 0.64 0.72 486\n", " 5 0.88 0.85 0.86 680\n", " 6 0.94 0.99 0.96 803\n", " 7 0.70 0.67 0.69 634\n", " 8 0.66 0.53 0.59 704\n", " 9 0.82 0.86 0.84 684\n", "\n", " accuracy 0.77 7051\n", " macro avg 0.77 0.76 0.77 7051\n", "weighted avg 0.77 0.77 0.77 7051\n", "\n", "\n", "k-Nearest Neighbors (k-NN):\n", "Accuracy: 0.83562615231882\n", " precision recall f1-score support\n", "\n", " 0 0.81 0.86 0.83 792\n", " 1 0.71 0.77 0.74 709\n", " 2 0.88 0.86 0.87 729\n", " 3 0.82 0.91 0.86 830\n", " 4 0.87 0.77 0.81 486\n", " 5 0.91 0.88 0.90 680\n", " 6 0.97 0.99 0.98 803\n", " 7 0.75 0.75 0.75 634\n", " 8 0.73 0.62 0.67 704\n", " 9 0.90 0.89 0.90 684\n", "\n", " accuracy 0.84 7051\n", " macro avg 0.84 0.83 0.83 7051\n", "weighted avg 0.84 0.84 0.83 7051\n", "\n", "\n", "Naïve Bayes Classifier:\n", "Accuracy: 0.882428024393703\n", " precision recall f1-score support\n", "\n", " 0 0.90 0.85 0.88 792\n", " 1 0.78 0.82 0.80 709\n", " 2 0.93 0.88 0.91 729\n", " 3 0.90 0.91 0.90 830\n", " 4 0.88 0.84 0.86 486\n", " 5 0.93 0.94 0.93 680\n", " 6 0.98 0.99 0.98 803\n", " 7 0.81 0.83 0.82 634\n", " 8 0.77 0.80 0.78 704\n", " 9 0.94 0.92 0.93 684\n", "\n", " accuracy 0.88 7051\n", " macro avg 0.88 0.88 0.88 7051\n", "weighted avg 0.88 0.88 0.88 7051\n", "\n" ] } ], "source": [ "from models import *\n", "print(\"Logistic Regression:\")\n", "report, acc = logistic_regression(X, y)\n", "print(\"Accuracy:\", acc)\n", "print(report)\n", "\n", "print(\"\\nDecision Tree Classifier:\")\n", "report, acc = decision_tree(X, y)\n", "print(\"Accuracy:\", acc)\n", "print(report)\n", "\n", "print(\"\\nRandom Forest Classifier:\")\n", "report, acc = random_forest(X, y)\n", "print(\"Accuracy:\", acc)\n", "print(report)\n", "\n", "print(\"\\nSupport Vector Machine (SVM):\")\n", "report, acc = support_vector_machine(X, y)\n", "print(\"Accuracy:\", acc)\n", "print(report)\n", "\n", "print(\"\\nk-Nearest Neighbors (k-NN):\")\n", "report, acc = knn(X, y, k=5)\n", "print(\"Accuracy:\", 
acc)\n", "print(report)\n", "\n", "print(\"\\nNaïve Bayes Classifier:\")\n", "report, acc = naive_bayes(X, y)\n", "print(\"Accuracy:\", acc)\n", "print(report)\n", "\n", "# print(\"\\nGradient Boosting (XGBoost):\")\n", "# report, acc = xgboost_classifier(X, y)\n", "# print(\"Accuracy:\", acc)\n", "# print(report)\n", "\n", "# print(\"\\nMulti-Layer Perceptron (MLP - Neural Network):\")\n", "# report, acc = mlp_classifier(X, y)\n", "\n", "# print(\"Accuracy:\", acc)\n", "# print(report)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "fd8e808d", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "major02", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 5 }