"""Baseline classifiers that share a simple train/test-split evaluation pattern."""

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, accuracy_score

from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from xgboost import XGBClassifier

def preprocess_data(X, scale=False):
    """Optionally standardize features; scaling matters for SVM, k-NN, and logistic regression."""
    if scale:
        scaler = StandardScaler()
        X = scaler.fit_transform(X)
    return X

def logistic_regression(X, y):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # lbfgs handles multinomial targets natively; the multi_class argument is
    # deprecated in recent scikit-learn releases and is therefore omitted.
    model = LogisticRegression(solver='lbfgs', max_iter=1000)
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)

def decision_tree(X, y):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # Fix random_state so repeated runs build the same tree.
    model = DecisionTreeClassifier(random_state=42)
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)

def random_forest(X, y):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    model = RandomForestClassifier(n_estimators=100, random_state=42)
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)

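# GradientBoostingClassifier is imported above but never used; the helper below
# sketches it in the same style as the other functions (the hyperparameters are
# illustrative defaults, not tuned values).
def gradient_boosting(X, y):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    model = GradientBoostingClassifier(n_estimators=100, random_state=42)
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)
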
def support_vector_machine(X, y):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # probability=True is only needed for predict_proba and slows training
    # noticeably, so it is dropped here; predicted labels are unaffected.
    model = SVC(kernel='rbf')
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)

def knn(X, y, k=5):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    model = KNeighborsClassifier(n_neighbors=k)
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)

def naive_bayes(X, y):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # MultinomialNB requires non-negative features (e.g. counts or tf-idf);
    # do not feed it mean-centered data from StandardScaler.
    model = MultinomialNB()
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)

def xgboost_classifier(X, y):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # use_label_encoder was deprecated and later removed from XGBoost, so it is
    # not passed; labels must already be integers in [0, n_classes).
    model = XGBClassifier(eval_metric='mlogloss')
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    return classification_report(y_test, y_pred), accuracy_score(y_test, y_pred)

import torch
import torch.nn as nn
import torch.optim as optim
from scipy.sparse import issparse

def mlp_classifier(X, y, epochs=500, lr=0.01):
    # with_mean=False keeps sparse inputs sparse during scaling; densify
    # afterwards because the tensors below are dense.
    scaler = StandardScaler(with_mean=False)
    X = scaler.fit_transform(X)
    if issparse(X):
        X = X.toarray()
    y = np.asarray(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    X_train = torch.tensor(X_train, dtype=torch.float32)
    X_test = torch.tensor(X_test, dtype=torch.float32)
    y_train = torch.tensor(y_train, dtype=torch.long)
    y_test = torch.tensor(y_test, dtype=torch.long)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    X_train, X_test = X_train.to(device), X_test.to(device)
    y_train, y_test = y_train.to(device), y_test.to(device)

    class MLP(nn.Module):
        """Two hidden layers with ReLU; the output layer returns raw logits
        because nn.CrossEntropyLoss applies log-softmax internally."""

        def __init__(self, input_size, hidden_sizes, output_size):
            super().__init__()
            self.fc1 = nn.Linear(input_size, hidden_sizes[0])
            self.fc2 = nn.Linear(hidden_sizes[0], hidden_sizes[1])
            self.fc3 = nn.Linear(hidden_sizes[1], output_size)
            self.relu = nn.ReLU()

        def forward(self, x):
            x = self.relu(self.fc1(x))
            x = self.relu(self.fc2(x))
            return self.fc3(x)

    model = MLP(input_size=X.shape[1], hidden_sizes=[100, 50],
                output_size=len(np.unique(y))).to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    # Full-batch training: adequate for small datasets; switch to mini-batches
    # via a DataLoader for anything that does not fit comfortably in memory.
    for _ in range(epochs):
        model.train()
        optimizer.zero_grad()
        outputs = model(X_train)
        loss = criterion(outputs, y_train)
        loss.backward()
        optimizer.step()

    model.eval()
    with torch.no_grad():
        logits = model(X_test)
        y_pred = torch.argmax(logits, dim=1).cpu().numpy()

    y_true = y_test.cpu().numpy()
    return classification_report(y_true, y_pred), accuracy_score(y_true, y_pred)

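# Minimal smoke test: a sketch that uses the iris dataset purely as a
# stand-in; any dense numeric feature matrix with integer labels works the
# same way.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    X_scaled = preprocess_data(X, scale=True)  # helps SVM, k-NN, and logistic regression

    for name, clf_fn in [("logistic_regression", logistic_regression),
                         ("random_forest", random_forest),
                         ("svm", support_vector_machine)]:
        _report, acc = clf_fn(X_scaled, y)
        print(f"{name}: accuracy={acc:.3f}")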