nandovallec committed
Commit fa7f40e · 1 parent: d8bd4d4
Optimization

Files changed:
- app.py +0 -6
- recommender.py +4 -4
app.py CHANGED

@@ -43,12 +43,6 @@ repo_mat = Repository(
     local_dir="data_mat", clone_from=DATASET_REPO_URL_MAT, use_auth_token=HF_TOKEN, repo_type="dataset"
 )
 
-df_ps_train_ori = pd.read_hdf('model/df_ps_train_new.hdf')
-df_ps_train_extra = pd.read_hdf('data_train/df_ps_train_extra.hdf')
-pickle_path = 'model/giantMatrix_new.pickle'
-with open(pickle_path, 'rb') as f:
-    ps_matrix_ori = pickle.load(f)
-
 from fetchPlaylistTrackUris import *
 from recommender import *
 
recommender.py CHANGED

@@ -4,6 +4,7 @@ from scipy.sparse import csr_matrix
 import numpy as np
 import pandas as pd
 from scipy.sparse import vstack
+import global_var
 
 def add_row_train(df, list_tid):
     new_pid_add = df.iloc[-1].name +1

@@ -25,9 +26,8 @@ def inference_row(list_tid, ps_matrix)
 
 
 def get_best_tid(current_list, ps_matrix_row, K=50, MAX_tid=10):
-
-
-    df_ps_train = pd.concat([df_ps_train_ori,df_ps_train_extra])
+    df_ps_train_extra = pd.read_hdf('data_train/df_ps_train_extra.hdf')
+    df_ps_train = pd.concat([global_var.df_ps_train_ori,df_ps_train_extra])
 
     sim_vector, sparse_row = inference_row(current_list, ps_matrix_row)
     sim_vector = sim_vector.toarray()[0].tolist()

@@ -82,7 +82,7 @@ def inference_from_tid(list_tid, K=50, MAX_tid=10):
     with open("data_mat/giantMatrix_extra.pickle",'rb') as f:
         ps_matrix_extra = pickle.load(f)
 
-    ps_matrix = vstack((ps_matrix_ori,ps_matrix_extra))
+    ps_matrix = vstack((global_var.ps_matrix_ori,ps_matrix_extra))
 
     result, sparse_row = get_best_tid(list_tid, ps_matrix.tocsr(), K, MAX_tid)
     ps_matrix_extra = vstack((ps_matrix_extra,sparse_row.todok()))
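Note: recommender.py now references global_var.df_ps_train_ori and global_var.ps_matrix_ori, but the global_var module itself is not part of this commit. A minimal sketch of what it presumably contains, assuming it simply performs the one-time loads that were removed from app.py (same file paths and variable names as before):

# global_var.py -- assumed companion module, not shown in this commit.
# Loads the baseline training dataframe and playlist-song matrix once at
# import time so recommender.py can reuse them as global_var.* instead of
# app.py reloading them on startup.
import pickle

import pandas as pd

# Baseline playlist/track training dataframe (path taken from the lines
# removed from app.py).
df_ps_train_ori = pd.read_hdf('model/df_ps_train_new.hdf')

# Baseline playlist-song sparse matrix (same pickle app.py loaded before).
with open('model/giantMatrix_new.pickle', 'rb') as f:
    ps_matrix_ori = pickle.load(f)

Under that assumption, only the small "extra" dataframe and matrix are read per call inside get_best_tid and inference_from_tid, and they are stacked onto the cached originals with pd.concat and vstack, which is what the "Optimization" in this commit amounts to.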