diff --git a/deeprobust/graph/defense/gcn_guard.py b/deeprobust/graph/defense/gcn_guard.py
index eea8e74847c34ba31405eb6d3eb29449cbc0f990..b931f8134d70681cba422937b923acd0f96cdbab 100644
--- a/deeprobust/graph/defense/gcn_guard.py
+++ b/deeprobust/graph/defense/gcn_guard.py
@@ -25,7 +25,7 @@ from scipy.sparse import lil_matrix
 
 class GCNGuard(nn.Module):
 
-    def __init__(self, nfeat, nhid, nclass, dropout=0.5, lr=0.01, drop=False, weight_decay=5e-4, with_relu=True,
+    def __init__(self, nfeat, nhid, nclass, dropout=0.5, lr=0.01, drop=True, weight_decay=5e-4, with_relu=True,
                  with_bias=True, device=None):
 
         super(GCNGuard, self).__init__()
@@ -102,7 +102,7 @@ class GCNGuard(nn.Module):
     def forward(self, x, adj):
         """we don't change the edge_index, just update the edge_weight;
         some edge_weight are regarded as removed if it equals to zero"""
-        x = x.to_dense()  # comment this out for topology attack (not needed for wisconsin); still needed for meta attack and dice
+        x = x.to_dense()
 
         """GCN and GAT"""
         if self.attention:
@@ -189,7 +189,7 @@ class GCNGuard(nn.Module):
         if self.drop:
             character = np.vstack((att_dense_norm[row, col].A1,
                                    att_dense_norm[col, row].A1))
-            character = torch.from_numpy(character.T)
+            character = torch.from_numpy(character.T).to(self.device)
             drop_score = self.drop_learn_1(character)
             drop_score = torch.sigmoid(drop_score)  # do not use softmax since we only have one element
             mm = torch.nn.Threshold(0.5, 0)
diff --git a/deeprobust/graph/defense/gcn_preprocess.py b/deeprobust/graph/defense/gcn_preprocess.py
index 84165277f36e6ff89f135857faf06172fda30f1f..1ec10fd2271a284b2961bc59f66db3c1a00f1ea1 100644
--- a/deeprobust/graph/defense/gcn_preprocess.py
+++ b/deeprobust/graph/defense/gcn_preprocess.py
@@ -6,6 +6,7 @@ from torch.nn.parameter import Parameter
 from torch.nn.modules.module import Module
 from deeprobust.graph import utils
 from deeprobust.graph.defense import GCN
+from sklearn.utils.extmath import randomized_svd
 from tqdm import tqdm
 import scipy.sparse as sp
 import numpy as np
@@ -104,37 +105,86 @@ class GCNSVD(GCN):
         self.labels = labels
         super().fit(features, modified_adj, labels, idx_train, idx_val, train_iters=train_iters,
                     initialize=initialize, verbose=verbose)
 
-    def truncatedSVD(self, data, k=50):
-        """Truncated SVD on input data.
-
-        Parameters
-        ----------
-        data :
-            input matrix to be decomposed
-        k : int
-            number of singular values and vectors to compute.
+    # def truncatedSVD(self, data, k=50):
+    #     """Truncated SVD on input data.
+
+    #     Parameters
+    #     ----------
+    #     data :
+    #         input matrix to be decomposed
+    #     k : int
+    #         number of singular values and vectors to compute.
+
+    #     Returns
+    #     -------
+    #     numpy.array
+    #         reconstructed matrix.
+    #     """
+    #     print('=== GCN-SVD: rank={} ==='.format(k))
+    #     print("stage1")
+    #     if sp.issparse(data):
+    #         data = data.asfptype()
+    #         U, S, V = sp.linalg.svds(data, k=k)
+    #         print("rank_after = {}".format(len(S.nonzero()[0])))
+    #         diag_S = np.diag(S)
+    #     else:
+    #         U, S, V = np.linalg.svd(data)
+    #         U = U[:, :k]
+    #         S = S[:k]
+    #         V = V[:k, :]
+    #         print("rank_before = {}".format(len(S.nonzero()[0])))
+    #         diag_S = np.diag(S)
+    #         print("rank_after = {}".format(len(diag_S.nonzero()[0])))
+
+    #     return U @ diag_S @ V
 
-        Returns
-        -------
-        numpy.array
-            reconstructed matrix.
-        """
-        print('=== GCN-SVD: rank={} ==='.format(k))
-        if sp.issparse(data):
-            data = data.asfptype()
-            U, S, V = sp.linalg.svds(data, k=k)
-            print("rank_after = {}".format(len(S.nonzero()[0])))
-            diag_S = np.diag(S)
-        else:
-            U, S, V = np.linalg.svd(data)
-            U = U[:, :k]
-            S = S[:k]
-            V = V[:k, :]
-            print("rank_before = {}".format(len(S.nonzero()[0])))
-            diag_S = np.diag(S)
-            print("rank_after = {}".format(len(diag_S.nonzero()[0])))
+    def truncatedSVD(self, data, k=50):
+        """Fast truncated SVD using randomized algorithm"""
+        print(f'=== Fast SVD: rank={k} ===')
+
+        # Convert torch Tensor to NumPy
+        if isinstance(data, torch.Tensor):
+            data = data.detach().cpu().numpy()
+
+        U, S, Vt = randomized_svd(data, n_components=k, n_iter=10, random_state=None)
+        print("rank_after =", np.count_nonzero(S))
+        diag_S = np.diag(S)
+        return U @ diag_S @ Vt
+
+    # def truncatedSVD(self, data, k=50):
+
+    #     """Truncated SVD on input data using GPU (torch.linalg.svd), returning numpy array."""
+    #     print('=== GCN-SVD (GPU): rank={} ==='.format(k))
+    #     # convert to dense and move to the GPU
+    #     if sp.issparse(data):
+    #         print("[Info] Converting sparse matrix to dense (may be memory-intensive).")
+    #         data = torch.from_numpy(data.toarray()).float().to(self.device)
+    #     elif isinstance(data, np.ndarray):
+    #         data = torch.from_numpy(data).float().to(self.device)
+    #     elif isinstance(data, torch.Tensor):
+    #         data = data.float().to(self.device)
+    #     else:
+    #         raise TypeError("Unsupported data type for SVD.")
+
+    #     print("stage1")
+    #     #
+    #     U, S, Vh = torch.linalg.svd(data, full_matrices=False)
+
+    #     print("stage2")
+    #     #
+    #     U_k = U[:, :k]
+    #     S_k = S[:k]
+    #     V_k = Vh[:k, :]
+
+    #     print(f"Singular values used (non-zero count): {(S_k != 0).sum().item()} / {len(S_k)}")
+
+    #     # reconstruct the matrix: U * S * V
+    #     diag_S = torch.diag(S_k)
+    #     reconstructed = U_k @ diag_S @ V_k
+
+    #     # return a numpy array (keep the return type consistent)
+    #     return reconstructed.detach().cpu().numpy()
 
-        return U @ diag_S @ V
 
     def predict(self, features=None, adj=None):
         """By default, the inputs should be unnormalized adjacency
@@ -276,6 +326,7 @@ class GCNJaccard(GCN):
             removed_cnt = dropedge_dis(adj_triu.data, adj_triu.indptr, adj_triu.indices, features, threshold=self.threshold)
         else:
             if self.binary_feature:
+                print("self.binary_feature", self.binary_feature)
                 removed_cnt = dropedge_jaccard(adj_triu.data, adj_triu.indptr, adj_triu.indices, features, threshold=self.threshold)
             else:
                 removed_cnt = dropedge_cosine(adj_triu.data, adj_triu.indptr, adj_triu.indices, features, threshold=self.threshold)
@@ -378,6 +429,22 @@ def __dropedge_jaccard(A, iA, jA, features, threshold):
                 removed_cnt += 1
     return removed_cnt
 
+# @njit
+# def dropedge_jaccard(A, iA, jA, features, threshold):
+#     removed_cnt = 0
+#     for row in range(len(iA)-1):
+#         for i in range(iA[row], iA[row+1]):
+#             # print(row, jA[i], A[i])
+#             n1 = row
+#             n2 = jA[i]
+#             a, b = features[n1], features[n2]
+#             intersection = np.count_nonzero(a*b)
+#             J = intersection * 1.0 / (np.count_nonzero(a) + np.count_nonzero(b) - intersection)
+#             if J < threshold:
+#                 A[i] = 0
+#                 # A[n2, n1] = 0
+#                 removed_cnt += 1
+#     return removed_cnt
 @njit
 def dropedge_jaccard(A, iA, jA, features, threshold):
     removed_cnt = 0
@@ -388,7 +455,12 @@ def dropedge_jaccard(A, iA, jA, features, threshold):
             n2 = jA[i]
             a, b = features[n1], features[n2]
             intersection = np.count_nonzero(a*b)
-            J = intersection * 1.0 / (np.count_nonzero(a) + np.count_nonzero(b) - intersection)
+            union = np.count_nonzero(a) + np.count_nonzero(b) - intersection
+            if union == 0:
+                print("Running safe dropedge_jaccard")
+                J = 0
+            else:
+                J = intersection * 1.0 / union
 
             if J < threshold:
                 A[i] = 0
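# A minimal, self-contained sketch of the two gcn_preprocess.py changes above:
# rank-k reconstruction via sklearn's randomized_svd, and a Jaccard score that
# guards against an empty union (e.g. two all-zero feature rows). The toy
# matrix and the comparison against the exact SVD are illustrative assumptions,
# not part of the patch.
import numpy as np
from sklearn.utils.extmath import randomized_svd

def low_rank_approx(M, k=50):
    # U: (n, k), S: (k,), Vt: (k, m); rebuild the rank-k approximation.
    U, S, Vt = randomized_svd(M, n_components=k, n_iter=10, random_state=0)
    return U @ np.diag(S) @ Vt

def safe_jaccard(a, b):
    # Binary feature vectors; return 0 instead of dividing by zero when both rows are all-zero.
    intersection = np.count_nonzero(a * b)
    union = np.count_nonzero(a) + np.count_nonzero(b) - intersection
    return 0.0 if union == 0 else intersection / union

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    A = (rng.random((200, 200)) < 0.05).astype(float)
    A = np.triu(A, 1); A = A + A.T                     # symmetric 0/1 adjacency, no self-loops
    A_k = low_rank_approx(A, k=20)
    U, S, Vt = np.linalg.svd(A)                        # exact SVD for a sanity check
    A_exact = U[:, :20] @ np.diag(S[:20]) @ Vt[:20, :]
    print("randomized vs exact rank-20 error:", np.linalg.norm(A_k - A_exact))
    print("jaccard of two zero rows:", safe_jaccard(np.zeros(10), np.zeros(10)))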
diff --git a/deeprobust/graph/global_attack/base_attack.py b/deeprobust/graph/global_attack/base_attack.py
index 9beb1af32c4b00a5916648faa596e9436fd73088..237431189d85f4ae2454251e64dd0b4490686999 100644
--- a/deeprobust/graph/global_attack/base_attack.py
+++ b/deeprobust/graph/global_attack/base_attack.py
@@ -71,6 +71,7 @@ class BaseAttack(Module):
         assert torch.abs(adj - adj.t()).sum() == 0, "Input graph is not symmetric"
         assert adj.max() == 1, "Max value should be 1!"
         assert adj.min() == 0, "Min value should be 0!"
+        adj.fill_diagonal_(0)
         diag = adj.diag()
         assert diag.max() == 0, "Diagonal should be 0!"
         assert diag.min() == 0, "Diagonal should be 0!"
diff --git a/deeprobust/graph/global_attack/topology_attack.py b/deeprobust/graph/global_attack/topology_attack.py
index e7fba505ef9be7e2eec6fbbd616d9131245a79f4..a505531a236d7a7001a8ef7ea9ca0363b8d7ac60 100644
--- a/deeprobust/graph/global_attack/topology_attack.py
+++ b/deeprobust/graph/global_attack/topology_attack.py
@@ -79,7 +79,7 @@ class PGDAttack(BaseAttack):
 
         self.complementary = None
 
-    def attack(self, ori_features, ori_adj, labels, idx_train, n_perturbations, epochs=25, **kwargs):
+    def attack(self, ori_features, ori_adj, labels, idx_train, n_perturbations, epochs=50, **kwargs):
        """Generate perturbations on the input graph.
 
         Parameters
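# A small illustration of the check_adj change above: clearing the diagonal of a
# dense torch adjacency in place before the diagonal assertions run. The toy
# tensor below is an assumption for demonstration, not data from the repository.
import torch

adj = torch.tensor([[1., 1., 0.],
                    [1., 0., 1.],
                    [0., 1., 1.]])           # symmetric, but with self-loops at nodes 0 and 2
assert torch.abs(adj - adj.t()).sum() == 0, "Input graph is not symmetric"
adj.fill_diagonal_(0)                        # drop self-loops in place
diag = adj.diag()
assert diag.max() == 0 and diag.min() == 0   # the original diagonal assertions now hold
print(adj)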
diff --git a/examples/graph/cgscore_experiments/defense_method/GAT.py b/examples/graph/cgscore_experiments/defense_method/GAT.py
index 36ea48ae6966bb986667f4f2356a8f863a43c6fb..b0e859bcb622bbdd3ac72b2e6d764f40d655616c 100644
--- a/examples/graph/cgscore_experiments/defense_method/GAT.py
+++ b/examples/graph/cgscore_experiments/defense_method/GAT.py
@@ -6,19 +6,23 @@ from deeprobust.graph.defense import GCNJaccard, GCN, GAT
 from deeprobust.graph.utils import *
 from deeprobust.graph.data import Dataset, PrePtbDataset, Dpr2Pyg
 import argparse
+import csv
+import os
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--seed', type=int, default=15, help='Random seed.')
-parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
-parser.add_argument('--ptb_rate', type=float, default=0.0, help='pertubation rate')
-parser.add_argument('--ptb_type', type=str, default='clean', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
+parser.add_argument('--dataset', type=str, default='pubmed', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed', 'Flickr'], help='dataset')
+parser.add_argument('--ptb_rate', type=float, default=0.25, help='perturbation rate')
+parser.add_argument('--ptb_type', type=str, default='dice', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
 parser.add_argument('--hidden', type=int, default=16, help='Number of hidden units.')
 parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate (1 - keep probability).')
+parser.add_argument('--gpu', type=int, default=0, help='GPU device ID (default: 0)')
 
 args = parser.parse_args()
 args.cuda = torch.cuda.is_available()
 print('cuda: %s' % args.cuda)
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
+
 
 # make sure you use the same data splits as you generated attacks
 np.random.seed(args.seed)
@@ -57,5 +61,23 @@ print('=== train on clean graph ===')
 pyg_data = Dpr2Pyg(data)
 pyg_data.update_edge_index(perturbed_adj) # inplace operation
 
-gat.fit(pyg_data, train_iters=200, verbose=True) # train with earlystopping
-gat.test()
+gat.fit(pyg_data, train_iters=500, verbose=True) # train with earlystopping
+acc = gat.test()
+print("acc:", acc)
+
+csv_dir = "../result"
+os.makedirs(csv_dir, exist_ok=True)
+
+csv_filename = os.path.join(csv_dir, f"GAT_{args.dataset}_{args.ptb_type}_{args.ptb_rate}.csv")
+row = [f"{args.dataset} ", f" {args.ptb_type} ", f" {args.ptb_rate} ", f" {acc}"]
+
+try:
+    file_exists = os.path.isfile(csv_filename)
+    with open(csv_filename, 'a', newline='') as csvfile:
+        writer = csv.writer(csvfile)
+        if not file_exists:
+            writer.writerow(["dataset ", "ptb_type ", "ptb_rate ", "accuracy"])
+        writer.writerow(row)
+except Exception as e:
+    print(f"[Error] Failed to write CSV: {e}")
+
diff --git a/examples/graph/cgscore_experiments/defense_method/GCN.py b/examples/graph/cgscore_experiments/defense_method/GCN.py
index 52cd5713c07f74c964321017b49574803290fe6d..9779428136c3c664dc852ba00117a13cc9515c36 100644
--- a/examples/graph/cgscore_experiments/defense_method/GCN.py
+++ b/examples/graph/cgscore_experiments/defense_method/GCN.py
@@ -5,19 +5,22 @@ from deeprobust.graph.defense import GCNJaccard, GCN
 from deeprobust.graph.utils import *
 from deeprobust.graph.data import Dataset, PrePtbDataset
 import argparse
+import os
+import csv
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--seed', type=int, default=15, help='Random seed.')
-parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
-parser.add_argument('--ptb_rate', type=float, default=0.0, help='pertubation rate')
-parser.add_argument('--ptb_type', type=str, default='clean', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
+parser.add_argument('--dataset', type=str, default='Flickr', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed', 'Flickr'], help='dataset')
+parser.add_argument('--ptb_rate', type=float, default=0.25, help='perturbation rate')
+parser.add_argument('--ptb_type', type=str, default='minmax', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
 parser.add_argument('--hidden', type=int, default=16, help='Number of hidden units.')
 parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate (1 - keep probability).')
+parser.add_argument('--gpu', type=int, default=0, help='GPU device ID (default: 0)')
 
 args = parser.parse_args()
 args.cuda = torch.cuda.is_available()
 print('cuda: %s' % args.cuda)
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
 
 # make sure you use the same data splits as you generated attacks
 np.random.seed(args.seed)
@@ -63,7 +66,22 @@ def test(adj):
 
 
 def main():
-    test(perturbed_adj)
+    acc = test(perturbed_adj)
+    csv_dir = "../result"
+    os.makedirs(csv_dir, exist_ok=True)
+
+    csv_filename = os.path.join(csv_dir, f"GCN_{args.dataset}_{args.ptb_type}_{args.ptb_rate}.csv")
+    row = [f"{args.dataset} ", f" {args.ptb_type} ", f" {args.ptb_rate} ", f" {acc}"]
+
+    try:
+        file_exists = os.path.isfile(csv_filename)
+        with open(csv_filename, 'a', newline='') as csvfile:
+            writer = csv.writer(csvfile)
+            if not file_exists:
+                writer.writerow(["dataset ", "ptb_type ", "ptb_rate ", "accuracy"])
+            writer.writerow(row)
+    except Exception as e:
+        print(f"[Error] Failed to write CSV: {e}")
 
     # # if you want to save the modified adj/features, uncomment the code below
     # model.save_adj(root='./', name=f'mod_adj')
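# The CSV-logging block above is repeated in every defense script (GAT.py, GCN.py,
# GCNJaccard.py, GCNSVD.py, GNNGuard.py, ProGNN.py, RGCN.py). A hypothetical shared
# helper such as append_result_row below could replace those copies; it is only a
# sketch of the same append-with-header-once pattern and is not part of the patch.
import csv
import os

def append_result_row(csv_dir, filename, header, row):
    """Append one result row, writing the header only when the file is new."""
    os.makedirs(csv_dir, exist_ok=True)
    path = os.path.join(csv_dir, filename)
    file_exists = os.path.isfile(path)
    try:
        with open(path, 'a', newline='') as csvfile:
            writer = csv.writer(csvfile)
            if not file_exists:
                writer.writerow(header)
            writer.writerow(row)
    except Exception as e:
        print(f"[Error] Failed to write CSV: {e}")

# Example call mirroring the logging in GCN.py (names refer to that script's variables):
# append_result_row("../result", f"GCN_{args.dataset}_{args.ptb_type}_{args.ptb_rate}.csv",
#                   ["dataset", "ptb_type", "ptb_rate", "accuracy"],
#                   [args.dataset, args.ptb_type, args.ptb_rate, acc])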
code below
     # model.save_adj(root='./', name=f'mod_adj')
diff --git a/examples/graph/cgscore_experiments/defense_method/GCNJaccard.py b/examples/graph/cgscore_experiments/defense_method/GCNJaccard.py
index 18b089dbd630359e53a65cfe21ba0866cd8f812f..095d60624cf5410033fb944aade48204c1f20556 100644
--- a/examples/graph/cgscore_experiments/defense_method/GCNJaccard.py
+++ b/examples/graph/cgscore_experiments/defense_method/GCNJaccard.py
@@ -4,21 +4,24 @@ import torch.nn.functional as F
 from deeprobust.graph.defense import GCNJaccard, GCN
 from deeprobust.graph.utils import *
 from deeprobust.graph.data import Dataset, PrePtbDataset
+import csv
 import argparse
+import os
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--seed', type=int, default=15, help='Random seed.')
-parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
-parser.add_argument('--ptb_rate', type=float, default=0.0, help='pertubation rate')
-parser.add_argument('--ptb_type', type=str, default='clean', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
+parser.add_argument('--dataset', type=str, default='Flickr', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed', 'Flickr'], help='dataset')
+parser.add_argument('--ptb_rate', type=float, default=0.25, help='perturbation rate')
+parser.add_argument('--ptb_type', type=str, default='dice', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
 parser.add_argument('--hidden', type=int, default=16, help='Number of hidden units.')
 parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate (1 - keep probability).')
 parser.add_argument('--threshold', type=float, default=0.1, help='jaccard coeficient')
+parser.add_argument('--gpu', type=int, default=0, help='GPU device ID (default: 0)')
 
 args = parser.parse_args()
 args.cuda = torch.cuda.is_available()
 print('cuda: %s' % args.cuda)
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
 
 # make sure you use the same data splits as you generated attacks
 np.random.seed(args.seed)
@@ -54,15 +57,32 @@ def test_jaccard(adj):
     gcn = gcn.to(device)
     gcn.fit(features, adj, labels, idx_train, idx_val, threshold=args.threshold)
     gcn.eval()
-    gcn.test(idx_test)
+    test_acc = gcn.test(idx_test)
+    return test_acc
 
 
 def main():
-    test_jaccard(perturbed_adj)
-    # # if you want to save the modified adj/features, uncomment the code below
-    # model.save_adj(root='./', name=f'mod_adj')
-    # model.save_features(root='./', name='mod_features')
+    acc = test_jaccard(perturbed_adj)
+
+    print(acc)
+    # record result
+    csv_dir = "../result"
+    os.makedirs(csv_dir, exist_ok=True)  # create the directory if it does not exist
+
+    csv_filename = os.path.join(csv_dir, f"Jaccard_{args.dataset}_{args.ptb_type}_{args.ptb_rate}.csv")
+    row = [f"{args.dataset} ", f" {args.ptb_type} ", f" {args.ptb_rate} ", f" {args.threshold} ", f" {acc}"]
+
+    # write the CSV file
+    try:
+        file_exists = os.path.isfile(csv_filename)
+        with open(csv_filename, 'a', newline='') as csvfile:
+            writer = csv.writer(csvfile)
+            if not file_exists:
+                writer.writerow(["dataset ", "ptb_type ", "ptb_rate ", "threshold ", "accuracy"])
+            writer.writerow(row)
+    except Exception as e:
+        print(f"[Error] Failed to write CSV: {e}")
 
 if __name__ == '__main__':
     main()
\ No newline at end of file
diff --git a/examples/graph/cgscore_experiments/defense_method/GCNSVD.py b/examples/graph/cgscore_experiments/defense_method/GCNSVD.py
index 8f2e7b22d6209c5ad19c24fd5d58d4b8453fde73..b10186df2c7fcab451cf780ca4c3cdd52987239e 100644
--- a/examples/graph/cgscore_experiments/defense_method/GCNSVD.py
+++ b/examples/graph/cgscore_experiments/defense_method/GCNSVD.py
@@ -5,20 +5,25 @@ from deeprobust.graph.defense import GCNJaccard, GCN, GCNSVD
 from deeprobust.graph.utils import *
 from deeprobust.graph.data import Dataset, PrePtbDataset
 import argparse
+import os
+import csv
 
 parser = argparse.ArgumentParser()
-parser.add_argument('--seed', type=int, default=15, help='Random seed.')
-parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
-parser.add_argument('--ptb_rate', type=float, default=0.0, help='pertubation rate')
-parser.add_argument('--ptb_type', type=str, default='clean', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
+parser.add_argument('--seed', type=int, default=50, help='Random seed.')
+parser.add_argument('--dataset', type=str, default='pubmed', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed', 'Flickr'], help='dataset')
+parser.add_argument('--ptb_rate', type=float, default=0.25, help='perturbation rate')
+parser.add_argument('--ptb_type', type=str, default='minmax', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
 parser.add_argument('--hidden', type=int, default=16, help='Number of hidden units.')
 parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate (1 - keep probability).')
 parser.add_argument('--k', type=int, default=50, help='Truncated Components.')
+parser.add_argument('--gpu', type=int, default=5, help='GPU device ID (default: 5)')
 
 args = parser.parse_args()
 args.cuda = torch.cuda.is_available()
 print('cuda: %s' % args.cuda)
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
+
+os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
 
 # make sure you use the same data splits as you generated attacks
 np.random.seed(args.seed)
@@ -49,12 +54,32 @@ def test_svd(adj):
     gcn = gcn.to(device)
     gcn.fit(features, perturbed_adj, labels, idx_train, idx_val, k=args.k, verbose=True)
     gcn.eval()
-    gcn.test(idx_test)
+    test_acc = gcn.test(idx_test)
+    return test_acc
 
 
 def main():
-    test_svd(perturbed_adj)
+    acc = test_svd(perturbed_adj)
+
+    print(acc)
+    # record result
+    csv_dir = "../result"
+    os.makedirs(csv_dir, exist_ok=True)
+
+    csv_filename = os.path.join(csv_dir, f"SVD_{args.dataset}_{args.ptb_type}_{args.ptb_rate}.csv")
+    row = [f"{args.dataset} ", f" {args.ptb_type} ", f" {args.ptb_rate} ", f" {args.k} ", f" {acc}"]
+
+    # write the CSV file
+    try:
+        file_exists = os.path.isfile(csv_filename)
+        with open(csv_filename, 'a', newline='') as csvfile:
+            writer = csv.writer(csvfile)
+            if not file_exists:
+                writer.writerow(["dataset ", "ptb_type ", "ptb_rate ", "svd_k ", "accuracy"])
+            writer.writerow(row)
+    except Exception as e:
+        print(f"[Error] Failed to write CSV: {e}")
 
     # # if you want to save the modified adj/features, uncomment the code below
     # model.save_adj(root='./', name=f'mod_adj')
     # model.save_features(root='./', name='mod_features')
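# Note on the GCNSVD.py hunk above: CUDA_VISIBLE_DEVICES is only honoured if it is set
# before the process initialises CUDA, and once it is set the selected card is addressed
# as cuda:0 inside the process. A sketch of an ordering that avoids both pitfalls
# (illustrative only, not the patched script):
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU device ID')
args, _ = parser.parse_known_args()

# Set the environment variable before torch touches CUDA ...
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)

import torch  # noqa: E402  (imported after the env var on purpose)

# ... and then refer to the single visible card as cuda:0.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Using device:", device)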
diff --git a/examples/graph/cgscore_experiments/defense_method/GNNGuard.py b/examples/graph/cgscore_experiments/defense_method/GNNGuard.py
index f3c4a14076def239f2efa15ed750dde174bc7078..2a701bf0b5f3cc1f885ecd30fc94bc0dfe9e19f8 100644
--- a/examples/graph/cgscore_experiments/defense_method/GNNGuard.py
+++ b/examples/graph/cgscore_experiments/defense_method/GNNGuard.py
@@ -5,6 +5,8 @@ from deeprobust.graph.defense.gcn_guard import GCNGuard
 from deeprobust.graph.utils import *
 from deeprobust.graph.data import Dataset
 from deeprobust.graph.data import PtbDataset, PrePtbDataset
+import os
+import csv
 import argparse
 from scipy import sparse
 
@@ -12,14 +14,15 @@ from scipy import sparse
 parser = argparse.ArgumentParser()
 parser.add_argument('--seed', type=int, default=15, help='Random seed')
 parser.add_argument('--GNNGuard', type=bool, default=True, choices=[True, False])
-parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed', 'flickr'], help='dataset')
-parser.add_argument('--ptb_rate', type=float, default=0.0, help='pertubation rate')
-parser.add_argument('--ptb_type', type=str, default='clean', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
+parser.add_argument('--dataset', type=str, default='Flickr', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed', 'Flickr'], help='dataset')
+parser.add_argument('--ptb_rate', type=float, default=0.25, help='perturbation rate')
+parser.add_argument('--ptb_type', type=str, default='minmax', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
+parser.add_argument('--gpu', type=int, default=1, help='GPU device ID (default: 1)')
 
 args = parser.parse_args()
 args.cuda = torch.cuda.is_available()
 print('cuda: %s' % args.cuda)
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
 
 np.random.seed(args.seed)
 torch.manual_seed(args.seed)
@@ -39,7 +42,7 @@ perturbed_adj = sp.csr_matrix(perturbed_adj.to('cpu').numpy())
 def test(adj):
     # """defense models"""
     ''' testing model '''
-    gcn = GCNGuard(nfeat=features.shape[1], nclass=labels.max().item() + 1, nhid=16,
+    gcn = GCNGuard(nfeat=features.shape[1], nclass=labels.max().item() + 1, nhid=16, drop=True,
                    dropout=0.5, with_relu=False, with_bias=True, weight_decay=5e-4, device=device)
 
     gcn = gcn.to(device)
@@ -57,7 +60,23 @@ def main():
     # test(adj)
 
     # print('=== testing GCN on Mettacked graph ===')
-    test(perturbed_adj)
+    acc = test(perturbed_adj)
+
+    csv_dir = "../result"
+    os.makedirs(csv_dir, exist_ok=True)
+
+    csv_filename = os.path.join(csv_dir, f"GNNGuard_{args.dataset}_{args.ptb_type}_{args.ptb_rate}.csv")
+    row = [f"{args.dataset} ", f" {args.ptb_type} ", f" {args.ptb_rate} ", f" {acc}"]
+
+    try:
+        file_exists = os.path.isfile(csv_filename)
+        with open(csv_filename, 'a', newline='') as csvfile:
+            writer = csv.writer(csvfile)
+            if not file_exists:
+                writer.writerow(["dataset ", "ptb_type ", "ptb_rate ", "accuracy"])
+            writer.writerow(row)
+    except Exception as e:
+        print(f"[Error] Failed to write CSV: {e}")
 
 if __name__ == '__main__':
     main()
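# A toy rendering of the drop=True path touched in gcn_guard.py above: per-edge
# "character" features (the attention weight in both directions) are stacked, moved
# to the model's device, scored by a small learnable layer, squashed with sigmoid and
# zeroed below 0.5 via torch.nn.Threshold. The linear layer and the random attention
# values below are stand-ins for GCNGuard's drop_learn_1 and real attention scores.
import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_edges = 8

# att_uv / att_vu play the role of att_dense_norm[row, col] and att_dense_norm[col, row].
att_uv = torch.rand(num_edges)
att_vu = torch.rand(num_edges)
character = torch.stack([att_uv, att_vu], dim=1).to(device)   # shape (E, 2)

drop_learn = nn.Linear(2, 1).to(device)       # stand-in for self.drop_learn_1
drop_score = torch.sigmoid(drop_learn(character))             # one score per edge
mm = torch.nn.Threshold(0.5, 0)               # keep scores > 0.5, zero the rest
kept = mm(drop_score)
print("edges kept:", int((kept > 0).sum()), "of", num_edges)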
diff --git a/examples/graph/cgscore_experiments/defense_method/ProGNN.py b/examples/graph/cgscore_experiments/defense_method/ProGNN.py
index fb3fa50ffe8f21d427724f7352c075d0a56b7d1f..e6f3d141756d486911992b6ebc96b567e15e7ab3 100644
--- a/examples/graph/cgscore_experiments/defense_method/ProGNN.py
+++ b/examples/graph/cgscore_experiments/defense_method/ProGNN.py
@@ -5,12 +5,16 @@ from deeprobust.graph.defense import GCNJaccard, GCN, ProGNN
 from deeprobust.graph.utils import *
 from deeprobust.graph.data import Dataset, PrePtbDataset
 import argparse
+import os
+import csv
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--seed', type=int, default=15, help='Random seed.')
-parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
+
+parser.add_argument('--dataset', type=str, default='pubmed', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed', 'Flickr'], help='dataset')
 parser.add_argument('--ptb_type', type=str, default='clean', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
 parser.add_argument('--ptb_rate', type=float, default=0.0, help='pertubation rate')
+
 parser.add_argument('--epochs', type=int, default=200, help='Number of epochs to train.')
 parser.add_argument('--hidden', type=int, default=16, help='Number of hidden units.')
@@ -30,9 +34,11 @@ parser.add_argument('--outer_steps', type=int, default=1, help='steps for outer
 parser.add_argument('--lr_adj', type=float, default=0.01, help='lr for training adj')
 parser.add_argument('--symmetric', action='store_true', default=False,
                     help='whether use symmetric matrix')
+parser.add_argument('--gpu', type=int, default=6, help='GPU device ID (default: 6)')
+
 args = parser.parse_args()
 args.cuda = torch.cuda.is_available()
-device = torch.device("cuda:5" if args.cuda else "cpu")
+device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
 print('Using device:', device)
 
 # make sure you use the same data splits as you generated attacks
@@ -70,11 +76,28 @@ def test_prognn(features, adj, labels):
                 dropout=args.dropout, device=device).to(device)
     prognn = ProGNN(model, args, device)
     prognn.fit(features, adj, labels, idx_train, idx_val)
-    prognn.test(features, labels, idx_test)
+    acc = prognn.test(features, labels, idx_test)
+    return acc
+
 
 def main():
+
+    acc = test_prognn(features, perturbed_adj, labels)
+    csv_dir = "../result"
+    os.makedirs(csv_dir, exist_ok=True)
+
+    csv_filename = os.path.join(csv_dir, f"ProGNN_{args.dataset}_{args.ptb_type}_{args.ptb_rate}.csv")
+    row = [f"{args.dataset} ", f" {args.ptb_type} ", f" {args.ptb_rate} ", f" {acc}"]
 
-    test_prognn(features, perturbed_adj, labels)
+    try:
+        file_exists = os.path.isfile(csv_filename)
+        with open(csv_filename, 'a', newline='') as csvfile:
+            writer = csv.writer(csvfile)
+            if not file_exists:
+                writer.writerow(["dataset ", "ptb_type ", "ptb_rate ", "accuracy"])
+            writer.writerow(row)
+    except Exception as e:
+        print(f"[Error] Failed to write CSV: {e}")
 
 if __name__ == '__main__':
     main()
\ No newline at end of file
diff --git a/examples/graph/cgscore_experiments/defense_method/RGCN.py b/examples/graph/cgscore_experiments/defense_method/RGCN.py
index 5333121ccbc0b3209a38df0babcb9381997501eb..ed37de2d5c2d47f00f23037d6040aa64567b3c79 100644
--- a/examples/graph/cgscore_experiments/defense_method/RGCN.py
+++ b/examples/graph/cgscore_experiments/defense_method/RGCN.py
@@ -5,20 +5,23 @@ from deeprobust.graph.defense import RGCN
 from deeprobust.graph.utils import *
 from deeprobust.graph.data import Dataset, PrePtbDataset
 import argparse
+import os
+import csv
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--seed', type=int, default=15, help='Random seed.')
-parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
-parser.add_argument('--ptb_rate', type=float, default=0.0, help='pertubation rate')
-parser.add_argument('--ptb_type', type=str, default='clean', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type')
+parser.add_argument('--dataset', type=str, default='Flickr', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed', 'Flickr'], help='dataset') +parser.add_argument('--ptb_rate', type=float, default=0.25, help='pertubation rate') +parser.add_argument('--ptb_type', type=str, default='minmax', choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'], help='attack type') parser.add_argument('--hidden', type=int, default=16, help='Number of hidden units.') parser.add_argument('--dropout', type=float, default=0.5, help='Dropout rate (1 - keep probability).') +parser.add_argument('--gpu', type=int, default=0, help='GPU device ID (default: 0)') args = parser.parse_args() args.cuda = torch.cuda.is_available() print('cuda: %s' % args.cuda) -device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") +device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu") # make sure you use the same data splits as you generated attacks np.random.seed(args.seed) @@ -52,12 +55,29 @@ def test_rgcn(adj): gcn = gcn.to(device) gcn.fit(features, adj, labels, idx_train, idx_val, train_iters=200, verbose=True) gcn.eval() - gcn.test(idx_test) + acc = gcn.test(idx_test) + return acc def main(): - test_rgcn(perturbed_adj) + acc = test_rgcn(perturbed_adj) + + csv_dir = "../result" + os.makedirs(csv_dir, exist_ok=True) + + csv_filename = os.path.join(csv_dir, f"RGCN_{args.dataset}_{args.ptb_type}_{args.ptb_rate}.csv") + row = [f"{args.dataset} ", f" {args.ptb_type} ", f" {args.ptb_rate} ", f" {acc}"] + + try: + file_exists = os.path.isfile(csv_filename) + with open(csv_filename, 'a', newline='') as csvfile: + writer = csv.writer(csvfile) + if not file_exists: + writer.writerow(["dataset ", "ptb_type ", "ptb_rate ", "accuracy"]) + writer.writerow(row) + except Exception as e: + print(f"[Error] Failed to write CSV: {e}") # # if you want to save the modified adj/features, uncomment the code below # model.save_adj(root='./', name=f'mod_adj') # model.save_features(root='./', name='mod_features') diff --git a/examples/graph/cgscore_experiments/result/GAT_Flickr_clean_0.0.csv b/examples/graph/cgscore_experiments/result/GAT_Flickr_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..db2c5d7d703b4fdee10e0fe9835dca6706a814f8 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GAT_Flickr_clean_0.0.csv @@ -0,0 +1,3 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , clean , 0.0 , 0.4783828382838284 +Flickr , clean , 0.0 , 0.4641914191419142 diff --git a/examples/graph/cgscore_experiments/result/GAT_Flickr_dice_0.25.csv b/examples/graph/cgscore_experiments/result/GAT_Flickr_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..0d75a10080600dabe8b11ca4160e6c7f7679cf8f --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GAT_Flickr_dice_0.25.csv @@ -0,0 +1,3 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , dice , 0.25 , 0.2811881188118812 +Flickr , dice , 0.25 , 0.27937293729372936 diff --git a/examples/graph/cgscore_experiments/result/GAT_Flickr_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/GAT_Flickr_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..f879c23602f822ed6779ec06c9f7d30937d65f10 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GAT_Flickr_minmax_0.25.csv @@ -0,0 +1,3 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , minmax , 0.25 , 0.11204620462046205 +Flickr , minmax , 0.25 , 0.11716171617161716 diff --git 
a/examples/graph/cgscore_experiments/result/GAT_pubmed_clean_0.0.csv b/examples/graph/cgscore_experiments/result/GAT_pubmed_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..8bf963672c553b9e76b45ca92a726578552bc638 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GAT_pubmed_clean_0.0.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +pubmed , clean , 0.0 , 0.8510206669202486 diff --git a/examples/graph/cgscore_experiments/result/GCN_Flickr_clean_0.0.csv b/examples/graph/cgscore_experiments/result/GCN_Flickr_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..ae82f0f2be5640ae0c755c154f0756fb395b9aeb --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GCN_Flickr_clean_0.0.csv @@ -0,0 +1,3 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , clean , 0.0 , 0.5414191419141914 +Flickr , clean , 0.0 , 0.5597359735973597 diff --git a/examples/graph/cgscore_experiments/result/GCN_Flickr_dice_0.25.csv b/examples/graph/cgscore_experiments/result/GCN_Flickr_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..63d18227d8c8bc3f060b20143bb0d6283701107d --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GCN_Flickr_dice_0.25.csv @@ -0,0 +1,6 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , dice , 0.25 , 0.511056105610561 +Flickr , dice , 0.25 , 0.5151815181518152 +Flickr , dice , 0.25 , 0.5115511551155115 +Flickr , dice , 0.25 , 0.5171617161716172 +Flickr , dice , 0.25 , 0.5136963696369637 diff --git a/examples/graph/cgscore_experiments/result/GCN_Flickr_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/GCN_Flickr_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..f6de933b0f06bfc5bdeef7b8af9af6a80336da03 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GCN_Flickr_minmax_0.25.csv @@ -0,0 +1,4 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , minmax , 0.25 , 0.116996699669967 +Flickr , minmax , 0.25 , 0.14570957095709572 +Flickr , minmax , 0.25 , 0.11831683168316831 diff --git a/examples/graph/cgscore_experiments/result/GCN_pubmed_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/GCN_pubmed_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..c65253c8fc1ae17a32f77dceac19054b1145a601 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GCN_pubmed_minmax_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +pubmed , minmax , 0.25 , 0.5567389374920756 diff --git a/examples/graph/cgscore_experiments/result/GNNGuard_Flickr_clean_0.0.csv b/examples/graph/cgscore_experiments/result/GNNGuard_Flickr_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..40c87b46a0aa93927d79425e4b3e60e708a49777 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GNNGuard_Flickr_clean_0.0.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , clean , 0.0 , 0.7541254125412541 diff --git a/examples/graph/cgscore_experiments/result/GNNGuard_Flickr_dice_0.25.csv b/examples/graph/cgscore_experiments/result/GNNGuard_Flickr_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..29fef1efb26a563f3efbe6679ba407539b158fba --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GNNGuard_Flickr_dice_0.25.csv @@ -0,0 +1,3 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , dice , 0.25 , 0.7419141914191419 +Flickr , dice , 0.25 , 0.7422442244224422 diff --git 
a/examples/graph/cgscore_experiments/result/GNNGuard_citeseer_clean_0.0.csv b/examples/graph/cgscore_experiments/result/GNNGuard_citeseer_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..0e29f8492501f3e57ec575a279058dad5a5508d3 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GNNGuard_citeseer_clean_0.0.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +citeseer , clean , 0.0 , 0.710308056872038 diff --git a/examples/graph/cgscore_experiments/result/GNNGuard_citeseer_meta_0.25.csv b/examples/graph/cgscore_experiments/result/GNNGuard_citeseer_meta_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..e8aa7ac6ef95f3483e34f236134144b16358e1f2 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GNNGuard_citeseer_meta_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +citeseer , meta , 0.25 , 0.6475118483412323 diff --git a/examples/graph/cgscore_experiments/result/GNNGuard_cora_clean_0.0.csv b/examples/graph/cgscore_experiments/result/GNNGuard_cora_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..7869c71676daee3f3b7ca5117a3bef058430ca5e --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GNNGuard_cora_clean_0.0.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +cora , clean , 0.0 , 0.7801810865191148 diff --git a/examples/graph/cgscore_experiments/result/GNNGuard_cora_dice_0.25.csv b/examples/graph/cgscore_experiments/result/GNNGuard_cora_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..5fe41a6fbf7968cdac094abc3cf05b6f1277e26b --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GNNGuard_cora_dice_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +cora , dice , 0.25 , 0.7550301810865192 diff --git a/examples/graph/cgscore_experiments/result/GNNGuard_cora_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/GNNGuard_cora_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..6165c8f83061d54cbb9fbf2a21d76622ba408fcd --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GNNGuard_cora_minmax_0.25.csv @@ -0,0 +1,3 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +cora , minmax , 0.25 , 0.7057344064386318 +cora , minmax , 0.25 , 0.7087525150905433 diff --git a/examples/graph/cgscore_experiments/result/GNNGuard_polblogs_meta_0.25.csv b/examples/graph/cgscore_experiments/result/GNNGuard_polblogs_meta_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..cd12dff244ba34e54869a1be902a0a35946a6d66 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GNNGuard_polblogs_meta_0.25.csv @@ -0,0 +1,3 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +polblogs , meta , 0.25 , 0.4877300613496933 +polblogs , meta , 0.25 , 0.4877300613496933 diff --git a/examples/graph/cgscore_experiments/result/GNNGuard_polblogs_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/GNNGuard_polblogs_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..13a586d9f06303d90805c57f4ab38e7cb95f2de8 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GNNGuard_polblogs_minmax_0.25.csv @@ -0,0 +1,3 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +polblogs , minmax , 0.25 , 0.4877300613496933 +polblogs , minmax , 0.25 , 0.4877300613496933 diff --git a/examples/graph/cgscore_experiments/result/GNNGuard_pubmed_dice_0.25.csv b/examples/graph/cgscore_experiments/result/GNNGuard_pubmed_dice_0.25.csv new file mode 100644 index 
0000000000000000000000000000000000000000..b85fa947fce3bf503f43b2147ce2edb5b26c386d --- /dev/null +++ b/examples/graph/cgscore_experiments/result/GNNGuard_pubmed_dice_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +pubmed , dice , 0.25 , 0.8228730822873083 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_Flickr_clean_0.0.csv b/examples/graph/cgscore_experiments/result/Jaccard_Flickr_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..3f05205f6a30d1f54923a0e7953c357eef6d69fe --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_Flickr_clean_0.0.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +Flickr , clean , 0.0 , 0.05 , 0.7282178217821782 +Flickr , clean , 0.0 , 0.09 , 0.7422442244224422 +Flickr , clean , 0.0 , 0.2 , 0.7333333333333334 +Flickr , clean , 0.0 , 0.06 , 0.7366336633663366 +Flickr , clean , 0.0 , 0.1 , 0.7404290429042905 +Flickr , clean , 0.0 , 0.4 , 0.717986798679868 +Flickr , clean , 0.0 , 0.03 , 0.7156765676567657 +Flickr , clean , 0.0 , 0.08 , 0.7475247524752475 +Flickr , clean , 0.0 , 0.5 , 0.7358085808580859 +Flickr , clean , 0.0 , 0.04 , 0.7288778877887789 +Flickr , clean , 0.0 , 0.07 , 0.7410891089108911 +Flickr , clean , 0.0 , 0.02 , 0.6950495049504951 +Flickr , clean , 0.0 , 0.3 , 0.7242574257425742 +Flickr , clean , 0.0 , 0.01 , 0.6866336633663367 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_Flickr_dice_0.25.csv b/examples/graph/cgscore_experiments/result/Jaccard_Flickr_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..14716d7cf28be26c94244f42a9b6c5adb49f5e39 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_Flickr_dice_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +Flickr , dice , 0.25 , 0.1 , 0.7334983498349835 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_Flickr_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/Jaccard_Flickr_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..7686199d4a95125be80c1fb9dfe1e6189f145547 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_Flickr_minmax_0.25.csv @@ -0,0 +1,6 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +Flickr , minmax , 0.25 , 0.1 , 0.7336633663366336 +Flickr , minmax , 0.25 , 0.04 , 0.721947194719472 +Flickr , minmax , 0.25 , 0.5 , 0.7338283828382839 +Flickr , minmax , 0.25 , 0.01 , 0.6072607260726073 +Flickr , minmax , 0.25 , 0.06 , 0.7382838283828383 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_citeseer_clean_0.0.csv b/examples/graph/cgscore_experiments/result/Jaccard_citeseer_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..4293a8a43246f796dcd8294165878330a2f12a16 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_citeseer_clean_0.0.csv @@ -0,0 +1,35 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +citeseer , clean , 0.0 , 0.07 , 0.7132701421800949 +citeseer , clean , 0.0 , 0.03 , 0.729265402843602 +citeseer , clean , 0.0 , 0.06 , 0.7109004739336493 +citeseer , clean , 0.0 , 0.05 , 0.7162322274881517 +citeseer , clean , 0.0 , 0.01 , 0.7209715639810427 +citeseer , clean , 0.0 , 0.08 , 0.7138625592417062 +citeseer , clean , 0.0 , 0.5 , 0.6255924170616114 +citeseer , clean , 0.0 , 0.4 , 0.6558056872037915 +citeseer , clean , 0.0 , 0.3 , 0.6463270142180095 +citeseer , clean , 0.0 , 0.3 , 0.6575829383886257 +citeseer , clean , 0.0 , 0.1 , 0.6990521327014219 
+citeseer , clean , 0.0 , 0.03 , 0.7162322274881517 +citeseer , clean , 0.0 , 0.01 , 0.71978672985782 +citeseer , clean , 0.0 , 0.2 , 0.6652843601895735 +citeseer , clean , 0.0 , 0.08 , 0.7008293838862559 +citeseer , clean , 0.0 , 0.04 , 0.7221563981042655 +citeseer , clean , 0.0 , 0.5 , 0.6332938388625593 +citeseer , clean , 0.0 , 0.05 , 0.7097156398104266 +citeseer , clean , 0.0 , 0.06 , 0.7227488151658769 +citeseer , clean , 0.0 , 0.4 , 0.6712085308056872 +citeseer , clean , 0.0 , 0.05 , 0.7162322274881517 +citeseer , clean , 0.0 , 0.09 , 0.6978672985781991 +citeseer , clean , 0.0 , 0.07 , 0.715047393364929 +citeseer , clean , 0.0 , 0.3 , 0.6528436018957346 +citeseer , clean , 0.0 , 0.03 , 0.7257109004739337 +citeseer , clean , 0.0 , 0.02 , 0.7215639810426541 +citeseer , clean , 0.0 , 0.06 , 0.7209715639810427 +citeseer , clean , 0.0 , 0.2 , 0.6753554502369669 +citeseer , clean , 0.0 , 0.1 , 0.6937203791469195 +citeseer , clean , 0.0 , 0.08 , 0.7026066350710901 +citeseer , clean , 0.0 , 0.01 , 0.7298578199052134 +citeseer , clean , 0.0 , 0.4 , 0.6658767772511849 +citeseer , clean , 0.0 , 0.5 , 0.6380331753554502 +citeseer , clean , 0.0 , 0.04 , 0.7209715639810427 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_citeseer_meta_0.25.csv b/examples/graph/cgscore_experiments/result/Jaccard_citeseer_meta_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..2794f60c3a5ca02a2aaea60a527305929be1a828 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_citeseer_meta_0.25.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +citeseer , meta , 0.25 , 0.2 , 0.6362559241706162 +citeseer , meta , 0.25 , 0.3 , 0.625 +citeseer , meta , 0.25 , 0.1 , 0.7020142180094787 +citeseer , meta , 0.25 , 0.01 , 0.6107819905213271 +citeseer , meta , 0.25 , 0.5 , 0.658175355450237 +citeseer , meta , 0.25 , 0.06 , 0.6978672985781991 +citeseer , meta , 0.25 , 0.09 , 0.6978672985781991 +citeseer , meta , 0.25 , 0.08 , 0.6907582938388626 +citeseer , meta , 0.25 , 0.4 , 0.6611374407582938 +citeseer , meta , 0.25 , 0.03 , 0.6546208530805687 +citeseer , meta , 0.25 , 0.04 , 0.667654028436019 +citeseer , meta , 0.25 , 0.02 , 0.6433649289099527 +citeseer , meta , 0.25 , 0.05 , 0.6694312796208531 +citeseer , meta , 0.25 , 0.07 , 0.7014218009478673 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_citeseer_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/Jaccard_citeseer_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..d0f86822f5749f082c01a2e0e92dfef814672a49 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_citeseer_minmax_0.25.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +citeseer , minmax , 0.25 , 0.05 , 0.6877962085308057 +citeseer , minmax , 0.25 , 0.02 , 0.6789099526066351 +citeseer , minmax , 0.25 , 0.08 , 0.7002369668246446 +citeseer , minmax , 0.25 , 0.09 , 0.6949052132701422 +citeseer , minmax , 0.25 , 0.2 , 0.6587677725118484 +citeseer , minmax , 0.25 , 0.04 , 0.6623222748815166 +citeseer , minmax , 0.25 , 0.03 , 0.6931279620853081 +citeseer , minmax , 0.25 , 0.3 , 0.648696682464455 +citeseer , minmax , 0.25 , 0.1 , 0.7120853080568721 +citeseer , minmax , 0.25 , 0.07 , 0.7014218009478673 +citeseer , minmax , 0.25 , 0.01 , 0.691350710900474 +citeseer , minmax , 0.25 , 0.5 , 0.6617298578199052 +citeseer , minmax , 0.25 , 0.06 , 0.6954976303317536 +citeseer , minmax , 0.25 , 0.4 , 0.6718009478672986 diff --git 
a/examples/graph/cgscore_experiments/result/Jaccard_citeseer_random_0.25.csv b/examples/graph/cgscore_experiments/result/Jaccard_citeseer_random_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..7fdd1a629cd5bc9d297b88d807d6844cc5eca96e --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_citeseer_random_0.25.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +citeseer , random , 0.25 , 0.08 , 0.6907582938388626 +citeseer , random , 0.25 , 0.09 , 0.7120853080568721 +citeseer , random , 0.25 , 0.4 , 0.6516587677725119 +citeseer , random , 0.25 , 0.2 , 0.6777251184834123 +citeseer , random , 0.25 , 0.04 , 0.6978672985781991 +citeseer , random , 0.25 , 0.1 , 0.7008293838862559 +citeseer , random , 0.25 , 0.07 , 0.7138625592417062 +citeseer , random , 0.25 , 0.5 , 0.6433649289099527 +citeseer , random , 0.25 , 0.01 , 0.6836492890995262 +citeseer , random , 0.25 , 0.3 , 0.6516587677725119 +citeseer , random , 0.25 , 0.06 , 0.7209715639810427 +citeseer , random , 0.25 , 0.05 , 0.705568720379147 +citeseer , random , 0.25 , 0.02 , 0.7079383886255924 +citeseer , random , 0.25 , 0.03 , 0.7114928909952607 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_cora_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/Jaccard_cora_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..c103b9a8199b03a9490427a3da3d4af15a5e4a21 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_cora_minmax_0.25.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +cora , minmax , 0.25 , 0.5 , 0.6091549295774649 +cora , minmax , 0.25 , 0.4 , 0.6307847082494971 +cora , minmax , 0.25 , 0.05 , 0.6830985915492959 +cora , minmax , 0.25 , 0.02 , 0.6584507042253521 +cora , minmax , 0.25 , 0.1 , 0.7087525150905433 +cora , minmax , 0.25 , 0.09 , 0.7263581488933603 +cora , minmax , 0.25 , 0.2 , 0.6498993963782697 +cora , minmax , 0.25 , 0.01 , 0.6609657947686117 +cora , minmax , 0.25 , 0.06 , 0.7223340040241449 +cora , minmax , 0.25 , 0.07 , 0.7067404426559357 +cora , minmax , 0.25 , 0.03 , 0.7188128772635816 +cora , minmax , 0.25 , 0.3 , 0.6453722334004025 +cora , minmax , 0.25 , 0.04 , 0.6770623742454729 +cora , minmax , 0.25 , 0.08 , 0.7188128772635816 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_cora_random_0.25.csv b/examples/graph/cgscore_experiments/result/Jaccard_cora_random_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..da686eb42a873e089e35cfba5f1eda480e7e462a --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_cora_random_0.25.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +cora , random , 0.25 , 0.02 , 0.7711267605633804 +cora , random , 0.25 , 0.03 , 0.7570422535211269 +cora , random , 0.25 , 0.2 , 0.6448692152917506 +cora , random , 0.25 , 0.08 , 0.7198189134808853 +cora , random , 0.25 , 0.5 , 0.6066398390342053 +cora , random , 0.25 , 0.09 , 0.7268611670020121 +cora , random , 0.25 , 0.1 , 0.6976861167002013 +cora , random , 0.25 , 0.4 , 0.6242454728370221 +cora , random , 0.25 , 0.01 , 0.7711267605633804 +cora , random , 0.25 , 0.05 , 0.7525150905432596 +cora , random , 0.25 , 0.04 , 0.7580482897384306 +cora , random , 0.25 , 0.06 , 0.7535211267605635 +cora , random , 0.25 , 0.3 , 0.6343058350100604 +cora , random , 0.25 , 0.07 , 0.7263581488933603 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_polblogs_clean_0.0.csv 
b/examples/graph/cgscore_experiments/result/Jaccard_polblogs_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..980db50622aa98ecb3d604d86a784129cd85be31 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_polblogs_clean_0.0.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +polblogs , clean , 0.0 , 0.2 , 0.5132924335378324 +polblogs , clean , 0.0 , 0.3 , 0.5173824130879346 +polblogs , clean , 0.0 , 0.06 , 0.5071574642126789 +polblogs , clean , 0.0 , 0.07 , 0.5204498977505113 +polblogs , clean , 0.0 , 0.08 , 0.5071574642126789 +polblogs , clean , 0.0 , 0.04 , 0.5030674846625768 +polblogs , clean , 0.0 , 0.09 , 0.5173824130879346 +polblogs , clean , 0.0 , 0.5 , 0.5051124744376279 +polblogs , clean , 0.0 , 0.05 , 0.5204498977505113 +polblogs , clean , 0.0 , 0.03 , 0.5286298568507158 +polblogs , clean , 0.0 , 0.01 , 0.49386503067484666 +polblogs , clean , 0.0 , 0.4 , 0.5265848670756647 +polblogs , clean , 0.0 , 0.02 , 0.5173824130879346 +polblogs , clean , 0.0 , 0.1 , 0.5040899795501023 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_polblogs_dice_0.25.csv b/examples/graph/cgscore_experiments/result/Jaccard_polblogs_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..d315a7a1b88406b0f72975f8e1fe5b62a798db0d --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_polblogs_dice_0.25.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +polblogs , dice , 0.25 , 0.09 , 0.5255623721881391 +polblogs , dice , 0.25 , 0.4 , 0.5132924335378324 +polblogs , dice , 0.25 , 0.08 , 0.5204498977505113 +polblogs , dice , 0.25 , 0.05 , 0.4795501022494888 +polblogs , dice , 0.25 , 0.1 , 0.4795501022494888 +polblogs , dice , 0.25 , 0.5 , 0.5204498977505113 +polblogs , dice , 0.25 , 0.07 , 0.5245398773006136 +polblogs , dice , 0.25 , 0.2 , 0.5061349693251534 +polblogs , dice , 0.25 , 0.03 , 0.5102249488752557 +polblogs , dice , 0.25 , 0.06 , 0.5173824130879346 +polblogs , dice , 0.25 , 0.01 , 0.5173824130879346 +polblogs , dice , 0.25 , 0.02 , 0.5081799591002045 +polblogs , dice , 0.25 , 0.3 , 0.5204498977505113 +polblogs , dice , 0.25 , 0.04 , 0.514314928425358 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_polblogs_meta_0.25.csv b/examples/graph/cgscore_experiments/result/Jaccard_polblogs_meta_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..0e194f31eb5ebe1534e72c80dea655452b01d012 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_polblogs_meta_0.25.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +polblogs , meta , 0.25 , 0.2 , 0.5112474437627812 +polblogs , meta , 0.25 , 0.1 , 0.5051124744376279 +polblogs , meta , 0.25 , 0.01 , 0.49897750511247446 +polblogs , meta , 0.25 , 0.06 , 0.48261758691206547 +polblogs , meta , 0.25 , 0.5 , 0.5214723926380369 +polblogs , meta , 0.25 , 0.4 , 0.49897750511247446 +polblogs , meta , 0.25 , 0.07 , 0.5204498977505113 +polblogs , meta , 0.25 , 0.09 , 0.47137014314928427 +polblogs , meta , 0.25 , 0.03 , 0.49079754601227 +polblogs , meta , 0.25 , 0.08 , 0.523517382413088 +polblogs , meta , 0.25 , 0.04 , 0.5092024539877301 +polblogs , meta , 0.25 , 0.3 , 0.5204498977505113 +polblogs , meta , 0.25 , 0.05 , 0.5184049079754601 +polblogs , meta , 0.25 , 0.02 , 0.4979550102249489 diff --git a/examples/graph/cgscore_experiments/result/Jaccard_pubmed_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/Jaccard_pubmed_minmax_0.25.csv new file mode 
100644 index 0000000000000000000000000000000000000000..245499cddb33e3af46291b275d0c86c275e95955 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/Jaccard_pubmed_minmax_0.25.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,threshold ,accuracy +pubmed , minmax , 0.25 , 0.05 , 0.597629009762901 +pubmed , minmax , 0.25 , 0.03 , 0.5685304932166857 +pubmed , minmax , 0.25 , 0.3 , 0.8451248890579436 +pubmed , minmax , 0.25 , 0.1 , 0.7615062761506277 +pubmed , minmax , 0.25 , 0.01 , 0.5611132242931407 +pubmed , minmax , 0.25 , 0.09 , 0.7393812603017624 +pubmed , minmax , 0.25 , 0.04 , 0.5867249904906809 +pubmed , minmax , 0.25 , 0.2 , 0.8496893622416636 +pubmed , minmax , 0.25 , 0.07 , 0.6496766831494866 +pubmed , minmax , 0.25 , 0.5 , 0.8383415747432484 +pubmed , minmax , 0.25 , 0.4 , 0.8423988842398885 +pubmed , minmax , 0.25 , 0.02 , 0.5549004691264106 +pubmed , minmax , 0.25 , 0.06 , 0.6208951439076963 +pubmed , minmax , 0.25 , 0.08 , 0.685495118549512 diff --git a/examples/graph/cgscore_experiments/result/ProGNN_Flickr_clean_0.0.csv b/examples/graph/cgscore_experiments/result/ProGNN_Flickr_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..9b70e5db43611f7abc12cd422c4fe75531b0a560 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/ProGNN_Flickr_clean_0.0.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , clean , 0.0 , 0.44686468646864685 diff --git a/examples/graph/cgscore_experiments/result/ProGNN_Flickr_dice_0.25.csv b/examples/graph/cgscore_experiments/result/ProGNN_Flickr_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..d030e3ee9fc86e1811af1dcdf80dcbbc7382e473 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/ProGNN_Flickr_dice_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , dice , 0.25 , 0.7123762376237623 diff --git a/examples/graph/cgscore_experiments/result/ProGNN_Flickr_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/ProGNN_Flickr_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..f3e74bda1c2d64280459d50e68a68d1e461e3ad1 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/ProGNN_Flickr_minmax_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , minmax , 0.25 , 0.19686468646864685 diff --git a/examples/graph/cgscore_experiments/result/ProGNN_citeseer_dice_0.25.csv b/examples/graph/cgscore_experiments/result/ProGNN_citeseer_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..2ab48ea4954fdcf01ba0b5226eb9fb6b688ccbbd --- /dev/null +++ b/examples/graph/cgscore_experiments/result/ProGNN_citeseer_dice_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +citeseer , dice , 0.25 , 0.7215639810426541 diff --git a/examples/graph/cgscore_experiments/result/ProGNN_citeseer_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/ProGNN_citeseer_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..9d84f299b1e1bb2961ebdf115a7d3b01e27c9fc1 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/ProGNN_citeseer_minmax_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +citeseer , minmax , 0.25 , 0.6990521327014219 diff --git a/examples/graph/cgscore_experiments/result/ProGNN_cora_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/ProGNN_cora_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..ec5b3435ce2893083f155afea439e7f9385bc4bf --- /dev/null 
+++ b/examples/graph/cgscore_experiments/result/ProGNN_cora_minmax_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +cora , minmax , 0.25 , 0.6443661971830986 diff --git a/examples/graph/cgscore_experiments/result/ProGNN_polblogs_dice_0.25.csv b/examples/graph/cgscore_experiments/result/ProGNN_polblogs_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..f697413f8f769ea4b01c2f02c7bdea243989019c --- /dev/null +++ b/examples/graph/cgscore_experiments/result/ProGNN_polblogs_dice_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +polblogs , dice , 0.25 , 0.7474437627811862 diff --git a/examples/graph/cgscore_experiments/result/ProGNN_polblogs_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/ProGNN_polblogs_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..25264ddb5b41c224cfb87066ec1e0fca958d4ce3 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/ProGNN_polblogs_minmax_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +polblogs , minmax , 0.25 , 0.6073619631901841 diff --git a/examples/graph/cgscore_experiments/result/RGCN_Flickr_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/RGCN_Flickr_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..88de98d9d78d4abfcf5c54e178dbfc61e6d2a13e --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_Flickr_minmax_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +Flickr , minmax , 0.25 , 0.0712871287128713 diff --git a/examples/graph/cgscore_experiments/result/RGCN_citeseer_clean_0.0.csv b/examples/graph/cgscore_experiments/result/RGCN_citeseer_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..0694d6156b0de749d6a7fb29aae465084de56647 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_citeseer_clean_0.0.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +citeseer , clean , 0.0 , 0.71978672985782 diff --git a/examples/graph/cgscore_experiments/result/RGCN_citeseer_meta_0.25.csv b/examples/graph/cgscore_experiments/result/RGCN_citeseer_meta_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..e93a326707447f2f5fd55a73d44993c80e807dfa --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_citeseer_meta_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +citeseer , meta , 0.25 , 0.5681279620853081 diff --git a/examples/graph/cgscore_experiments/result/RGCN_cora_clean_0.0.csv b/examples/graph/cgscore_experiments/result/RGCN_cora_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..c90ac53a926675a16c5dd6131abb1af0ded10a66 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_cora_clean_0.0.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +cora , clean , 0.0 , 0.8289738430583502 diff --git a/examples/graph/cgscore_experiments/result/RGCN_cora_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/RGCN_cora_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..4299307a28fcd19213fdc1cd01db78e01c3aaab4 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_cora_minmax_0.25.csv @@ -0,0 +1,3 @@ +cora , minmax , 0.25 , 0.5870221327967807 +cora , minmax , 0.25 , 0.5769617706237425 +cora , minmax , 0.25 , 0.5980885311871228 diff --git a/examples/graph/cgscore_experiments/result/RGCN_polblogs_clean_0.0.csv b/examples/graph/cgscore_experiments/result/RGCN_polblogs_clean_0.0.csv 
new file mode 100644 index 0000000000000000000000000000000000000000..700057a77597d60034e1ddb3879d5df37a60ace7 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_polblogs_clean_0.0.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +polblogs , clean , 0.0 , 0.9529652351738243 diff --git a/examples/graph/cgscore_experiments/result/RGCN_polblogs_meta_0.25.csv b/examples/graph/cgscore_experiments/result/RGCN_polblogs_meta_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..422ff6ca3629a4966d5d31d1e894d4ca5127d42e --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_polblogs_meta_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +polblogs , meta , 0.25 , 0.6267893660531698 diff --git a/examples/graph/cgscore_experiments/result/RGCN_polblogs_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/RGCN_polblogs_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..d44c9381d18b95461c3f1a0e76e2900fa1d05f5d --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_polblogs_minmax_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +polblogs , minmax , 0.25 , 0.8210633946830267 diff --git a/examples/graph/cgscore_experiments/result/RGCN_pubmed_clean_0.0.csv b/examples/graph/cgscore_experiments/result/RGCN_pubmed_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..fe095d7fe692bda5ab30c9298d9aefb83eed0efc --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_pubmed_clean_0.0.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +pubmed , clean , 0.0 , 0.8472169392671486 diff --git a/examples/graph/cgscore_experiments/result/RGCN_pubmed_dice_0.25.csv b/examples/graph/cgscore_experiments/result/RGCN_pubmed_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..8e5c727a6cff5ee129751433db3dae1884ee6bbc --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_pubmed_dice_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +pubmed , dice , 0.25 , 0.8144414859896032 diff --git a/examples/graph/cgscore_experiments/result/RGCN_pubmed_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/RGCN_pubmed_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..7ab8bbc9a691bcb470b2d97fa0508e0365892a47 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/RGCN_pubmed_minmax_0.25.csv @@ -0,0 +1,2 @@ +dataset ,ptb_type ,ptb_rate ,accuracy +pubmed , minmax , 0.25 , 0.5464054773678205 diff --git a/examples/graph/cgscore_experiments/result/SVD_Flickr_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/SVD_Flickr_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..489edc52c856bf72ca8155a5d500f063165e5a5c --- /dev/null +++ b/examples/graph/cgscore_experiments/result/SVD_Flickr_minmax_0.25.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,svd_k accuracy +Flickr , minmax , 0.25 , 40 , 0.2613861386138614 +Flickr , minmax , 0.25 , 50 , 0.2514851485148515 +Flickr , minmax , 0.25 , 70 , 0.2518151815181518 +Flickr , minmax , 0.25 , 65 , 0.24554455445544554 +Flickr , minmax , 0.25 , 45 , 0.25214521452145217 +Flickr , minmax , 0.25 , 60 , 0.24966996699669966 +Flickr , minmax , 0.25 , 55 , 0.25775577557755774 +Flickr , minmax , 0.25 , 25 , 0.3415841584158416 +Flickr , minmax , 0.25 , 15 , 0.3917491749174917 +Flickr , minmax , 0.25 , 5 , 0.26765676567656765 +Flickr , minmax , 0.25 , 35 , 0.30033003300330036 +Flickr , minmax , 
0.25 , 10 , 0.3338283828382838 +Flickr , minmax , 0.25 , 30 , 0.2886138613861386 +Flickr , minmax , 0.25 , 20 , 0.36633663366336633 diff --git a/examples/graph/cgscore_experiments/result/SVD_citeseer_clean_0.0.csv b/examples/graph/cgscore_experiments/result/SVD_citeseer_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..6cbfdaec56b2f3392420fa1deebddcf4afc5dd7d --- /dev/null +++ b/examples/graph/cgscore_experiments/result/SVD_citeseer_clean_0.0.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,svd_k accuracy +citeseer , clean , 0.0 , 50 , 0.6937203791469195 +citeseer , clean , 0.0 , 15 , 0.6712085308056872 +citeseer , clean , 0.0 , 20 , 0.6735781990521328 +citeseer , clean , 0.0 , 25 , 0.696090047393365 +citeseer , clean , 0.0 , 35 , 0.691350710900474 +citeseer , clean , 0.0 , 70 , 0.6860189573459716 +citeseer , clean , 0.0 , 30 , 0.6966824644549764 +citeseer , clean , 0.0 , 55 , 0.686611374407583 +citeseer , clean , 0.0 , 45 , 0.6759478672985783 +citeseer , clean , 0.0 , 5 , 0.6712085308056872 +citeseer , clean , 0.0 , 10 , 0.6759478672985783 +citeseer , clean , 0.0 , 60 , 0.6860189573459716 +citeseer , clean , 0.0 , 40 , 0.6783175355450237 +citeseer , clean , 0.0 , 65 , 0.6854265402843602 diff --git a/examples/graph/cgscore_experiments/result/SVD_cora_clean_0.0.csv b/examples/graph/cgscore_experiments/result/SVD_cora_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..08e3f04f7f36d3c4ac9189a5c900736599d8ed14 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/SVD_cora_clean_0.0.csv @@ -0,0 +1,13 @@ +dataset ,ptb_type ,ptb_rate ,svd_k accuracy +cora , clean , 0.0 , 35 , 0.7771629778672032 +cora , clean , 0.0 , 50 , 0.7776659959758552 +cora , clean , 0.0 , 10 , 0.699195171026157 +cora , clean , 0.0 , 20 , 0.738933601609658 +cora , clean , 0.0 , 55 , 0.7751509054325957 +cora , clean , 0.0 , 5 , 0.6851106639839035 +cora , clean , 0.0 , 45 , 0.767102615694165 +cora , clean , 0.0 , 40 , 0.7816901408450705 +cora , clean , 0.0 , 15 , 0.7137826961770625 +cora , clean , 0.0 , 60 , 0.7711267605633804 +cora , clean , 0.0 , 25 , 0.7474849094567405 +cora , clean , 0.0 , 30 , 0.7505030181086519 diff --git a/examples/graph/cgscore_experiments/result/SVD_cora_meta_0.25.csv b/examples/graph/cgscore_experiments/result/SVD_cora_meta_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..f0fe012d796b757aac32d82963e5cf4e33d17619 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/SVD_cora_meta_0.25.csv @@ -0,0 +1,13 @@ +dataset ,ptb_type ,ptb_rate ,svd_k accuracy +cora , meta , 0.25 , 5 , 0.6111670020120725 +cora , meta , 0.25 , 10 , 0.579476861167002 +cora , meta , 0.25 , 20 , 0.5674044265593562 +cora , meta , 0.25 , 55 , 0.5674044265593562 +cora , meta , 0.25 , 15 , 0.619215291750503 +cora , meta , 0.25 , 40 , 0.5311871227364185 +cora , meta , 0.25 , 50 , 0.5477867203219317 +cora , meta , 0.25 , 35 , 0.5347082494969819 +cora , meta , 0.25 , 25 , 0.591046277665996 +cora , meta , 0.25 , 30 , 0.5774647887323944 +cora , meta , 0.25 , 45 , 0.5372233400402415 +cora , meta , 0.25 , 60 , 0.5789738430583502 diff --git a/examples/graph/cgscore_experiments/result/SVD_cora_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/SVD_cora_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..b1ef70decac7e2df6c00b4e33f9b159d46908fa1 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/SVD_cora_minmax_0.25.csv @@ -0,0 +1,13 @@ +dataset ,ptb_type ,ptb_rate ,svd_k accuracy 
+cora , minmax , 0.25 , 10 , 0.5648893360160966 +cora , minmax , 0.25 , 60 , 0.5653923541247485 +cora , minmax , 0.25 , 50 , 0.5824949698189136 +cora , minmax , 0.25 , 25 , 0.5940643863179075 +cora , minmax , 0.25 , 15 , 0.591549295774648 +cora , minmax , 0.25 , 20 , 0.5845070422535211 +cora , minmax , 0.25 , 30 , 0.5648893360160966 +cora , minmax , 0.25 , 45 , 0.5875251509054327 +cora , minmax , 0.25 , 35 , 0.5945674044265594 +cora , minmax , 0.25 , 55 , 0.5804828973843059 +cora , minmax , 0.25 , 60 , 0.5653923541247485 +cora , minmax , 0.25 , 40 , 0.5824949698189136 diff --git a/examples/graph/cgscore_experiments/result/SVD_polblogs_clean_0.0.csv b/examples/graph/cgscore_experiments/result/SVD_polblogs_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..c1bdda451b5c676d7eb8ac97f67c1d50c0855d00 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/SVD_polblogs_clean_0.0.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,svd_k accuracy +polblogs , clean , 0.0 , 30 , 0.9437627811860941 +polblogs , clean , 0.0 , 60 , 0.9447852760736197 +polblogs , clean , 0.0 , 20 , 0.9447852760736197 +polblogs , clean , 0.0 , 65 , 0.9447852760736197 +polblogs , clean , 0.0 , 15 , 0.948875255623722 +polblogs , clean , 0.0 , 5 , 0.9171779141104295 +polblogs , clean , 0.0 , 25 , 0.9406952965235175 +polblogs , clean , 0.0 , 50 , 0.9468302658486708 +polblogs , clean , 0.0 , 40 , 0.9447852760736197 +polblogs , clean , 0.0 , 55 , 0.9417177914110431 +polblogs , clean , 0.0 , 45 , 0.9427402862985685 +polblogs , clean , 0.0 , 10 , 0.9417177914110431 +polblogs , clean , 0.0 , 70 , 0.9447852760736197 +polblogs , clean , 0.0 , 35 , 0.9417177914110431 diff --git a/examples/graph/cgscore_experiments/result/SVD_polblogs_meta_0.25.csv b/examples/graph/cgscore_experiments/result/SVD_polblogs_meta_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..908f4db711cce8db15dd3b622bf0fac4caae79bd --- /dev/null +++ b/examples/graph/cgscore_experiments/result/SVD_polblogs_meta_0.25.csv @@ -0,0 +1,15 @@ +dataset ,ptb_type ,ptb_rate ,svd_k accuracy +polblogs , meta , 0.25 , 10 , 0.6421267893660533 +polblogs , meta , 0.25 , 50 , 0.6339468302658487 +polblogs , meta , 0.25 , 40 , 0.6411042944785277 +polblogs , meta , 0.25 , 25 , 0.6339468302658487 +polblogs , meta , 0.25 , 55 , 0.6308793456032721 +polblogs , meta , 0.25 , 15 , 0.6247443762781186 +polblogs , meta , 0.25 , 65 , 0.638036809815951 +polblogs , meta , 0.25 , 5 , 0.8128834355828222 +polblogs , meta , 0.25 , 35 , 0.6451942740286299 +polblogs , meta , 0.25 , 45 , 0.6431492842535788 +polblogs , meta , 0.25 , 60 , 0.6359918200408998 +polblogs , meta , 0.25 , 30 , 0.6319018404907976 +polblogs , meta , 0.25 , 70 , 0.6370143149284254 +polblogs , meta , 0.25 , 20 , 0.6308793456032721 diff --git a/examples/graph/cgscore_experiments/result/SVD_pubmed_clean_0.0.csv b/examples/graph/cgscore_experiments/result/SVD_pubmed_clean_0.0.csv new file mode 100644 index 0000000000000000000000000000000000000000..194f53f5828375730726e6a9c45a06195f9bb6be --- /dev/null +++ b/examples/graph/cgscore_experiments/result/SVD_pubmed_clean_0.0.csv @@ -0,0 +1,9 @@ +dataset ,ptb_type ,ptb_rate ,svd_k accuracy +pubmed , clean , 0.0 , 50 , 0.8389121338912134 +pubmed , clean , 0.0 , 10 , 0.8390389248129835 +pubmed , clean , 0.0 , 20 , 0.8418917205528085 +pubmed , clean , 0.0 , 15 , 0.8381513883605934 +pubmed , clean , 0.0 , 25 , 0.8413845568657286 +pubmed , clean , 0.0 , 55 , 0.2081272980854571 +pubmed , clean , 0.0 , 30 , 0.8453784709014835 +pubmed , 
clean , 0.0 , 35 , 0.8390389248129835 diff --git a/examples/graph/cgscore_experiments/result/SVD_pubmed_dice_0.25.csv b/examples/graph/cgscore_experiments/result/SVD_pubmed_dice_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..3da9d6f52284de48509a2164f45f30459097bb40 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/SVD_pubmed_dice_0.25.csv @@ -0,0 +1,13 @@ +dataset ,ptb_type ,ptb_rate ,svd_k accuracy +pubmed , dice , 0.25 , 10 , 0.2081272980854571 +pubmed , dice , 0.25 , 20 , 0.2081272980854571 +pubmed , dice , 0.25 , 15 , 0.2081272980854571 +pubmed , dice , 0.25 , 5 , 0.2081272980854571 +pubmed , dice , 0.25 , 25 , 0.2081272980854571 +pubmed , dice , 0.25 , 30 , 0.2081272980854571 +pubmed , dice , 0.25 , 35 , 0.2081272980854571 +pubmed , dice , 0.25 , 40 , 0.2081272980854571 +pubmed , dice , 0.25 , 60 , 0.2081272980854571 +pubmed , dice , 0.25 , 50 , 0.2081272980854571 +pubmed , dice , 0.25 , 55 , 0.2081272980854571 +pubmed , dice , 0.25 , 45 , 0.2081272980854571 diff --git a/examples/graph/cgscore_experiments/result/SVD_pubmed_minmax_0.25.csv b/examples/graph/cgscore_experiments/result/SVD_pubmed_minmax_0.25.csv new file mode 100644 index 0000000000000000000000000000000000000000..9d040f9240b5b38a00bd6c6476ef1a4e79091a27 --- /dev/null +++ b/examples/graph/cgscore_experiments/result/SVD_pubmed_minmax_0.25.csv @@ -0,0 +1,14 @@ +dataset ,ptb_type ,ptb_rate ,svd_k accuracy +pubmed , minmax , 0.25 , 5 , 0.796120197793838 +pubmed , minmax , 0.25 , 50 , 0.8014454165081781 +pubmed , minmax , 0.25 , 10 , 0.8003676936731331 +pubmed , minmax , 0.25 , 15 , 0.8016356028908331 +pubmed , minmax , 0.25 , 20 , 0.8039178394826931 +pubmed , minmax , 0.25 , 25 , 0.8027133257258781 +pubmed , minmax , 0.25 , 30 , 0.8050589577786231 +pubmed , minmax , 0.25 , 40 , 0.7996703436033981 +pubmed , minmax , 0.25 , 35 , 0.8030936984911882 +pubmed , minmax , 0.25 , 45 , 0.7978952706986181 +pubmed , minmax , 0.25 , 50 , 0.8007480664384431 +pubmed , minmax , 0.25 , 60 , 0.2081272980854571 +pubmed , minmax , 0.25 , 55 , 0.2081272980854571 diff --git a/examples/graph/cgscore_experiments/runsh/gnnguard.txt b/examples/graph/cgscore_experiments/runsh/gnnguard.txt new file mode 100644 index 0000000000000000000000000000000000000000..c09e208f92a9f74a5bf1f182386b255904772ef5 --- /dev/null +++ b/examples/graph/cgscore_experiments/runsh/gnnguard.txt @@ -0,0 +1,80 @@ +cuda: True +Loading flickr dataset... 
+Traceback (most recent call last): + File "/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/examples/graph/cgscore_experiments/runsh/../defense_method/GNNGuard.py", line 38, in + perturbed_adj = torch.load(ptb_path) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1025, in load + return _load(opened_zipfile, + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1446, in _load + result = unpickler.load() + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1416, in persistent_load + typed_storage = load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location)) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1390, in load_tensor + wrap_storage=restore_location(storage, location), + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 390, in default_restore_location + result = fn(storage, location) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 270, in _cuda_deserialize + return obj.cuda(device) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/_utils.py", line 114, in _cuda + untyped_storage = torch.UntypedStorage( +torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 220.00 MiB. GPU  has a total capacity of 47.41 GiB of which 79.38 MiB is free. Process 460136 has 47.04 GiB memory in use. Including non-PyTorch memory, this process has 260.00 MiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +cuda: True +Loading flickr dataset... 
+Traceback (most recent call last): + File "/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/examples/graph/cgscore_experiments/runsh/../defense_method/GNNGuard.py", line 38, in + perturbed_adj = torch.load(ptb_path) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1025, in load + return _load(opened_zipfile, + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1446, in _load + result = unpickler.load() + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1416, in persistent_load + typed_storage = load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location)) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1390, in load_tensor + wrap_storage=restore_location(storage, location), + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 390, in default_restore_location + result = fn(storage, location) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 270, in _cuda_deserialize + return obj.cuda(device) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/_utils.py", line 114, in _cuda + untyped_storage = torch.UntypedStorage( +torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 220.00 MiB. GPU  has a total capacity of 47.41 GiB of which 79.38 MiB is free. Process 460136 has 47.04 GiB memory in use. Including non-PyTorch memory, this process has 260.00 MiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +cuda: True +Loading flickr dataset... 
+Traceback (most recent call last): + File "/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/examples/graph/cgscore_experiments/runsh/../defense_method/GNNGuard.py", line 38, in + perturbed_adj = torch.load(ptb_path) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1025, in load + return _load(opened_zipfile, + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1446, in _load + result = unpickler.load() + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1416, in persistent_load + typed_storage = load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location)) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1390, in load_tensor + wrap_storage=restore_location(storage, location), + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 390, in default_restore_location + result = fn(storage, location) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 270, in _cuda_deserialize + return obj.cuda(device) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/_utils.py", line 114, in _cuda + untyped_storage = torch.UntypedStorage( +torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 220.00 MiB. GPU  has a total capacity of 47.41 GiB of which 79.38 MiB is free. Process 460136 has 47.04 GiB memory in use. Including non-PyTorch memory, this process has 260.00 MiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +cuda: True +Loading flickr dataset... 
+Traceback (most recent call last): + File "/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/examples/graph/cgscore_experiments/runsh/../defense_method/GNNGuard.py", line 38, in + perturbed_adj = torch.load(ptb_path) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1025, in load + return _load(opened_zipfile, + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1446, in _load + result = unpickler.load() + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1416, in persistent_load + typed_storage = load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location)) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 1390, in load_tensor + wrap_storage=restore_location(storage, location), + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 390, in default_restore_location + result = fn(storage, location) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/serialization.py", line 270, in _cuda_deserialize + return obj.cuda(device) + File "/home/yiren/new_ssd2/chunhui/miniconda/envs/cgscore/lib/python3.9/site-packages/torch/_utils.py", line 114, in _cuda + untyped_storage = torch.UntypedStorage( +torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 220.00 MiB. GPU  has a total capacity of 47.41 GiB of which 79.38 MiB is free. Process 460136 has 47.04 GiB memory in use. Including non-PyTorch memory, this process has 260.00 MiB memory in use. Of the allocated memory 0 bytes is allocated by PyTorch, and 0 bytes is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) diff --git a/examples/graph/cgscore_experiments/runsh/svd.txt b/examples/graph/cgscore_experiments/runsh/svd.txt new file mode 100644 index 0000000000000000000000000000000000000000..fa31e3f4a213a32c4a53b3c524c5eca34c8fe8ed --- /dev/null +++ b/examples/graph/cgscore_experiments/runsh/svd.txt @@ -0,0 +1,360 @@ +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) 
+ return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +cuda: True +Loading pubmed dataset... +=== Fast SVD: rank=10 === +rank_after = 10 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +cuda: True +Loading pubmed dataset... +=== Fast SVD: rank=20 === +rank_after = 20 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +cuda: True +Loading pubmed dataset... +=== Fast SVD: rank=15 === +rank_after = 15 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +cuda: True +Loading pubmed dataset... 
+=== Fast SVD: rank=5 === +rank_after = 5 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +cuda: True +Loading pubmed dataset... +=== Fast SVD: rank=25 === +rank_after = 25 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +cuda: True +Loading pubmed dataset... 
+=== Fast SVD: rank=30 === +rank_after = 30 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +cuda: True +Loading pubmed dataset... +=== Fast SVD: rank=35 === +rank_after = 35 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +cuda: True +Loading pubmed dataset... +=== Fast SVD: rank=40 === +rank_after = 40 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. 
Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/deeprobust/graph/utils.py:356: UserWarning: torch.sparse.SparseTensor(indices, values, shape, *, device=) is deprecated. Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=). (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:621.) + return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape)) +cuda: True +Loading pubmed dataset... +=== Fast SVD: rank=60 === +rank_after = 60 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +cuda: True +Loading pubmed dataset... +=== Fast SVD: rank=50 === +rank_after = 50 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +cuda: True +Loading pubmed dataset... +=== Fast SVD: rank=55 === +rank_after = 55 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571 +cuda: True +Loading pubmed dataset... 
+=== Fast SVD: rank=45 === +rank_after = 45 +=== training gcn model === +Epoch 0, training loss: nan +Epoch 10, training loss: nan +Epoch 20, training loss: nan +Epoch 30, training loss: nan +Epoch 40, training loss: nan +Epoch 50, training loss: nan +Epoch 60, training loss: nan +Epoch 70, training loss: nan +Epoch 80, training loss: nan +Epoch 90, training loss: nan +Epoch 100, training loss: nan +Epoch 110, training loss: nan +Epoch 120, training loss: nan +Epoch 130, training loss: nan +Epoch 140, training loss: nan +Epoch 150, training loss: nan +Epoch 160, training loss: nan +Epoch 170, training loss: nan +Epoch 180, training loss: nan +Epoch 190, training loss: nan +=== picking the best model according to the performance on validation === +Test set results: loss= nan accuracy= 0.2081 +0.2081272980854571
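Note on the svd.txt log above: every pubmed run trains with `training loss: nan` and lands on the same 0.2081 test accuracy no matter which rank the Fast SVD uses, which points at the input handed to GCN normalization rather than at the model or the choice of k. One plausible culprit is the dense low-rank reconstruction itself (NaN/Inf entries, or negative values that collapse a row's degree). The sketch below is a standalone NumPy sanity check one might run on the reconstructed matrix before training; the helper name, the clipping policy, and the assumed self-loop are illustrative choices, not DeepRobust internals.

```python
# Standalone sanity check for a dense low-rank reconstruction of an adjacency
# matrix. Helper name, clipping policy and the assumed self-loop are
# illustrative assumptions, not DeepRobust internals.
import numpy as np

def check_reconstruction(A_rec, clip_negative=True, eps=1e-8):
    """Report entries that would break D^{-1/2} (A + I) D^{-1/2} normalization."""
    A = np.asarray(A_rec, dtype=np.float64)
    print("NaN entries:", int(np.isnan(A).sum()), "| Inf entries:", int(np.isinf(A).sum()))
    print("negative entries:", int((A < 0).sum()))
    if clip_negative:
        A = np.clip(A, 0.0, None)   # truncated-SVD reconstructions are not sign-constrained
    A = np.nan_to_num(A, nan=0.0, posinf=0.0, neginf=0.0)
    deg = A.sum(axis=1) + 1.0       # +1 for the self-loop typically added before GCN normalization
    print("rows with degree <= eps:", int((deg <= eps).sum()))
    return A

# tiny self-contained example: a symmetric rank-2 matrix with negative entries
rng = np.random.default_rng(0)
U = rng.normal(size=(5, 2))
A_clean = check_reconstruction(U @ U.T)
```

If all the counts come back zero, the NaN is more likely introduced later, for example by feature normalization or by the sparse-tensor conversion that the UserWarning at the top of the log complains about.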
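The gnnguard.txt log fails earlier still: all four runs die inside `perturbed_adj = torch.load(ptb_path)` with a CUDA out-of-memory error while another process already holds roughly 47 GiB of the card. By default `torch.load` restores tensors to the device they were saved from (the traceback goes through `_cuda_deserialize`), so a common workaround is to deserialize onto the CPU and move to the GPU explicitly once it is known to have room. The sketch below uses placeholder values for `ptb_path` and the target device.

```python
# Common mitigation for the OOM hit during torch.load in gnnguard.txt:
# deserialize on CPU, then move explicitly. ptb_path and device are
# placeholders standing in for the script's own values.
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ptb_path = 'perturbed_adj.pt'  # placeholder path

perturbed_adj = torch.load(ptb_path, map_location='cpu')  # never allocates on the GPU
perturbed_adj = perturbed_adj.to(device)                  # move once, under our control
```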
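Finally, the result CSVs added above are close to, but not quite, uniform: values are padded with spaces, the SVD files drop the comma between `svd_k` and `accuracy` in the header line, and `RGCN_cora_minmax_0.25.csv` has no header row at all. Rather than hand-editing the committed files, a tolerant loader can normalize them at read time. The pandas sketch below ignores the header line entirely and supplies column names itself; the column list and the `result/SVD_*.csv` glob are assumptions about how the files would be consumed.

```python
# Tolerant loader for the result CSVs: works with the fused "svd_k accuracy"
# header, with files that have no header, and with space-padded values.
# Column names and the glob pattern are assumptions.
import glob
import pandas as pd

SVD_COLS = ['dataset', 'ptb_type', 'ptb_rate', 'svd_k', 'accuracy']

def load_result_csv(path, columns):
    # header=None + explicit names tolerates malformed or missing header rows
    df = pd.read_csv(path, header=None, names=columns, skipinitialspace=True)
    if str(df.iloc[0, 0]).strip() == 'dataset':   # drop a header row if one is present
        df = df.iloc[1:].reset_index(drop=True)
    for c in df.columns:                          # strip the padding around values
        if df[c].dtype == object:
            df[c] = df[c].astype(str).str.strip()
    return df

frames = [load_result_csv(p, SVD_COLS) for p in glob.glob('result/SVD_*.csv')]
if frames:
    svd = pd.concat(frames, ignore_index=True)
    svd['svd_k'] = pd.to_numeric(svd['svd_k'])
    svd['accuracy'] = pd.to_numeric(svd['accuracy'])
    print(svd.groupby(['dataset', 'ptb_type', 'svd_k'])['accuracy'].mean())
```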