diff --git a/.gitattributes b/.gitattributes index 9a38d9b1a372f6c717d6f558056b854ca07536db..a38376fb5d265b95785d964a358f0862018d1144 100644 --- a/.gitattributes +++ b/.gitattributes @@ -71,3 +71,14 @@ data.files/gMVP.MSA.tgz.part-ab filter=lfs diff=lfs merge=lfs -text parse.input.table/swissprot_and_human.full.seq.csv.tgz.part-aa filter=lfs diff=lfs merge=lfs -text parse.input.table/swissprot_and_human.full.seq.csv.tgz.part-ab filter=lfs diff=lfs merge=lfs -text parse.input.table/swissprot_and_human.full.seq.csv.tgz.part-ac filter=lfs diff=lfs merge=lfs -text +MSA.tgz.part-aa filter=lfs diff=lfs merge=lfs -text +af2.files.tgz.part-aa filter=lfs diff=lfs merge=lfs -text +esm.MSA.tgz.part-aa filter=lfs diff=lfs merge=lfs -text +esm.MSA.tgz.part-ab filter=lfs diff=lfs merge=lfs -text +esm.files.tgz.part-aa filter=lfs diff=lfs merge=lfs -text +esm.files.tgz.part-ab filter=lfs diff=lfs merge=lfs -text +esm.files.tgz.part-ac filter=lfs diff=lfs merge=lfs -text +esm.files.tgz.part-ad filter=lfs diff=lfs merge=lfs -text +esm.files.tgz.part-ae filter=lfs diff=lfs merge=lfs -text +gMVP.MSA.tgz.part-aa filter=lfs diff=lfs merge=lfs -text +pretrain.tgz.part-aa filter=lfs diff=lfs merge=lfs -text diff --git a/ALL.csv.gz b/ALL.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..fbf67b532d8b48eccf99c37e9528f972ae0d54f8 --- /dev/null +++ b/ALL.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ba809ae66bc775ff9072934ade95a242fc378962eb6562fca5e926e6eadd5d2 +size 11103682 diff --git a/ASPA.tgz b/ASPA.tgz new file mode 100644 index 0000000000000000000000000000000000000000..5999783f776b92dc7e8195a8b23393e07535fe3a --- /dev/null +++ b/ASPA.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5e401c824edb3ae63e05d973dafb82a40e0c0b4167f8ea8a44f255cb2e61a2f +size 6497828 diff --git a/CCR5.tgz b/CCR5.tgz new file mode 100644 index 0000000000000000000000000000000000000000..4565c920d3f45de645f1ce23e763976122061a7f 
--- /dev/null +++ b/CCR5.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32b1fd9f3c79438e5804c24fbfd63db0f102741d60837e1735d03e554005027a +size 5790823 diff --git a/CXCR4.tgz b/CXCR4.tgz new file mode 100644 index 0000000000000000000000000000000000000000..e197ff64cc688d3d2c0e74d3e783ab049db0ec1e --- /dev/null +++ b/CXCR4.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5be7e596768b968ec92ae5a74857fc9d047d03a1387fed181fa378edf41eb057 +size 5826481 diff --git a/CYP2C9.tgz b/CYP2C9.tgz new file mode 100644 index 0000000000000000000000000000000000000000..ab83a7c8fe42f7a9465888d8d6973b89901f5df9 --- /dev/null +++ b/CYP2C9.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d89ae48ab501c9b110c8da0b89ebe40a8843b3af8a80ce26a751f9548f246984 +size 7476136 diff --git a/GCK.tgz b/GCK.tgz new file mode 100644 index 0000000000000000000000000000000000000000..f3aaf03061ee2edc3c8f60399b141b97dcd8c543 --- /dev/null +++ b/GCK.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6af4a7f205161f42e26b645c0c7baa468be540af912a5349cc700945a10429e7 +size 9954541 diff --git a/ICC.seed.0.tgz b/ICC.seed.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..f47938d91cbf5441305fce514599cbdf9c87f0d2 --- /dev/null +++ b/ICC.seed.0.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb36b5eadfefc849887c480554752f43a6b76c545a2ced700d260a1cfe56c592 +size 4588973 diff --git a/ICC.seed.1.tgz b/ICC.seed.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..9f8c21405971e58b11c8c3eb6452b09a205d0e77 --- /dev/null +++ b/ICC.seed.1.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:402f281b6b1f83da14b1b1ab81774c2da45a67883f6c0d9d778a804a15722bd2 +size 4577813 diff --git a/ICC.seed.2.tgz b/ICC.seed.2.tgz new file mode 100644 index 0000000000000000000000000000000000000000..52adfeb1c3e03d88d49bc67afe3be174c41432a4 --- 
/dev/null +++ b/ICC.seed.2.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82a8afea44f05506266ffcddce3dcbff145aad59427b606b7c4d5356fba26a80 +size 4577738 diff --git a/ICC.seed.3.tgz b/ICC.seed.3.tgz new file mode 100644 index 0000000000000000000000000000000000000000..0258fdd0b5c8fd7cf1c6916454279f3f7a0e700e --- /dev/null +++ b/ICC.seed.3.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b52b45a04a73cf531bc9be4ef31785dc3dd2f8b0cb649aa4b8fd3ed3ec5c99e +size 4588209 diff --git a/ICC.seed.4.tgz b/ICC.seed.4.tgz new file mode 100644 index 0000000000000000000000000000000000000000..b69b096ed767f6aeeda1bf7ad8cc8799c90286ec --- /dev/null +++ b/ICC.seed.4.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:405b089729ebdcc19710f135d06e31896cfbd4a99b0e4098926443aa218817f9 +size 4563681 diff --git a/MSA.tgz b/MSA.tgz new file mode 100644 index 0000000000000000000000000000000000000000..e0234e0482c4e0320243f1640b1c9d2f26dcf8e4 --- /dev/null +++ b/MSA.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8b28e09ba5f647d720f865fc37c21063efe8fa5f6b1fe67d4b9f1348f21c6b5 +size 853335 diff --git a/MSA.tgz.part-aa b/MSA.tgz.part-aa new file mode 100644 index 0000000000000000000000000000000000000000..d0c35130605dcb94499fe00e521752753e7e908f --- /dev/null +++ b/MSA.tgz.part-aa @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:670504f78fb5828face000895457b0bdf006e2c762f90546d4160c1741abdad7 +size 756213964 diff --git a/NUDT15.tgz b/NUDT15.tgz new file mode 100644 index 0000000000000000000000000000000000000000..b0088b7d27db9e110811c02d1351a01f07e71d92 --- /dev/null +++ b/NUDT15.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac39d81c5d54a408e4a024961cd889f2702cab1f7ed40d83262067f1314c494f +size 2648155 diff --git a/PTEN.bin.tgz b/PTEN.bin.tgz new file mode 100644 index 
0000000000000000000000000000000000000000..6e4cdf76c25372254896f185e53ad2e03444560b --- /dev/null +++ b/PTEN.bin.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbe7d33dbbf5630c5c161986eb052f12deb61b7ccbf678fa7ff5bc7897ba4c59 +size 983279 diff --git a/PTEN.replicate.rest.1.tgz b/PTEN.replicate.rest.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..80493d0aaaf5c5dcd940bbcb6dcd8ded870c1259 --- /dev/null +++ b/PTEN.replicate.rest.1.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a5a8491533eb09736c97526c22aa83220d3df81cbc3e086d0c97cf82fecbac4 +size 4065895 diff --git a/PTEN.replicate.rest.2.tgz b/PTEN.replicate.rest.2.tgz new file mode 100644 index 0000000000000000000000000000000000000000..caa347c914560803fde8e055001008ac3459ac00 --- /dev/null +++ b/PTEN.replicate.rest.2.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e22735a538e5dc3ffbf3926514ea32a3b8ef5638f0fd274eacab37ec3e076f7 +size 4065999 diff --git a/PTEN.replicate.rest.3.tgz b/PTEN.replicate.rest.3.tgz new file mode 100644 index 0000000000000000000000000000000000000000..4710aa91b4c6308b854a36212a929fd7cfb0f814 --- /dev/null +++ b/PTEN.replicate.rest.3.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6bb4a7d231337c8436e3a0b46af24e7b8e87953c75676ed9849a6d27b87157d +size 4065725 diff --git a/PTEN.replicate.rest.4.tgz b/PTEN.replicate.rest.4.tgz new file mode 100644 index 0000000000000000000000000000000000000000..4f1837055ba0d8283ff8cf2f69f78724976bd60d --- /dev/null +++ b/PTEN.replicate.rest.4.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90216071931145f6c8b4a2ec31224f7bb1edce8a43490e16a57173a6cc634433 +size 4065705 diff --git a/PTEN.replicate.rest.5.tgz b/PTEN.replicate.rest.5.tgz new file mode 100644 index 0000000000000000000000000000000000000000..965da5521e559e705117a8d3102a800d1bb5d146 --- /dev/null +++ b/PTEN.replicate.rest.5.tgz @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87694c8d852f9d79230f7e064b4079902b17b4e4a66347b9b359902d987d8e23 +size 4065975 diff --git a/PTEN.replicate.rest.6.tgz b/PTEN.replicate.rest.6.tgz new file mode 100644 index 0000000000000000000000000000000000000000..c6936d991c66518c2d1ee2d574552afadf210a75 --- /dev/null +++ b/PTEN.replicate.rest.6.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c2afb3de5c57da35333d61dc11e48732916455cd4f48430c0291161702797fa +size 4065851 diff --git a/PTEN.replicate.rest.7.tgz b/PTEN.replicate.rest.7.tgz new file mode 100644 index 0000000000000000000000000000000000000000..4f2520b485c108604b1976af609c571d8874c1fd --- /dev/null +++ b/PTEN.replicate.rest.7.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:470a04794e6e88bc525ca7ca9de5f05ba0c4406cd321b3aa11a6f77beda102d2 +size 4065635 diff --git a/PTEN.replicate.rest.8.tgz b/PTEN.replicate.rest.8.tgz new file mode 100644 index 0000000000000000000000000000000000000000..7cba87ce62431d2a879b47f91f3a59dcb955fc21 --- /dev/null +++ b/PTEN.replicate.rest.8.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9004017f797684b8166ab337e3ba7211a0920fbc733d69ef1192d449d5ce51b +size 4065531 diff --git a/PTEN.tgz b/PTEN.tgz new file mode 100644 index 0000000000000000000000000000000000000000..820369fdee2b9ac96f93fd0e2ffcd50fd5ec32e4 --- /dev/null +++ b/PTEN.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4eff76435a175961076847bdea25b79f2a45a548a9b1f393dc1d73a32fbca170 +size 4244420 diff --git a/SNCA.tgz b/SNCA.tgz new file mode 100644 index 0000000000000000000000000000000000000000..ec166ba5f635cbe7d6139a65b5c14ff9cd1e0bf7 --- /dev/null +++ b/SNCA.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da09fe734efc81689f94e280656d497f3c3118e3389498b945bbae5daa0bd562 +size 1932672 diff --git a/Stab.tgz b/Stab.tgz new file mode 100644 index 
0000000000000000000000000000000000000000..7717ce6cdcf79ce99bdf7da49ecd2f7be930eba8 --- /dev/null +++ b/Stab.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b4332f6557b56d355eb012317070675941c985f217f5a52c032e4789af5dc1f +size 49191908 diff --git a/af2.files.tgz.part-aa b/af2.files.tgz.part-aa new file mode 100644 index 0000000000000000000000000000000000000000..20be72181285dc77b8462b91d9f92c32189ce17a --- /dev/null +++ b/af2.files.tgz.part-aa @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8c99e093f3deb83e5cf57f7d3f2003af69268e2d7020342d23d98dfe8464342 +size 1747009312 diff --git a/esm.MSA.tgz.part-aa b/esm.MSA.tgz.part-aa new file mode 100644 index 0000000000000000000000000000000000000000..db30b8dd09f719ac4c68c6600146d9224ebfd3cd --- /dev/null +++ b/esm.MSA.tgz.part-aa @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2a14923d1aaea14ad3ccedaafc5cc89b819811cc6fd8fd58584a7572dff0d91 +size 10737418240 diff --git a/esm.MSA.tgz.part-ab b/esm.MSA.tgz.part-ab new file mode 100644 index 0000000000000000000000000000000000000000..4aefabd5420830d4c60b9af4a8b6627e8c072d0e --- /dev/null +++ b/esm.MSA.tgz.part-ab @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a40159327f7454b4ff2ee5285e3bb7d778936bca39e41649d369aa3a3499a6aa +size 2929687838 diff --git a/esm.files.tgz.part-aa b/esm.files.tgz.part-aa new file mode 100644 index 0000000000000000000000000000000000000000..f7ea928ef75a69cf329e8da0f4e73d82b0e04560 --- /dev/null +++ b/esm.files.tgz.part-aa @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98a5c81d3651a6cc33c0e99a4e2b206fae7ee26c3bf9a0df8ee006741004a8c6 +size 10737418240 diff --git a/esm.files.tgz.part-ab b/esm.files.tgz.part-ab new file mode 100644 index 0000000000000000000000000000000000000000..8312e93657ab378a50254ddaa430d2c1aeab2c52 --- /dev/null +++ b/esm.files.tgz.part-ab @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:9caf98a5e3acba2a0ca3f9a3c3effe2803e82c0a3114bf5f911f77f105e9a949 +size 10737418240 diff --git a/esm.files.tgz.part-ac b/esm.files.tgz.part-ac new file mode 100644 index 0000000000000000000000000000000000000000..35383a27f121ae0afdf6096f08aa5433c9d0e0a8 --- /dev/null +++ b/esm.files.tgz.part-ac @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa44afe7158f565e229a8ead8d8760cb8393340c6e3287e457385d9678cb31df +size 10737418240 diff --git a/esm.files.tgz.part-ad b/esm.files.tgz.part-ad new file mode 100644 index 0000000000000000000000000000000000000000..38f1d645f9b26d4cb07e1719a3600b185edb640c --- /dev/null +++ b/esm.files.tgz.part-ad @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91d4e1b0fa3a399a6755537f64783cbe6eb0a731e2d7812f091a4919d3c39268 +size 10737418240 diff --git a/esm.files.tgz.part-ae b/esm.files.tgz.part-ae new file mode 100644 index 0000000000000000000000000000000000000000..195d11a2814f09855f3e4cf7dc6408362bc40ae0 --- /dev/null +++ b/esm.files.tgz.part-ae @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b975435639ee8fc7ab9b45da83a35dc2f246a943aec53a59d459b6a46ab6a85d +size 362286942 diff --git a/esm.inference.py b/esm.inference.py new file mode 100644 index 0000000000000000000000000000000000000000..ae29f9323d8293e31a1ff9d102720eda8dd80804 --- /dev/null +++ b/esm.inference.py @@ -0,0 +1,177 @@ +import pandas as pd +import numpy as np +import os +import esm +import torch +import argparse + +os.environ['CUDA_LAUNCH_BLOCKING'] = '-1' + + +def precompute_sequence(transcript_id, sequence, esm_model, batch_converter, out_dir, device_id=0): + if os.path.exists(os.path.join(out_dir, transcript_id + '.contacts.npy')): + return + else: + print('begin precompute sequence for {}'.format(transcript_id)) + try: + data = [(transcript_id, sequence)] + _, _, toks = batch_converter(data) + except: + print(transcript_id) + return + toks = toks.to(f'cuda:{device_id}') + aa = toks.shape[1] + if aa <= 
2250: + print(f"{transcript_id} has {toks.shape[1]} amino acids") + return + with torch.no_grad(): + out = esm_model(toks, repr_layers=[33], return_contacts=True, need_head_weights=False) + representations = out["representations"][33][0].to(device='cpu').detach().numpy() + # output is batch x layers x heads x seqlen x seqlen + # attentions = out["attentions"][0].to(device="cpu").detach().numpy() + contacts = out['contacts'][0].to(device="cpu").detach().numpy() + logits = out['logits'][0].to(device="cpu").detach().numpy() + np.save( + f"{out_dir}/{transcript_id}.representations.layer.48.npy", + representations, + ) + np.save( + f"{out_dir}/{transcript_id}.contacts.npy", + contacts, + ) + np.save( + f"{out_dir}/{transcript_id}.logits.npy", + logits, + ) + return + + +def precompute_sequence_multiple_gpus(transcript_id, sequence, esm_model, batch_converter, out_dir): + if os.path.exists(os.path.join(out_dir, transcript_id + '.contacts.npy')): + return + else: + print('begin precompute sequence for {}'.format(transcript_id)) + try: + data = [(transcript_id, sequence)] + _, _, toks = batch_converter(data) + except: + print(transcript_id) + return + toks = toks.to('cuda:0') + if toks.shape[1] > 30000: + print(f"{transcript_id} has {toks.shape[1]} amino acids, don't proceed") + return + print(f"{transcript_id} has {toks.shape[1]} amino acids") + if toks.shape[1] > 5500: + need_head_weights = False + return_contacts = False + else: + need_head_weights = True + return_contacts = True + with torch.no_grad(): + assert toks.ndim == 2 + padding_mask = toks.eq(esm_model.padding_idx) # B, T + x = esm_model.embed_scale * esm_model.embed_tokens(toks) + + if esm_model.token_dropout: + x.masked_fill_((toks == esm_model.mask_idx).unsqueeze(-1), 0.0) + # x: B x T x C + mask_ratio_train = 0.15 * 0.8 + src_lengths = (~padding_mask).sum(-1) + mask_ratio_observed = (toks == esm_model.mask_idx).sum(-1).to(x.dtype) / src_lengths + x = x * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, 
None, None] + + if padding_mask is not None: + x = x * (1 - padding_mask.unsqueeze(-1).type_as(x)) + + repr_layers = {33} + hidden_representations = {} + if 0 in repr_layers: + hidden_representations[0] = x + if need_head_weights: + attn_weights = [] + # (B, T, E) => (T, B, E) + x = x.transpose(0, 1) + if not padding_mask.any(): + padding_mask = None + for layer_idx, layer in enumerate(esm_model.layers): + x = x.to(f'cuda:{layer_idx // 9}') + x, attn = layer( + x, + self_attn_padding_mask=padding_mask, + need_head_weights=need_head_weights, + ) + if (layer_idx + 1) in repr_layers: + hidden_representations[layer_idx + 1] = x.transpose(0, 1) + if need_head_weights: + # (H, B, T, T) => (B, H, T, T) + attn_weights.append(attn.transpose(1, 0).cpu()) + x = esm_model.emb_layer_norm_after(x) + x = x.transpose(0, 1) # (T, B, E) => (B, T, E) + + # last hidden representation should have layer norm applied + if (layer_idx + 1) in repr_layers: + hidden_representations[layer_idx + 1] = x + # lm head is on cuda:0, x is on cuda:3 + x = esm_model.lm_head(x.to('cuda:0')) + out = {"logits": x, "representations": hidden_representations} + if need_head_weights: + # attentions: B x L x H x T x T + attentions = torch.stack(attn_weights, 1) + if padding_mask is not None: + attention_mask = 1 - padding_mask.type_as(attentions) + attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2) + attentions = attentions * attention_mask[:, None, None, :, :] + out["attentions"] = attentions + if return_contacts: + contacts = esm_model.contact_head(toks, attentions) + out["contacts"] = contacts + representations = out["representations"][33][0].to(device='cpu').detach().numpy() + # output is batch x layers x heads x seqlen x seqlen + + logits = out['logits'][0].to(device="cpu").detach().numpy() + np.save( + f"{out_dir}/{transcript_id}.representations.layer.48.npy", + representations, + ) + np.save( + f"{out_dir}/{transcript_id}.logits.npy", + logits, + ) + if return_contacts: + 
contacts = out['contacts'][0].to(device="cpu").detach().numpy() + np.save( + f"{out_dir}/{transcript_id}.contacts.npy", + contacts, + ) + return + + +def main(file=None, outdir=None): + model, alphabet = esm.pretrained.esm2_t33_650M_UR50D() + if torch.cuda.is_available(): + # manually split the model into 4 GPUs + model.embed_tokens.to('cuda:0') + for layer_idx, layer in enumerate(model.layers): + layer.to(f'cuda:{layer_idx // 9}') + model.emb_layer_norm_after.to('cuda:3') + model.lm_head.to('cuda:0') + model.contact_head.to('cpu') + print("Transferred model to GPUs") + # model = model.to(f'cuda:{rank}') + if file is None: + return + files = pd.read_csv(file, index_col=0) + os.makedirs(outdir, exist_ok=True) + for transcript_id, sequence in zip(files['uniprotID'], files['sequence']): + precompute_sequence_multiple_gpus(transcript_id, sequence, model, + alphabet.get_batch_converter(), + outdir) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--file', type=str, default=None) + parser.add_argument('--outdir', type=str, default=None) + args = parser.parse_args() + main(args.file, args.outdir) diff --git a/fluorescence.tgz b/fluorescence.tgz new file mode 100644 index 0000000000000000000000000000000000000000..b1a9fb289793ba4f19d43a9e7ebe79538e4e7dc1 --- /dev/null +++ b/fluorescence.tgz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1089b47959bb0540b338460de27a666dd1788372d3bb499e0b33fa4089c05448 +size 15124238 diff --git a/gMVP.MSA.tgz.part-aa b/gMVP.MSA.tgz.part-aa new file mode 100644 index 0000000000000000000000000000000000000000..403a02ea07ebcd72b122b6059dc795cf77940222 --- /dev/null +++ b/gMVP.MSA.tgz.part-aa @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9e251ed2ac02926a7f121b585eef3bb8fe351826c2ebf2410fc93f1a7dcd146 +size 3105673966 diff --git a/preprocess.gene.pfam.R b/preprocess.gene.pfam.R new file mode 100644 index 
0000000000000000000000000000000000000000..c44f0c548bf0d557d41d378dcf41bb1a907cad43 --- /dev/null +++ b/preprocess.gene.pfam.R @@ -0,0 +1,83 @@ +source('/share/vault/Users/gz2294/Pipeline/uniprot.table.add.annotation.R') +ALL <- read.csv('ALL.csv', row.names = 1) + +ALL <- uniprot.table.add.annotation.parallel(ALL, "InterPro") +# remove glazer +ALL <- ALL[ALL$data_source != "glazer",] + +good.uniprotIDs <- data.frame( + uniprotID=c("P15056", "P21802", "P07949", + "P04637", "Q09428", "O00555", + "Q14654", "Q99250", "Q14524")) +good.uniprotIDs.df <- data.frame() +frac <- 0.8 +for (seed in 0:4) { + split.dir <- paste0('ICC.seed.', seed, '/') + dir.create(split.dir) + for (i in 1:dim(good.uniprotIDs)[1]) { + gene.itan <- ALL[ALL$uniprotID==good.uniprotIDs$uniprotID[i],] + # prepare some types + pfams <- unique(unlist(strsplit(gene.itan$InterPro,";"))) + # pick ratio% of variants as training + for (pfam in pfams) { + set.seed(seed) + GO.itan <- ALL[grep(pfam, ALL$InterPro),] + GO.itan.training <- GO.itan[!GO.itan$uniprotID %in% gene.itan$uniprotID,] + if (dim(GO.itan.training)[1] > 0) { + GO.itan.training$split <- 'train' + } + # select only the data in domain as train/test + gene.itan.domain <- gene.itan[grep(pfam, gene.itan$InterPro),] + # random select testing and validation + # select equal amount of gof and lof for testing + gof.training <- sample(which(gene.itan.domain$score==1), size = floor(sum(gene.itan.domain$score==1)*frac)) + lof.training <- sample(which(gene.itan.domain$score==-1), size = floor(sum(gene.itan.domain$score==-1)*frac)) + # select equal amount of gof and lof for validation + if (length(gof.training) > 0 & length(lof.training) > 0) { + gene.itan.domain.training <- gene.itan.domain[c(gof.training, lof.training),] + gene.itan.domain.training$split <- 'train' + gof.val <- sample(which(gene.itan.domain.training$score==1), size = floor(sum(gene.itan.domain$score==1)*(1-frac))) + lof.val <- sample(which(gene.itan.domain.training$score==-1), size = 
floor(sum(gene.itan.domain$score==-1)*(1-frac))) + gene.itan.domain.training$split[c(gof.val, lof.val)] <- 'val' + + GO.itan.testing <- gene.itan.domain[-c(gof.training, lof.training),] + if (dim(GO.itan.testing)[1] > 0) { + # first save the gene itself + dir.create(paste0(split.dir, good.uniprotIDs$uniprotID[i], '.', pfam, ".", "self")) + write.csv(gene.itan.domain.training[sample(dim(gene.itan.domain.training)[1]),], paste0(split.dir, good.uniprotIDs$uniprotID[i], ".", pfam, ".self", "/training.csv")) + write.csv(GO.itan.testing, paste0(split.dir, good.uniprotIDs$uniprotID[i], ".", pfam, ".self", "/testing.csv")) + good.uniprotIDs.df <- rbind(good.uniprotIDs.df, + data.frame(dataID=paste0(good.uniprotIDs$uniprotID[i], ".", pfam, ".self"), + uniprotID=paste0(good.uniprotIDs$uniprotID[i]), + pfam=pfam, + gof.training=sum(gene.itan.domain.training$score==1), + lof.training=sum(gene.itan.domain.training$score==-1), + gof.testing=sum(GO.itan.testing$score==1), + lof.testing=sum(GO.itan.testing$score==-1), + seed=seed)) + # next concatenate and shuffle + GO.itan.training <- dplyr::bind_rows(gene.itan.domain.training, GO.itan.training) + GO.itan.training <- GO.itan.training[sample(dim(GO.itan.training)[1]),] + GO.itan.testing <- GO.itan.testing[sample(dim(GO.itan.testing)[1]),] + # save the training files + dir.create(paste0(split.dir, good.uniprotIDs$uniprotID[i], '.', pfam, ".", pfam)) + write.csv(GO.itan.training, paste0(split.dir, good.uniprotIDs$uniprotID[i], ".", pfam, ".", pfam, "/training.csv")) + write.csv(GO.itan.testing, paste0(split.dir, good.uniprotIDs$uniprotID[i], ".", pfam, ".", pfam, "/testing.csv")) + + good.uniprotIDs.df <- rbind(good.uniprotIDs.df, + data.frame(dataID=paste0(good.uniprotIDs$uniprotID[i], ".", pfam, ".", pfam), + uniprotID=paste0(good.uniprotIDs$uniprotID[i]), + pfam=pfam, + gof.training=sum(GO.itan.training$score==1), + lof.training=sum(GO.itan.training$score==-1), + gof.testing=sum(GO.itan.testing$score==1), + 
lof.testing=sum(GO.itan.testing$score==-1), + seed=seed)) + } + } + } + } +} +entry.list <- read.delim('/share/vault/Users/gz2294/Data/Protein/InterPro/entry.list') +good.uniprotIDs.df$name <- entry.list$ENTRY_NAME[match(good.uniprotIDs.df$pfam, entry.list$ENTRY_AC)] +write.csv(good.uniprotIDs.df, file = "good.uniprotIDs.InterPros.csv") diff --git a/preprocess.separate.gene.R b/preprocess.separate.gene.R new file mode 100644 index 0000000000000000000000000000000000000000..c9b6fdb37380ef5c1d32e5389274f5d6a6da7437 --- /dev/null +++ b/preprocess.separate.gene.R @@ -0,0 +1,63 @@ +source('../analysis/prepare.biochem.R') +ALL <- read.csv('../analysis/figs/ALL.csv', row.names = 1) +ALL$score.label <- NULL +gof.lof.df <- data.frame(uniprotIDs=as.character(unique(unlist(strsplit(ALL$uniprotID, split = ";")))), gof=0, lof=0) +for (i in 1:dim(gof.lof.df)[1]) { + gene <- ALL[grep(gof.lof.df$uniprotIDs[i], ALL$uniprotID),] + gof.lof.df$gof[i] <- sum(gene$score==1) + gof.lof.df$lof[i] <- sum(gene$score==-1) +} + +good.uniprotIDs <- gof.lof.df[gof.lof.df$gof >= 15 & gof.lof.df$lof >= 15, ] + +# split by random +ratio <- 0.8 +good.uniprotIDs$gof.training <- NA +good.uniprotIDs$gof.testing <- NA +good.uniprotIDs$lof.training <- NA +good.uniprotIDs$lof.testing <- NA +for (seed in 0:4) { + split.dir <- paste0('ICC.seed.', seed, '/') + dir.create(split.dir) + for (i in 1:dim(good.uniprotIDs)[1]) { + gene <- ALL[grep(good.uniprotIDs$uniprotIDs[i], ALL$uniprotID),] + gene.gof <- gene[gene$score == 1,] + gene.lof <- gene[gene$score == -1,] + set.seed(seed) + # pick ratio% of variants as training + if (floor(dim(gene.gof)[1] * ratio) > 0 & + floor(dim(gene.lof)[1] * ratio) > 0) { + gene.gof.training <- sample(dim(gene.gof)[1], floor(dim(gene.gof)[1] * ratio)) + gene.lof.training <- sample(dim(gene.lof)[1], floor(dim(gene.lof)[1] * ratio)) + gene.training <- rbind(gene.gof[gene.gof.training,], gene.lof[gene.lof.training,]) + gene.testing <- rbind(gene.gof[-gene.gof.training,], 
gene.lof[-gene.lof.training,]) + + gene.training <- gene.training[sample(dim(gene.training)[1]),] + gene.testing <- gene.testing[sample(dim(gene.testing)[1]),] + + dir.create(paste0(split.dir, good.uniprotIDs$uniprotIDs[i])) + + uid <- good.uniprotIDs$uniprotIDs[i] + if (uid == 'Q14524') { + uid <- 'Q14524.clean' + } + + if (!file.exists(paste0(split.dir, uid, "/training.csv")) | uid=='Q99250') { + print(uid) + write.csv(gene.training, paste0(split.dir, uid, "/training.csv")) + } + if (!file.exists(paste0(split.dir, uid, "/testing.csv")) | uid=='Q99250') { + print(uid) + write.csv(gene.testing, paste0(split.dir, uid, "/testing.csv")) + } + + good.uniprotIDs$gof.training[i] <- sum(tmp.training$score==1) + good.uniprotIDs$lof.training[i] <- sum(tmp.training$score==-1) + + good.uniprotIDs$gof.testing[i] <- sum(tmp.testing$score==1) + good.uniprotIDs$lof.testing[i] <- sum(tmp.testing$score==-1) + + } + } +} +write.csv(good.uniprotIDs, file = "sup.data.1.csv") \ No newline at end of file diff --git a/preprocess.separate.gene.itan.R b/preprocess.separate.gene.itan.R new file mode 100644 index 0000000000000000000000000000000000000000..1ee2ffad732109bd1c4fb1fc06ea2ad5c95fbe5c --- /dev/null +++ b/preprocess.separate.gene.itan.R @@ -0,0 +1,258 @@ +# sum the variant number of uniprotIDs in the dataset +library(ggplot2) +source('~/Pipeline/uniprot.table.add.annotation.R') +ALL <- read.csv('ALL.csv', row.names = 1) +ALL$score[ALL$score == 0] <- -1 +# remove glazer +ALL <- ALL[ALL$data_source != "glazer",] +ALL$score.label <- NULL +good.uniprotIDs <- data.frame( + uniprotIDs=c("P15056", "P21802", "P07949", + "P04637", "Q09428", "O00555", + "Q14654", "Q99250")) + +core.columns <- c("uniprotID", "ref", "alt", "pos.orig", "ENST", "wt.orig", "score") +for (i in 1:dim(good.uniprotIDs)[1]) { + GO <- ALL[grep(good.uniprotIDs$uniprotIDs[i], ALL$uniprotID),] + print(table(GO$score[!grepl('Itan', GO$data_source)])) +} + +itan.aucs.1 <- c() +itan.aucs.2 <- c() +# split by random, add beni 
+ratio <- 0.25 +good.uniprotIDs$gof.training <- NA +good.uniprotIDs$gof.testing <- NA +good.uniprotIDs$lof.training <- NA +good.uniprotIDs$lof.testing <- NA +for (seed in 0:4) { + split.dir <- paste0('ICC.seed.', seed, '/') + dir.create(split.dir) + for (i in 1:dim(good.uniprotIDs)[1]) { + GO <- ALL[grep(good.uniprotIDs$uniprotIDs[i], ALL$uniprotID),] + GO.gof <- GO[GO$score == 1,] + GO.lof <- GO[GO$score == -1,] + # split variants from GO and other + GO.non.itan.gof <- GO.gof[!grepl('Itan', GO.gof$data_source),] + GO.non.itan.lof <- GO.lof[!grepl('Itan', GO.lof$data_source),] + GO.itan.gof <- GO.gof[grepl('Itan', GO.gof$data_source),] + GO.itan.lof <- GO.lof[grepl('Itan', GO.lof$data_source),] + + set.seed(seed) + # pick ratio% of variants as training + if (floor(dim(GO.gof)[1] * ratio) > 0 & + floor(dim(GO.lof)[1] * ratio) > 0) { + GO.gof.testing <- sample(dim(GO.non.itan.gof)[1], min(dim(GO.non.itan.gof)[1], floor(dim(GO.gof)[1] * ratio))) + GO.lof.testing <- sample(dim(GO.non.itan.lof)[1], min(dim(GO.non.itan.lof)[1], floor(dim(GO.lof)[1] * ratio))) + GO.training <- rbind(GO.itan.gof, + GO.itan.lof, + GO.non.itan.gof[-GO.gof.testing,], + GO.non.itan.lof[-GO.lof.testing,]) + GO.testing <- rbind(GO.non.itan.gof[GO.gof.testing,], + GO.non.itan.lof[GO.lof.testing,]) + + GO.training <- GO.training[sample(dim(GO.training)[1]),] + GO.testing <- GO.testing[sample(dim(GO.testing)[1]),] + # beni.training.beni <- beni.training[beni.training$score==0,] + if (sum(is.na(GO.testing$itan.beni)) > 0) { + print(good.uniprotIDs$uniprotIDs[i]) + print(seed) + } + if (dim(GO.testing)[1] > 0) { + # print(paste0(good.uniprotIDs$uniprotIDs[i], ":", seed)) + itan.aucs.1 <- c(itan.aucs.1, plot.AUC(GO.testing$score, 1-GO.testing$itan.gof)$auc) + itan.aucs.2 <- c(itan.aucs.2, plot.AUC(GO.testing$score, GO.testing$itan.lof/GO.testing$itan.gof)$auc) + } else { + itan.aucs.1 <- c(itan.aucs.1, NA) + itan.aucs.2 <- c(itan.aucs.2, NA) + } + dir.create(paste0(split.dir, 
good.uniprotIDs$uniprotIDs[i], '.itan.split')) + write.csv(GO.training, paste0(split.dir, good.uniprotIDs$uniprotIDs[i], ".itan.split/training.csv")) + # write.csv(beni.training.beni, paste0(split.dir, good.uniprotIDs$uniprotIDs[i], ".chps/beni.csv")) + write.csv(GO.testing, paste0(split.dir, good.uniprotIDs$uniprotIDs[i], ".itan.split/testing.csv")) + + good.uniprotIDs$gof.training[i] <- sum(GO.training$score==1) + good.uniprotIDs$lof.training[i] <- sum(GO.training$score==-1) + good.uniprotIDs$beni.training[i] <- sum(GO.training$score==0) + good.uniprotIDs$patho.training[i] <- sum(GO.training$score==3) + + good.uniprotIDs$gof.testing[i] <- sum(GO.testing$score==1) + good.uniprotIDs$lof.testing[i] <- sum(GO.testing$score==-1) + good.uniprotIDs$beni.testing[i] <- sum(GO.testing$score==0) + good.uniprotIDs$patho.testing[i] <- sum(GO.testing$score==3) + + } + } +} +write.csv(good.uniprotIDs, file = "good.uniprotIDs.itan.csv") + +# # split by random, add beni +ratio <- 0.25 +# good.uniprotIDs$gof.training <- NA +# good.uniprotIDs$gof.testing <- NA +# good.uniprotIDs$lof.training <- NA +# good.uniprotIDs$lof.testing <- NA +# source('~/Pipeline/AUROC.R') +# +# for (seed in 0:4) { +# if (seed == 0) { +# split.dir <- paste0('pfams.add.beni.', 0.8, '/') +# } else { +# split.dir <- paste0('pfams.add.beni.', 0.8, '.seed.', seed, '/') +# } +# dir.create(split.dir) +# for (i in 1:dim(good.uniprotIDs)[1]) { +# GO <- ALL[grep(good.uniprotIDs$uniprotIDs[i], ALL$uniprotID),] +# GO.gof <- GO[GO$score == 1,] +# GO.lof <- GO[GO$score == -1,] +# # split variants from GO and other +# non.itan.gof.idx <- which(GO.gof$data_source != 'Itan' & !is.na(GO.gof$itan.beni)) +# non.itan.lof.idx <- which(GO.lof$data_source != 'Itan' & !is.na(GO.lof$itan.beni)) +# +# GO.non.itan.gof <- GO.gof[non.itan.gof.idx,] +# GO.non.itan.lof <- GO.lof[non.itan.lof.idx,] +# GO.itan.gof <- GO.gof[-non.itan.gof.idx,] +# GO.itan.lof <- GO.lof[-non.itan.lof.idx,] +# +# set.seed(seed) +# # pick ratio% of variants as 
training +# if (floor(dim(GO.gof)[1] * ratio) > 0 & +# floor(dim(GO.lof)[1] * ratio) > 0) { +# GO.gof.testing <- sample(dim(GO.non.itan.gof)[1], min(dim(GO.non.itan.gof)[1], floor(dim(GO.gof)[1] * ratio))) +# GO.lof.testing <- sample(dim(GO.non.itan.lof)[1], min(dim(GO.non.itan.lof)[1], floor(dim(GO.lof)[1] * ratio))) +# GO.training <- rbind(GO.itan.gof, +# GO.itan.lof, +# GO.non.itan.gof[-GO.gof.testing,], +# GO.non.itan.lof[-GO.lof.testing,]) +# GO.testing <- rbind(GO.non.itan.gof[GO.gof.testing,], +# GO.non.itan.lof[GO.lof.testing,]) +# +# GO.training <- GO.training[sample(dim(GO.training)[1]),] +# GO.testing <- GO.testing[sample(dim(GO.testing)[1]),] +# # beni.training.beni <- beni.training[beni.training$score==0,] +# if (dim(GO.testing)[1] > 0) { +# print(paste0(good.uniprotIDs$uniprotIDs[i], ":", seed)) +# itan.aucs.1 <- c(itan.aucs.1, plot.AUC(GO.testing$score, GO.testing$itan.gof)$auc) +# itan.aucs.2 <- c(itan.aucs.2, plot.AUC(GO.testing$score, GO.testing$itan.gof/GO.testing$itan.lof)$auc) +# } else { +# itan.aucs.1 <- c(itan.aucs.1, NA) +# itan.aucs.2 <- c(itan.aucs.2, NA) +# } +# dir.create(paste0(split.dir, good.uniprotIDs$uniprotIDs[i], '.itan.split.clean')) +# write.csv(GO.training, paste0(split.dir, good.uniprotIDs$uniprotIDs[i], ".itan.split.clean/training.csv")) +# # write.csv(beni.training.beni, paste0(split.dir, good.uniprotIDs$uniprotIDs[i], ".chps/beni.csv")) +# write.csv(GO.testing, paste0(split.dir, good.uniprotIDs$uniprotIDs[i], ".itan.split.clean/testing.csv")) +# +# good.uniprotIDs$gof.training[i] <- sum(GO.training$score==1) +# good.uniprotIDs$lof.training[i] <- sum(GO.training$score==-1) +# good.uniprotIDs$beni.training[i] <- sum(GO.training$score==0) +# good.uniprotIDs$patho.training[i] <- sum(GO.training$score==3) +# +# good.uniprotIDs$gof.testing[i] <- sum(GO.testing$score==1) +# good.uniprotIDs$lof.testing[i] <- sum(GO.testing$score==-1) +# good.uniprotIDs$beni.testing[i] <- sum(GO.testing$score==0) +# good.uniprotIDs$patho.testing[i] 
# (tail of the commented-out per-gene itan.split bookkeeping from above)
# good.uniprotIDs$patho.testing[i] <- sum(GO.testing$score==3)
# } } }
# write.csv(good.uniprotIDs, file = "good.uniprotIDs.itan.clean.csv")

# ---- SCN5A (Q14524): drop 'glazer' variants, Itan-aware split --------------
# Training keeps every Itan-sourced variant; testing is drawn only from the
# non-Itan variants that have a defined itan.beni score.
# NOTE(review): assumes `ALL`, `ratio`, `itan.aucs.1`, `itan.aucs.2` and
# plot.AUC() were defined earlier in this file -- confirm before running
# this section standalone.
for (seed in 0:4) {
  if (seed == 0) {
    split.dir <- paste0('pfams.add.beni.', 0.8, '/')
  } else {
    split.dir <- paste0('pfams.add.beni.', 0.8, '.seed.', seed, '/')
  }
  dir.create(split.dir, showWarnings = FALSE)
  GO <- ALL[grep('Q14524', ALL$uniprotID),]
  GO <- GO[GO$data_source != 'glazer',]
  GO.gof <- GO[GO$score == 1,]
  GO.lof <- GO[GO$score == -1,]
  # split variants into Itan-sourced vs non-Itan (with itan.beni available)
  non.itan.gof.idx <- which(!grepl('Itan', GO.gof$data_source) & !is.na(GO.gof$itan.beni))
  non.itan.lof.idx <- which(!grepl('Itan', GO.lof$data_source) & !is.na(GO.lof$itan.beni))

  GO.non.itan.gof <- GO.gof[non.itan.gof.idx,]
  GO.non.itan.lof <- GO.lof[non.itan.lof.idx,]
  # BUGFIX: df[-integer(0),] selects ZERO rows, so when no non-Itan variant
  # exists the Itan subset silently became empty; only use negative indexing
  # when at least one index was found.
  GO.itan.gof <- if (length(non.itan.gof.idx) > 0) GO.gof[-non.itan.gof.idx,] else GO.gof
  GO.itan.lof <- if (length(non.itan.lof.idx) > 0) GO.lof[-non.itan.lof.idx,] else GO.lof

  set.seed(seed)
  # hold out up to ratio% of the gene's variants (capped by non-Itan count)
  if (floor(dim(GO.gof)[1] * ratio) > 0 &
      floor(dim(GO.lof)[1] * ratio) > 0) {
    GO.gof.testing <- sample(dim(GO.non.itan.gof)[1], min(dim(GO.non.itan.gof)[1], floor(dim(GO.gof)[1] * ratio)))
    GO.lof.testing <- sample(dim(GO.non.itan.lof)[1], min(dim(GO.non.itan.lof)[1], floor(dim(GO.lof)[1] * ratio)))
    # BUGFIX: same empty-negative-index guard as above -- the sample can be
    # empty when the non-Itan pool is empty.
    GO.training <- rbind(GO.itan.gof,
                         GO.itan.lof,
                         if (length(GO.gof.testing) > 0) GO.non.itan.gof[-GO.gof.testing,] else GO.non.itan.gof,
                         if (length(GO.lof.testing) > 0) GO.non.itan.lof[-GO.lof.testing,] else GO.non.itan.lof)
    GO.testing <- rbind(GO.non.itan.gof[GO.gof.testing,],
                        GO.non.itan.lof[GO.lof.testing,])

    # shuffle rows before writing
    GO.training <- GO.training[sample(dim(GO.training)[1]),]
    GO.testing <- GO.testing[sample(dim(GO.testing)[1]),]
    if (dim(GO.testing)[1] > 0) {
      itan.aucs.1 <- c(itan.aucs.1, plot.AUC(GO.testing$score, GO.testing$itan.gof)$auc)
      itan.aucs.2 <- c(itan.aucs.2, plot.AUC(GO.testing$score, GO.testing$itan.gof/GO.testing$itan.lof)$auc)
    } else {
      itan.aucs.1 <- c(itan.aucs.1, NA)
      itan.aucs.2 <- c(itan.aucs.2, NA)
    }
    dir.create(paste0(split.dir, 'Q14524', '.clean.itan.split'), showWarnings = FALSE)
    write.csv(GO.training, paste0(split.dir, 'Q14524', ".clean.itan.split/training.csv"))
    write.csv(GO.testing, paste0(split.dir, 'Q14524', ".clean.itan.split/testing.csv"))
  }
}

# ---- SCN5A: drop 'glazer', plain 80/20 split (no Itan handling) ------------
ratio <- 0.8
for (seed in 0:4) {
  if (seed == 0) {
    split.dir <- paste0('pfams.add.beni.', 0.8, '/')
  } else {
    split.dir <- paste0('pfams.add.beni.', 0.8, '.seed.', seed, '/')
  }
  dir.create(split.dir, showWarnings = FALSE)
  GO <- ALL[grep('Q14524', ALL$uniprotID),]
  GO.itan <- GO[GO$data_source != 'glazer',]
  GO.itan.gof <- GO.itan[GO.itan$score == 1,]
  GO.itan.lof <- GO.itan[GO.itan$score == -1,]
  set.seed(seed)
  # pick ratio% of variants as training; the guard guarantees both samples
  # are non-empty, so negative indexing is safe here
  if (floor(dim(GO.itan.gof)[1] * ratio) > 0 &
      floor(dim(GO.itan.lof)[1] * ratio) > 0) {
    GO.itan.gof.training <- sample(dim(GO.itan.gof)[1], floor(dim(GO.itan.gof)[1] * ratio))
    GO.itan.lof.training <- sample(dim(GO.itan.lof)[1], floor(dim(GO.itan.lof)[1] * ratio))
    GO.itan.training <- rbind(GO.itan.gof[GO.itan.gof.training,],
                              GO.itan.lof[GO.itan.lof.training,])
    GO.itan.testing <- rbind(GO.itan.gof[-GO.itan.gof.training,],
                             GO.itan.lof[-GO.itan.lof.training,])
    GO.itan.training <- GO.itan.training[sample(dim(GO.itan.training)[1]),]
    GO.itan.testing <- GO.itan.testing[sample(dim(GO.itan.testing)[1]),]

    dir.create(paste0(split.dir, 'Q14524', '.clean'), showWarnings = FALSE)
    write.csv(GO.itan.training, paste0(split.dir, 'Q14524', ".clean/training.csv"))
    write.csv(GO.itan.testing, paste0(split.dir, 'Q14524', ".clean/testing.csv"))
  }
}

# # for FGFR2, further clean data, only use CKB validated as testing
# fgfr2.check <- read.csv('fgfr2.check.csv')
# fgfr2.check$uniprotID <- 'P21802'
#
# source('/share/vault/Users/gz2294/Pipeline/dnv.table.to.uniprot.R')
# fgfr2.check <- dnv.table.to.uniprot.by.af2.uniprotID.parallel(fgfr2.check, 'aaChg', 'score', 'uniprotID', 'aaChg')
# source('/share/vault/Users/gz2294/Pipeline/uniprot.table.add.annotation.R')
# fgfr2.check <- uniprot.table.add.annotation.parallel(fgfr2.check$result.noNA, 'Itan')

# ==== preprocess.separate.gene.subset.R =====================================
# For each well-powered gene, sub-sample the seed-0 training split at ratios
# 1/8, 2/8, 4/8 and 6/8 to measure how performance scales with training-set
# size.  Testing data are never sub-sampled, only reshuffled.
good.uniprotIDs <- c('P15056', 'P21802', 'P07949', 'P04637', 'Q09428', 'O00555', 'Q14654', 'Q99250', 'Q14524.clean')
# split by random, add beni
good.uniprotIDs.df <- data.frame()
for (seed in 0:4) {
  split.dir <- paste0('ICC.seed.', seed, '/')
  ratios <- c(1, 2, 4, 6)  # eighths of the full training set
  dir.create(split.dir, showWarnings = FALSE)
  for (i in 1:length(good.uniprotIDs)) {
    # always start from the seed-0 base split; `seed` only changes which rows
    # are sub-sampled, not the underlying train/test partition
    GO.itan.training <- read.csv(paste0('ICC.seed.', 0, '/', good.uniprotIDs[i], '/training.csv'))
    GO.itan.testing <- read.csv(paste0('ICC.seed.', 0 ,'/', good.uniprotIDs[i], '/testing.csv'))
    # fixed GoF/LoF pools -- NOT recomputed inside the ratio loop, so every
    # ratio is sampled from the full seed-0 training set
    GO.itan.gof <- GO.itan.training[GO.itan.training$score==1,]
    GO.itan.lof <- GO.itan.training[GO.itan.training$score==-1,]
    set.seed(seed)
    for (ratio in ratios) {
      # NOTE(review): the guard uses floor() but the sample size uses
      # ceiling(), so a class with fewer than 8/ratio variants is skipped
      # entirely -- presumably intentional; confirm.
      if (floor(dim(GO.itan.gof)[1] * ratio/8) > 0 &
          floor(dim(GO.itan.lof)[1] * ratio/8) > 0) {
        GO.itan.gof.training <- sample(dim(GO.itan.gof)[1], ceiling(dim(GO.itan.gof)[1] * ratio/8))
        GO.itan.lof.training <- sample(dim(GO.itan.lof)[1], ceiling(dim(GO.itan.lof)[1] * ratio/8))
        GO.itan.training <- rbind(GO.itan.gof[GO.itan.gof.training,],
                                  GO.itan.lof[GO.itan.lof.training,])
        GO.itan.training$split <- 'train'

        # shuffle both tables before writing
        GO.itan.training <- GO.itan.training[sample(dim(GO.itan.training)[1]),]
        GO.itan.testing <- GO.itan.testing[sample(dim(GO.itan.testing)[1]),]

        # FIX: suppress the warning spam dir.create() emits when the
        # directory already exists (reruns are expected, see file.exists below)
        dir.create(paste0(split.dir, good.uniprotIDs[i], '.subset2.', ratio), showWarnings = FALSE)
        if (!file.exists(paste0(split.dir, good.uniprotIDs[i], '.subset2.', ratio, "/training.csv"))) {
          print(good.uniprotIDs[i])
          write.csv(GO.itan.training, paste0(split.dir, good.uniprotIDs[i], '.subset2.', ratio, "/training.csv"))
        }
        if (!file.exists(paste0(split.dir, good.uniprotIDs[i], '.subset2.', ratio, "/testing.csv"))) {
          print(good.uniprotIDs[i])
          write.csv(GO.itan.testing, paste0(split.dir, good.uniprotIDs[i], '.subset2.', ratio, "/testing.csv"))
        }
        # bookkeeping; the *.val counts are always 0 because `split` is only
        # ever set to 'train' above -- kept for column-schema compatibility
        good.uniprotIDs.df <- rbind(good.uniprotIDs.df,
                                    data.frame(gene=good.uniprotIDs[i],
                                               ratio=ratio,
                                               seed=seed,
                                               gof.training = sum(GO.itan.training$score==1 & GO.itan.training$split=='train'),
                                               lof.training = sum(GO.itan.training$score==-1 & GO.itan.training$split=='train'),
                                               gof.val = sum(GO.itan.training$score==1 & GO.itan.training$split=='val'),
                                               lof.val = sum(GO.itan.training$score==-1 & GO.itan.training$split=='val'),
                                               gof.testing = sum(GO.itan.testing$score==1),
                                               lof.testing = sum(GO.itan.testing$score==-1)))
      }
    }
  }
}

write.csv(good.uniprotIDs.df, file = "good.uniprotIDs.subsets.csv")
diff --git a/pretrain.tgz.part-aa b/pretrain.tgz.part-aa new file mode 100644 index 0000000000000000000000000000000000000000..4144f8c9f80180c3c03038d22ec5e0deae3a7950 --- /dev/null +++ b/pretrain.tgz.part-aa @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d4f19e4fca0765d0ede2c93de380d0134c14c906ce3b75232893727c1ba0f9b +size 894753652 diff --git a/pretrain/testing.csv.gz b/pretrain/testing.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..f45a67e98de5a3de2d194be221255a484bcba5f1 --- /dev/null +++ b/pretrain/testing.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ce066487c65d6a810759ed7087960959bc860e45409cdb2e8b946bdb37173f1 +size 1138034 diff --git a/pretrain/training.0.csv.gz b/pretrain/training.0.csv.gz new file mode 100644 index
0000000000000000000000000000000000000000..cc0fea2c81f85c8b6311a131c26cb4f0ea71d207 --- /dev/null +++ b/pretrain/training.0.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d1bfe529668cf8706daae5b69b602775add2787b7f88ad367810e3d08c97b53 +size 24972339 diff --git a/pretrain/training.1.csv.gz b/pretrain/training.1.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..e11a3621180c7ce511baa979bb65595ceff7e075 --- /dev/null +++ b/pretrain/training.1.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4918e5ba8c2e4ccb27505a6517b32b228ccf4eacce4d4052c7562c80ce90245 +size 22754591 diff --git a/pretrain/training.2.csv.gz b/pretrain/training.2.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..40847d3ee3d800a314b2f98ac6bb1f2e22e4e773 --- /dev/null +++ b/pretrain/training.2.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfcc1224356ae0e684d1d8f64f4b24ad3d78b552f158f16e009bf2e30c7681e7 +size 22809905 diff --git a/pretrain/training.3.csv.gz b/pretrain/training.3.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..35cc181f96cd9e7890248e249c886b739aa22c39 --- /dev/null +++ b/pretrain/training.3.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dec4591c575c60228349b2eac512afb67513bb5d7e78a4dc22f401192bb1b84c +size 22286420 diff --git a/pretrain/training.csv.gz b/pretrain/training.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..f4efc1d3487c40fc559995bd3de63b6dac746218 --- /dev/null +++ b/pretrain/training.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc50e2d1946267babc3ccb267374d13196c1a145ff58476b5d33f9968e80f7da +size 93760937 diff --git a/ptm.small.csv.gz b/ptm.small.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..99868fe8d0922d0279c873810ae523b23f48d1d4 --- /dev/null +++ b/ptm.small.csv.gz @@ -0,0 +1,3 @@ 
version https://git-lfs.github.com/spec/v1
oid sha256:c1b4d1e38fe5fe797dd5189751869335e6ea3f2dfc6adeda91df3fa14b835184
size 2419426
diff --git a/revision.all.glof.itan.split.R b/revision.all.glof.itan.split.R new file mode 100644 index 0000000000000000000000000000000000000000..0fca0d1da69c8f1776aff74acfb4e8d7562e8bb3 --- /dev/null +++ b/revision.all.glof.itan.split.R @@ -0,0 +1,67 @@
# ==== revision.all.glof.itan.split.R ========================================
# Random 80/20 per-gene split over all genes EXCEPT the nine benchmark genes,
# whose precomputed itan splits are appended from ICC.seed.<seed>/.
icc.all <- read.csv(gzfile('../analysis/figs/ALL.csv.gz'), row.names = 1)
icc.all$score.label <- NULL
# do random split in all genes, keep it the same as seed 0 1 2 3 4 results
nine.genes <- read.csv('../scripts/gene.itan.txt', header = FALSE)$V1
icc.no.nine.genes <- icc.all[!icc.all$uniprotID %in% gsub('.clean.itan.split|.itan.split', '', nine.genes),]
# Row-bind two data frames, coercing any shared column whose storage types
# disagree to character first (bind_rows errors on type conflicts).
my.bind.rows <- function (df1, df2) {
  for (c in colnames(df1)[colnames(df1) %in% colnames(df2)]) {
    if(typeof(df1[,c])!=typeof(df2[,c])) {
      df1[,c] <- as.character(df1[,c])
      df2[,c] <- as.character(df2[,c])
    }
  }
  result <- dplyr::bind_rows(df1, df2)
  result
}
for (seed in 0:4) {
  # for other genes in icc, split by 80-20 for each gene randomly
  training <- c()
  testing <- c()
  for (uid in unique(icc.no.nine.genes$uniprotID)) {
    uid.gof <- which(icc.no.nine.genes$uniprotID==uid & icc.no.nine.genes$score==1)
    uid.lof <- which(icc.no.nine.genes$uniprotID==uid & icc.no.nine.genes$score==-1)
    set.seed(seed)
    # BUGFIX: sample(x, k) treats a length-1 numeric x as 1:x (classic R
    # footgun); x[sample.int(length(x), k)] is RNG-identical for length > 1
    # and correct for length 1.  floor() is now explicit on both branches
    # (the lof branch previously relied on implicit size truncation).
    if (length(uid.gof) > 0) {
      uid.gof.trn <- uid.gof[sample.int(length(uid.gof), floor(length(uid.gof)*0.8))]
      uid.gof.tst <- uid.gof[!uid.gof %in% uid.gof.trn]
    } else {
      uid.gof.trn <- c()
      uid.gof.tst <- c()
    }
    if (length(uid.lof) > 0) {
      uid.lof.trn <- uid.lof[sample.int(length(uid.lof), floor(length(uid.lof)*0.8))]
      uid.lof.tst <- uid.lof[!uid.lof %in% uid.lof.trn]
    } else {
      uid.lof.trn <- c()
      uid.lof.tst <- c()
    }
    training <- c(training, uid.gof.trn, uid.lof.trn)
    testing <- c(testing, uid.gof.tst, uid.lof.tst)
  }
  # aggregate results; append the nine genes' precomputed seed splits
  icc.train <- icc.no.nine.genes[training,]
  icc.test <- icc.no.nine.genes[testing,]
  for (uid in nine.genes) {
    icc.train <- my.bind.rows(icc.train, read.csv(paste0('ICC.seed.', seed, '/', uid, '/training.csv'), row.names = 1))
    icc.test <- my.bind.rows(icc.test, read.csv(paste0('ICC.seed.', seed, '/', uid, '/testing.csv'), row.names = 1))
  }
  set.seed(seed)
  # shuffle
  icc.train <- icc.train[sample(dim(icc.train)[1]),]
  icc.test <- icc.test[sample(dim(icc.test)[1]),]
  # write
  dir.create(paste0('ICC.seed.', seed, '/ALL.itan/'), showWarnings = FALSE)
  # drop score.labels column
  icc.train$score.label <- NULL
  icc.test$score.label <- NULL
  write.csv(icc.train, paste0('ICC.seed.', seed, '/ALL.itan/training.csv'))
  write.csv(icc.test, paste0('ICC.seed.', seed, '/ALL.itan/testing.csv'))
}
# do itan and no itan: train on Itan-sourced variants only, test on the rest
icc.noitan <- icc.all[!grepl('Itan', icc.all$data_source),]
icc.itan <- icc.all[grepl('Itan', icc.all$data_source),]
dir.create(paste0('ICC.seed.', 0, '/ALL.itan.only/'), showWarnings = FALSE)
write.csv(icc.noitan, paste0('ICC.seed.', 0, '/ALL.itan.only/testing.csv'))
write.csv(icc.itan, paste0('ICC.seed.', 0, '/ALL.itan.only/training.csv'))

# ==== revision.all.glof.split.R =============================================
# Same as above but against the non-itan nine-gene list ('.clean' splits).
icc.all <- read.csv(gzfile('../analysis/figs/ALL.csv.gz'), row.names = 1)
# do random split in all genes, keep it the same as seed 0 1 2 3 4 results
nine.genes <- read.csv('../scripts/gene.txt', header = FALSE)$V1
icc.no.nine.genes <- icc.all[!icc.all$uniprotID %in% gsub('.clean', '', nine.genes),]
# Row-bind two data frames, coercing type-mismatched shared columns.
my.bind.rows <- function (df1, df2) {
  for (c in colnames(df1)[colnames(df1) %in% colnames(df2)]) {
    if(typeof(df1[,c])!=typeof(df2[,c])) {
      df1[,c] <- as.character(df1[,c])
      df2[,c] <- as.character(df2[,c])
    }
  }
  result <- dplyr::bind_rows(df1, df2)
  result
}
for (seed in 0:4) {
  # for other genes in icc, split by 80-20 for each gene randomly
  training <- c()
  testing <- c()
  for (uid in unique(icc.no.nine.genes$uniprotID)) {
    uid.gof <- which(icc.no.nine.genes$uniprotID==uid & icc.no.nine.genes$score==1)
    uid.lof <- which(icc.no.nine.genes$uniprotID==uid & icc.no.nine.genes$score==-1)
    set.seed(seed)
    # BUGFIX: same sample() scalar footgun / implicit-truncation fix as in
    # revision.all.glof.itan.split.R above.
    if (length(uid.gof) > 0) {
      uid.gof.trn <- uid.gof[sample.int(length(uid.gof), floor(length(uid.gof)*0.8))]
      uid.gof.tst <- uid.gof[!uid.gof %in% uid.gof.trn]
    } else {
      uid.gof.trn <- c()
      uid.gof.tst <- c()
    }
    if (length(uid.lof) > 0) {
      uid.lof.trn <- uid.lof[sample.int(length(uid.lof), floor(length(uid.lof)*0.8))]
      uid.lof.tst <- uid.lof[!uid.lof %in% uid.lof.trn]
    } else {
      uid.lof.trn <- c()
      uid.lof.tst <- c()
    }
    training <- c(training, uid.gof.trn, uid.lof.trn)
    testing <- c(testing, uid.gof.tst, uid.lof.tst)
  }
  # aggregate results; append the nine genes' precomputed seed splits
  icc.train <- icc.no.nine.genes[training,]
  icc.test <- icc.no.nine.genes[testing,]
  for (uid in nine.genes) {
    icc.train <- my.bind.rows(icc.train, read.csv(paste0('ICC.seed.', seed, '/', uid, '/training.csv'), row.names = 1))
    icc.test <- my.bind.rows(icc.test, read.csv(paste0('ICC.seed.', seed, '/', uid, '/testing.csv'), row.names = 1))
  }
  set.seed(seed)
  # shuffle
  icc.train <- icc.train[sample(dim(icc.train)[1]),]
  icc.test <- icc.test[sample(dim(icc.test)[1]),]
  # write
  dir.create(paste0('ICC.seed.', seed, '/ALL/'), showWarnings = FALSE)
  # drop score.labels column
  icc.train$score.label <- NULL
  icc.test$score.label <- NULL
  write.csv(icc.train, paste0('ICC.seed.', seed, '/ALL/training.csv'))
  write.csv(icc.test, paste0('ICC.seed.', seed, '/ALL/testing.csv'))
}
diff --git a/revision.by.domain.split.no.self.R b/revision.by.domain.split.no.self.R new file mode 100644 index 0000000000000000000000000000000000000000..81e56ce114ec8c76cc6c12aef7fa8aa81e67e4f9 --- /dev/null +++ b/revision.by.domain.split.no.self.R @@ -0,0 +1,19 @@
# revision, split by domains, but only use variants from other proteins to train the model
gene.pfams <- read.csv('../scripts/gene.pfams.txt', header = F)$V1
gene.pfams <-
gene.pfams[grep('self', gene.pfams, invert = T)]
# for gene.pfams, remove the gene itself from each domain training set
for (uid.pfam in gene.pfams) {
  uid <- strsplit(uid.pfam, '\\.')[[1]][1]  # uniprot ID is the prefix before the first '.'
  for (seed in 0:4) {
    uid.pfam.trn <- read.csv(paste0('ICC.seed.', seed, '/', uid.pfam, '/training.csv'), row.names = 1)
    uid.pfam.tst <- read.csv(paste0('ICC.seed.', seed, '/', uid.pfam, '/testing.csv'), row.names = 1)
    # remove uid in uid.pfam.trn
    uid.pfam.trn <- uid.pfam.trn[uid.pfam.trn$uniprotID!=uid,]
    # make directory
    dir.create(paste0('ICC.seed.', seed, '/', uid.pfam, '.noself/'), showWarnings = F)
    write.csv(uid.pfam.trn, paste0('ICC.seed.', seed, '/', uid.pfam, '.noself/training.csv'))
    # BUGFIX: the original 'ln -s ICC.seed.S/X/testing.csv ICC.seed.S/X.noself/testing.csv'
    # produced a DANGLING link -- a relative symlink target is resolved
    # against the link's own directory, not the working directory.  Point the
    # link one level up instead (and skip if it already exists on rerun).
    if (!file.exists(paste0('ICC.seed.', seed, '/', uid.pfam, '.noself/testing.csv'))) {
      file.symlink(paste0('../', uid.pfam, '/testing.csv'),
                   paste0('ICC.seed.', seed, '/', uid.pfam, '.noself/testing.csv'))
    }
  }
}

# ==== revision.by.position.split.R ==========================================
# Split each benchmark gene so that all variants sharing a protein position
# land entirely in training or entirely in testing.
nine.genes <- read.csv('../scripts/gene.txt', header = FALSE)$V1
# Row-bind two data frames, coercing any shared column whose storage types
# disagree to character first (bind_rows errors on type conflicts).
my.bind.rows <- function (df1, df2) {
  for (c in colnames(df1)[colnames(df1) %in% colnames(df2)]) {
    if(typeof(df1[,c])!=typeof(df2[,c])) {
      df1[,c] <- as.character(df1[,c])
      df2[,c] <- as.character(df2[,c])
    }
  }
  result <- dplyr::bind_rows(df1, df2)
  result
}
# Greedily sample positions (rows of a table() data frame with columns Var1 /
# Freq) until their variant counts sum to number_to_select.  Returns
# list(selected positions, remaining freq_table).  NOTE: may undershoot the
# target when no remaining position fits the residual budget.
split.by.position <- function(freq_table, number_to_select, seed) {
  set.seed(seed)
  selected = 0
  selected_positions = c()
  candidates = freq_table[freq_table$Freq <= number_to_select - selected,]
  while ((selected < number_to_select) & (dim(candidates)[1] > 0)) {
    selected_position = sample(as.character(candidates$Var1), size = 1)
    selected_positions <- c(selected_positions, selected_position)
    selected = selected + freq_table$Freq[freq_table$Var1 == selected_position]
    # update freq_table and candidates
    freq_table = freq_table[!freq_table$Var1 %in% selected_position,]
    candidates = freq_table[freq_table$Freq <= number_to_select - selected,]
  }
  result = list(selected_positions, freq_table)
  result
}
for (uid in nine.genes) {
  for (seed in 0:4) {
    # always rebuild from the seed-0 base split; `seed` drives the sampler
    icc.train <- read.csv(paste0('ICC.seed.', 0, '/', uid, '/training.csv'), row.names = 1)
    icc.test <- read.csv(paste0('ICC.seed.', 0, '/', uid, '/testing.csv'), row.names = 1)
    icc <- my.bind.rows(icc.train, icc.test)
    # get all uniq positions, for GoF and LoF
    icc.gof.pos <- as.data.frame(table(icc$pos.orig[icc$score==1]))
    icc.lof.pos <- as.data.frame(table(icc$pos.orig[icc$score==-1]))
    # make the number of testing variants match the seed-0 testing set
    icc.gof.pos.splt <- split.by.position(icc.gof.pos, sum(icc.test$score==1), seed)
    icc.gof.pos.tst <- icc.gof.pos.splt[[1]]
    icc.gof.pos.trn <- icc.gof.pos.splt[[2]]$Var1
    icc.lof.pos.splt <- split.by.position(icc.lof.pos, sum(icc.test$score==-1), seed)
    icc.lof.pos.tst <- icc.lof.pos.splt[[1]]
    icc.lof.pos.trn <- icc.lof.pos.splt[[2]]$Var1
    # split data by position membership
    icc.trn <- icc[c(which(icc$score==1 & icc$pos.orig %in% icc.gof.pos.trn),
                     which(icc$score==-1 & icc$pos.orig %in% icc.lof.pos.trn)),]
    icc.tst <- icc[c(which(icc$score==1 & icc$pos.orig %in% icc.gof.pos.tst),
                     which(icc$score==-1 & icc$pos.orig %in% icc.lof.pos.tst)),]
    # sanity prints: both should be 0 (no position shared across splits)
    print(sum(unique(icc.tst$pos.orig[icc.tst$score==1]) %in% unique(icc.trn$pos.orig[icc.trn$score==1])))
    print(sum(unique(icc.tst$pos.orig[icc.tst$score==-1]) %in% unique(icc.trn$pos.orig[icc.trn$score==-1])))
    dir.create(paste0('ICC.seed.', seed, '/', uid, '.split.by.pos/'), showWarnings = F)
    write.csv(icc.trn, paste0('ICC.seed.', seed, '/', uid, '.split.by.pos/training.csv'))
    write.csv(icc.tst, paste0('ICC.seed.', seed, '/', uid, '.split.by.pos/testing.csv'))
  }
}
diff --git a/revision.by.position.split.itan.R
b/revision.by.position.split.itan.R new file mode 100644 index 0000000000000000000000000000000000000000..4fa40d1d00377fcc890e5cdf458167a287e82e32 --- /dev/null +++ b/revision.by.position.split.itan.R @@ -0,0 +1,60 @@
# check performance by training / testing split from the same position to either training / testing
nine.genes <- read.csv('../scripts/gene.txt', header = F)$V1
# Row-bind two data frames after coercing type-mismatched shared columns to
# character (dplyr::bind_rows refuses conflicting column types).
my.bind.rows <- function (df1, df2) {
  shared.cols <- colnames(df1)[colnames(df1) %in% colnames(df2)]
  for (cc in shared.cols) {
    if (typeof(df1[, cc]) != typeof(df2[, cc])) {
      df1[, cc] <- as.character(df1[, cc])
      df2[, cc] <- as.character(df2[, cc])
    }
  }
  dplyr::bind_rows(df1, df2)
}
# Greedily draw positions (Var1/Freq rows) until their counts reach
# number_to_select; returns list(chosen positions, remaining table).
split.by.position <- function(freq_table, number_to_select, seed) {
  set.seed(seed)
  n.taken <- 0
  taken <- c()
  pool <- freq_table[freq_table$Freq <= number_to_select - n.taken, ]
  while ((n.taken < number_to_select) & (dim(pool)[1] > 0)) {
    pick <- sample(as.character(pool$Var1), size = 1)
    taken <- c(taken, pick)
    n.taken <- n.taken + freq_table$Freq[freq_table$Var1 == pick]
    # shrink the table, then recompute the eligible pool
    freq_table <- freq_table[!freq_table$Var1 %in% pick, ]
    pool <- freq_table[freq_table$Freq <= number_to_select - n.taken, ]
  }
  list(taken, freq_table)
}
for (uid in nine.genes) {
  for (seed in 0:4) {
    base.trn <- read.csv(paste0('ICC.seed.', 0, '/', uid, '/training.csv'), row.names = 1)
    base.tst <- read.csv(paste0('ICC.seed.', 0, '/', uid, '/testing.csv'), row.names = 1)
    icc <- my.bind.rows(base.trn, base.tst)
    # unique positions per class, excluding any position carrying an
    # Itan-sourced variant (those must stay in training)
    icc.gof.pos <- as.data.frame(table(icc$pos.orig[icc$score==1]))
    icc.gof.pos <- icc.gof.pos[!icc.gof.pos$Var1 %in% icc$pos.orig[icc$score==1 & grepl('Itan', icc$data_source)],]
    icc.lof.pos <- as.data.frame(table(icc$pos.orig[icc$score==-1]))
    icc.lof.pos <- icc.lof.pos[!icc.lof.pos$Var1 %in% icc$pos.orig[icc$score==-1 & grepl('Itan', icc$data_source)],]
    # target the same testing sizes as the seed-0 split
    gof.split <- split.by.position(icc.gof.pos, sum(base.tst$score==1), seed)
    gof.tst.rows <- which(icc$score==1 & icc$pos.orig %in% gof.split[[1]])
    gof.trn.rows <- which(icc$score==1 & !icc$pos.orig %in% gof.split[[1]])
    lof.split <- split.by.position(icc.lof.pos, sum(base.tst$score==-1), seed)
    lof.tst.rows <- which(icc$score==-1 & icc$pos.orig %in% lof.split[[1]])
    lof.trn.rows <- which(icc$score==-1 & !icc$pos.orig %in% lof.split[[1]])
    # assemble the split
    icc.trn <- icc[c(gof.trn.rows, lof.trn.rows),]
    icc.tst <- icc[c(gof.tst.rows, lof.tst.rows),]
    # sanity prints: 0 = every row assigned; then Itan count in testing
    print(length(unique(c(gof.tst.rows, gof.trn.rows, lof.tst.rows, lof.trn.rows))) - dim(icc)[1])
    print(sum(grepl('Itan', icc.tst$data_source)))
    dir.create(paste0('ICC.seed.', seed, '/', uid, '.split.by.pos.itan/'), showWarnings = F)
    write.csv(icc.trn, paste0('ICC.seed.', seed, '/', uid, '.split.by.pos.itan/training.csv'))
    write.csv(icc.tst, paste0('ICC.seed.', seed, '/', uid, '.split.by.pos.itan/testing.csv'))
  }
}
diff --git a/revision.cpt.split.R b/revision.cpt.split.R new file mode 100644 index 0000000000000000000000000000000000000000..caaf12d562b39627a431357856f7033ba68f1470 --- /dev/null +++ b/revision.cpt.split.R @@ -0,0 +1,73 @@
icc.all <- read.csv(('../analysis/figs/ALL.csv'), row.names = 1)
icc.all$score.label <- NULL
# do random split in all genes, keep it the same as seed 0 1 2 3 4 results
nine.genes <- read.csv('../scripts/gene.itan.txt', header = F)$V1
nine.genes <-
gsub('.clean.itan.split|.itan.split', '', nine.genes)
icc.no.nine.genes <- icc.all[!icc.all$uniprotID %in% nine.genes,]
# get a table of all genes that have sufficient data
icc.noitan <- icc.all[!grepl('Itan', icc.all$data_source),]
icc.itan <- icc.all[grepl('Itan', icc.all$data_source),]
# ---- split 1: genes with >= 4 non-Itan GoF and >= 4 non-Itan LoF variants
#      and no Itan data at all become the held-out test genes ---------------
genes <- unique(icc.noitan$uniprotID)
good.genes <- c()
for (uid in genes) {
  icc.noitan.gene <- icc.noitan[icc.noitan$uniprotID == uid,]
  # check number of gof and lof
  n.gof <- sum(icc.noitan.gene$score == 1)
  n.lof <- sum(icc.noitan.gene$score == -1)
  if (n.gof >= 4 & n.lof >= 4 & !uid %in% icc.itan$uniprotID) {
    good.genes <- c(good.genes, uid)
  }
}
# take the good genes, make the split
good.genes.train <- icc.all[!icc.all$uniprotID %in% good.genes,]
good.genes.test <- icc.all[icc.all$uniprotID %in% good.genes,]
# BUGFIX: write.csv() fails with "cannot open the connection" when the target
# directory does not exist; none of the three output directories in this
# script was ever created.
dir.create('ICC.seed.0/ALL.itan.nogeneoverlap/', recursive = TRUE, showWarnings = FALSE)
for (g in good.genes) {
  itan.all.g <- icc.all[icc.all$uniprotID == g,]
  write.csv(itan.all.g, paste0('ICC.seed.0/ALL.itan.nogeneoverlap/', g, '.csv'))
}
write.csv(good.genes.train, 'ICC.seed.0/ALL.itan.nogeneoverlap/training.csv')
write.csv(good.genes.test, 'ICC.seed.0/ALL.itan.nogeneoverlap/testing.csv')

# ---- split 2: genes with >= 10 GoF and >= 10 LoF variants overall ----------
good.genes <- c()
for (uid in genes) {
  icc.gene <- icc.all[icc.all$uniprotID == uid,]
  n.gof <- sum(icc.gene$score == 1)
  n.lof <- sum(icc.gene$score == -1)
  if (n.gof >= 10 & n.lof >= 10) {
    good.genes <- c(good.genes, uid)
  }
}
good.genes.train <- icc.all[!icc.all$uniprotID %in% good.genes,]
good.genes.test <- icc.all[icc.all$uniprotID %in% good.genes,]
dir.create('ICC.seed.0/ALL.nogeneoverlap/', recursive = TRUE, showWarnings = FALSE)  # BUGFIX (see above)
for (g in good.genes) {
  itan.all.g <- icc.all[icc.all$uniprotID == g,]
  write.csv(itan.all.g, paste0('ICC.seed.0/ALL.nogeneoverlap/', g, '.csv'))
}
write.csv(good.genes.train, 'ICC.seed.0/ALL.nogeneoverlap/training.csv')
write.csv(good.genes.test, 'ICC.seed.0/ALL.nogeneoverlap/testing.csv')

# ---- split 3: train on genes with >= 10 GoF+LoF variants (excluding the
#      nine benchmark genes), test on the nine benchmark genes ---------------
good.genes <- c()
for (uid in genes) {
  icc.gene <- icc.all[icc.all$uniprotID == uid,]
  n.gof <- sum(icc.gene$score == 1)
  n.lof <- sum(icc.gene$score == -1)
  if (n.gof + n.lof >= 10) {
    good.genes <- c(good.genes, uid)
  }
}
good.genes.train <- icc.all[icc.all$uniprotID %in% good.genes & !icc.all$uniprotID %in% nine.genes,]
good.genes.test <- icc.all[icc.all$uniprotID %in% nine.genes,]
dir.create('ICC.seed.0/ALL.nogeneoverlap.large.gene/', recursive = TRUE, showWarnings = FALSE)  # BUGFIX (see above)
write.csv(good.genes.train, 'ICC.seed.0/ALL.nogeneoverlap.large.gene/training.csv')
write.csv(good.genes.test, 'ICC.seed.0/ALL.nogeneoverlap.large.gene/testing.csv')

diff --git a/revision.remove.all.transfer.paralog.R b/revision.remove.all.transfer.paralog.R new file mode 100644 index 0000000000000000000000000000000000000000..2ab0ab53340a7ad6d86ad61ca789876b0ef26d36 --- /dev/null +++ b/revision.remove.all.transfer.paralog.R @@ -0,0 +1,74 @@
# remove all the homologous proteins from pretrain
pretrain <- rbind(read.csv('../archive/data.files/pretrain.orig/training.csv', row.names = 1),
                  read.csv('../archive/data.files/pretrain.orig/testing.csv', row.names = 1))
# read homologous data frame
geneid2paralog <- read.csv('pretrain/revision.split.by.paralogues/geneid2paralog.csv', row.names = 1)
uniprot2geneid <- read.csv('pretrain/revision.split.by.paralogues/uniprot2geneid.csv', row.names = 1)
# define the transfer learning tasks
tf.tasks <- read.csv('../scripts/gene.txt', header = F)
tf.uids <- gsub('.clean', '', tf.tasks$V1)
# add DMS uids: PTEN PTEN.bin CCR5 CXCR4 NUDT15 SNCA CYP2C9 GCK ASPA Stab
tf.uids <- c(tf.uids, 'P60484', 'P51681', 'P61073', 'Q9NV35', 'P37840', 'P11712', 'P35557', 'P45381')
stab <- rbind(read.csv('../archive/data.files/Stab/test.seed.0.csv'),
              read.csv('../archive/data.files/Stab/train.seed.0.csv'))
tf.uids <- c(tf.uids,
unique(stab$uniprotID))
# add ProteinGym uids (one uniprot ID per assay that has a fold-0 test file)
task.dic <- rbind(read.csv('../analysis/single.assays.txt', header = F), read.csv('../analysis/multiple.assays.txt', header = F))
for (task in task.dic$V1) {
  if (file.exists(paste0('../analysis/PreMode/', task, '/', 'testing.fold.0.csv'))) {
    tmp <- read.csv(paste0('../analysis/PreMode/', task, '/', 'testing.fold.0.csv'))
    tf.uids <- c(tf.uids, tmp$uniprotID[1])
  }
}
tf.uids <- unique(tf.uids)
tf.geneids <- uniprot2geneid$ensembl_gene_id[match(tf.uids, uniprot2geneid$uniprot_gn_id)]
tf.homologues <- geneid2paralog$hsapiens_paralog_ensembl_gene[geneid2paralog$ensembl_gene_id %in% tf.geneids]
to.remove <- unique(c(tf.geneids, tf.homologues))
# BUGFIX: match() returns only the FIRST uniprot accession for each gene id,
# so genes mapped to several accessions were only partially removed from the
# pretraining set; %in% collects every matching accession.
to.remove.uids <- uniprot2geneid$uniprot_gn_id[uniprot2geneid$ensembl_gene_id %in% to.remove]
# remove the uids from pretrain
pretrain <- pretrain[!pretrain$uniprotID %in% to.remove.uids,]
# split into 4 roughly equal parts, keeping each protein within one part.
# Greedy random selection of uniprot IDs (Var1/Freq rows) whose variant
# counts sum to at most number_to_select; returns list(IDs, remaining table).
split.by.uniprotID <- function(freq_table, number_to_select) {
  set.seed(0)
  selected = 0
  selected_uniprotIDs = c()
  candidates = freq_table[freq_table$Freq <= number_to_select - selected,]
  while ((selected < number_to_select) & (dim(candidates)[1] > 0)) {
    selected_uniprotID = sample(as.character(candidates$Var1), size = 1)
    selected_uniprotIDs <- c(selected_uniprotIDs, selected_uniprotID)
    selected = selected + freq_table$Freq[freq_table$Var1 == selected_uniprotID]
    # update freq_table and candidates
    freq_table = freq_table[!freq_table$Var1 %in% selected_uniprotID,]
    candidates = freq_table[freq_table$Freq <= number_to_select - selected,]
  }
  result = list(selected_uniprotIDs, freq_table)
  result
}

training_freq_table <- as.data.frame(table(pretrain$uniprotID))

quarter.size <- floor(dim(pretrain)[1] / 4)

tmp <- split.by.uniprotID(freq_table = training_freq_table, quarter.size)
quarter.1 <- tmp[[1]]
left_freq_table <- tmp[[2]]

tmp <- split.by.uniprotID(freq_table = left_freq_table, quarter.size)
quarter.2 <- tmp[[1]]
left_freq_table <- tmp[[2]]

tmp <- split.by.uniprotID(freq_table = left_freq_table, quarter.size)
quarter.3 <- tmp[[1]]
left_freq_table <- tmp[[2]]

tmp <- split.by.uniprotID(freq_table = left_freq_table, dim(pretrain)[1]-quarter.size*3)
quarter.4 <- tmp[[1]]
left_freq_table <- tmp[[2]]

# write out the 4 quarters
dir.create('pretrain/revision.remove.all.paralog/', recursive = T, showWarnings = F)
write.csv(pretrain[pretrain$uniprotID %in% quarter.1,], "pretrain/revision.remove.all.paralog/training.0.csv", na = ".")
write.csv(pretrain[pretrain$uniprotID %in% quarter.2,], "pretrain/revision.remove.all.paralog/training.1.csv", na = ".")
write.csv(pretrain[pretrain$uniprotID %in% quarter.3,], "pretrain/revision.remove.all.paralog/training.2.csv", na = ".")
write.csv(pretrain[pretrain$uniprotID %in% quarter.4,], "pretrain/revision.remove.all.paralog/training.3.csv", na = ".")
write.csv(pretrain, 'pretrain/revision.remove.all.paralog/training.csv', na = ".")
diff --git a/revision.remove.glof.paralog.R b/revision.remove.glof.paralog.R new file mode 100644 index 0000000000000000000000000000000000000000..3b5a72ddbfc4ef3513a342874c570139582170dc --- /dev/null +++ b/revision.remove.glof.paralog.R @@ -0,0 +1,60 @@
# remove all the homologous proteins from pretrain
pretrain <- rbind(read.csv('../archive/data.files/pretrain.orig/training.csv', row.names = 1),
                  read.csv('../archive/data.files/pretrain.orig/testing.csv', row.names = 1))
# read homologous data frame
geneid2paralog <- read.csv('pretrain/revision.split.by.paralogues/geneid2paralog.csv', row.names = 1)
uniprot2geneid <- read.csv('pretrain/revision.split.by.paralogues/uniprot2geneid.csv', row.names = 1)
# define the transfer learning tasks
tf.tasks <- read.csv('../scripts/gene.txt', header = F)
tf.uids <- gsub('.clean', '', tf.tasks$V1)
tf.geneids <- uniprot2geneid$ensembl_gene_id[match(tf.uids, uniprot2geneid$uniprot_gn_id)]
tf.homologues <-
geneid2paralog$hsapiens_paralog_ensembl_gene[geneid2paralog$ensembl_gene_id %in% tf.geneids]
to.remove <- unique(c(tf.geneids, tf.homologues))
# BUGFIX: match() returns only the FIRST uniprot accession per gene id, so
# paralogs mapped to several accessions escaped removal; %in% collects all.
to.remove.uids <- uniprot2geneid$uniprot_gn_id[uniprot2geneid$ensembl_gene_id %in% to.remove]
# remove the uids from pretrain
pretrain <- pretrain[!pretrain$uniprotID %in% to.remove.uids,]
# split into 4 roughly equal parts, keeping each protein within one part.
# Greedy random selection of uniprot IDs (Var1/Freq rows) whose variant
# counts sum to at most number_to_select; returns list(IDs, remaining table).
split.by.uniprotID <- function(freq_table, number_to_select) {
  set.seed(0)
  selected = 0
  selected_uniprotIDs = c()
  candidates = freq_table[freq_table$Freq <= number_to_select - selected,]
  while ((selected < number_to_select) & (dim(candidates)[1] > 0)) {
    selected_uniprotID = sample(as.character(candidates$Var1), size = 1)
    selected_uniprotIDs <- c(selected_uniprotIDs, selected_uniprotID)
    selected = selected + freq_table$Freq[freq_table$Var1 == selected_uniprotID]
    # update freq_table and candidates
    freq_table = freq_table[!freq_table$Var1 %in% selected_uniprotID,]
    candidates = freq_table[freq_table$Freq <= number_to_select - selected,]
  }
  result = list(selected_uniprotIDs, freq_table)
  result
}

training_freq_table <- as.data.frame(table(pretrain$uniprotID))

quarter.size <- floor(dim(pretrain)[1] / 4)

tmp <- split.by.uniprotID(freq_table = training_freq_table, quarter.size)
quarter.1 <- tmp[[1]]
left_freq_table <- tmp[[2]]

tmp <- split.by.uniprotID(freq_table = left_freq_table, quarter.size)
quarter.2 <- tmp[[1]]
left_freq_table <- tmp[[2]]

tmp <- split.by.uniprotID(freq_table = left_freq_table, quarter.size)
quarter.3 <- tmp[[1]]
left_freq_table <- tmp[[2]]

tmp <- split.by.uniprotID(freq_table = left_freq_table, dim(pretrain)[1]-quarter.size*3)
quarter.4 <- tmp[[1]]
left_freq_table <- tmp[[2]]

# write out the 4 quarters
dir.create('pretrain/revision.remove.glof.paralog/', recursive = T, showWarnings = F)
write.csv(pretrain[pretrain$uniprotID %in% quarter.1,], "pretrain/revision.remove.glof.paralog/training.0.csv", na = ".")
write.csv(pretrain[pretrain$uniprotID %in% quarter.2,], "pretrain/revision.remove.glof.paralog/training.1.csv", na = ".")
write.csv(pretrain[pretrain$uniprotID %in% quarter.3,], "pretrain/revision.remove.glof.paralog/training.2.csv", na = ".")
write.csv(pretrain[pretrain$uniprotID %in% quarter.4,], "pretrain/revision.remove.glof.paralog/training.3.csv", na = ".")
write.csv(pretrain, 'pretrain/revision.remove.glof.paralog/training.csv', na = ".")
diff --git a/sup.data.1.csv.gz b/sup.data.1.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..15e9d2480c5bb5528593480b085d63cc0e57dcae --- /dev/null +++ b/sup.data.1.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35a1d2ac04342a4032472c871696f1620e35430c30f4f5719dc5ceba196d463a +size 211 diff --git a/uniq.genes.csv.gz b/uniq.genes.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..ad489dbfb1a8f12bd41101bf3812118734f918d1 --- /dev/null +++ b/uniq.genes.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82ff818470a7e0cfc443479fd61ce8b934c02dfd82aa120c3c03e57bdb9592d8 +size 48477