python_code (stringlengths 0-4.04M) | repo_name (stringlengths 8-58) | file_path (stringlengths 5-147) |
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import shutil
import subprocess
from tqdm import tqdm
from pathlib import Path
def main():
import argparse
parser = argparse.ArgumentParser(description='VoxCeleb2 tsv preparation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--vox', type=str, help='VoxCeleb2 root dir')
parser.add_argument('--en-ids', type=str, help='a list of English-utterance ids')
args = parser.parse_args()
file_list = f"{args.vox}/file.list"
assert os.path.isfile(file_list), f"{file_list} does not exist -> run vox_prepare.py first"
nframes_audio_file, nframes_video_file = f"{args.vox}/nframes.audio", f"{args.vox}/nframes.video"
assert os.path.isfile(nframes_audio_file), f"{nframes_audio_file} does not exist -> run count_frames.py first"
assert os.path.isfile(nframes_video_file), f"{nframes_video_file} does not exist -> run count_frames.py first"
audio_dir, video_dir = f"{args.vox}/audio", f"{args.vox}/video"
def setup_target(target_dir, train):
for name, data in zip(['train'], [train]):
with open(f"{target_dir}/{name}.tsv", 'w') as fo:
fo.write('/\n')
for fid, nf_audio, nf_video in data:
fo.write('\t'.join([fid, os.path.abspath(f"{video_dir}/{fid}.mp4"), os.path.abspath(f"{audio_dir}/{fid}.wav"), str(nf_video), str(nf_audio)])+'\n')
return
fids = [x.strip() for x in open(file_list).readlines()]
nfs_audio, nfs_video = [x.strip() for x in open(nframes_audio_file).readlines()], [x.strip() for x in open(nframes_video_file).readlines()]
en_fids = set([x.strip() for x in open(args.en_ids).readlines()])
train_all, train_sub = [], []
for fid, nf_audio, nf_video in zip(fids, nfs_audio, nfs_video):
if fid in en_fids:
train_sub.append([fid, nf_audio, nf_video])
train_all.append([fid, nf_audio, nf_video])
dir_en = f"{args.vox}/en_data"
print(f"Set up English-only dir")
os.makedirs(dir_en, exist_ok=True)
setup_target(dir_en, train_sub)
dir_all = f"{args.vox}/all_data"
print(f"Set up all data dir")
os.makedirs(dir_all, exist_ok=True)
setup_target(dir_all, train_all)
return
if __name__ == '__main__':
main()
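# Illustrative sketch (not part of the original script): reading back a manifest
# written by setup_target() above. The path below is an assumption; the format is one
# root marker line '/' followed by tab-separated
# <id> <video_path> <audio_path> <n_video_frames> <n_audio_frames> rows.
def read_manifest(tsv_path="/path/to/voxceleb2/en_data/train.tsv"):
    rows = []
    with open(tsv_path) as f:
        next(f)  # skip the leading '/' root line
        for line in f:
            fid, video, audio, nf_video, nf_audio = line.rstrip('\n').split('\t')
            rows.append((fid, video, audio, int(nf_video), int(nf_audio)))
    return rows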
| av_hubert-main | avhubert/preparation/vox_manifest.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from transformers import BertModel, BertTokenizer
import torch
from typing import List, Optional, Tuple, Union
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers.models.bert.modeling_bert import BertPreTrainedModel
from transformers.models.bert.configuration_bert import BertConfig
class_labels = [
"adoring",
"amused",
"angered",
"approving",
"excited",
"saddened",
"scared",
]
class CAREBERT(BertPreTrainedModel):
def __init__(self, config: BertConfig, model_load_path: str = "./care_bert.pth"):
super().__init__(config)
self.config = config
self.bert = BertModel(config)
if model_load_path is not None:
checkpoint = torch.load(model_load_path)
self.bert.load_state_dict(checkpoint["model_state_dict"])
print(f"Loaded from old {model_load_path}")
classifier_dropout = (
config.classifier_dropout
if config.classifier_dropout is not None
else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Run predictions for a list of texts, returning a list of the list of affects predicted for each example.
def predict(
examples: List[str], threshold: float = 0.5, model_load_path="./care_bert.pth"
) -> List[List[str]]:
model = CAREBERT.from_pretrained(
"bert-base-uncased",
num_labels=7,
model_load_path=model_load_path,
)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
encoding = tokenizer(
examples,
padding="max_length",
truncation=True,
max_length=256,
return_tensors="pt",
)
# forward pass
outs = model(**encoding, return_dict=False)
logits = outs[0]
# Note: raw logits are compared against the threshold here (no sigmoid is applied).
pred_bools = [pl > threshold for pl in logits]
predictions = []
for pred_bool in pred_bools:
affects = [class_labels[i] for i in range(len(pred_bool)) if pred_bool[i]]
predictions.append(affects)
return predictions
if __name__ == "__main__":
examples = ["Warriors against the Miami Heat!!!", "That was so hilarious"]
print(predict(examples))
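# Optional post-processing sketch (an assumption, not part of the original pipeline):
# since the classifier is trained with BCEWithLogitsLoss, one common alternative to
# thresholding raw logits is to threshold sigmoid probabilities instead. `logits` is
# assumed to be the (batch_size, num_labels) tensor returned by the model above.
def logits_to_affects(logits: torch.Tensor, threshold: float = 0.5) -> List[List[str]]:
    probs = torch.sigmoid(logits)
    return [
        [class_labels[i] for i, p in enumerate(row) if p > threshold]
        for row in probs
    ]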
| care-main | care_bert.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import re
import lexicon_filtering
import nltk
import string
from typing import List, Dict, Tuple
tokenizer = nltk.data.load("tokenizers/punkt/PY3/english.pickle")
lexicon_map, multi_word_phrases = lexicon_filtering.get_hardcoded_lexicon()
all_AA_keys = set(
list(lexicon_map.keys()) + multi_word_phrases
) # The list of all indicators
# List of negation words that are not permitted.
negation_words = [
"weren't",
"wasn't",
"don't",
"aren't",
"can't",
"neither",
"if",
"couldn't",
"not",
"shouldn't",
"wouldn't",
"stop",
"people think",
"you think",
"nobody",
"no one",
]
# List of exaggerators as described in line 246 of the paper.
exaggerator_synonyms = (
"(?:a\s|an\s)*("
+ "|".join(
[
"soo*\s",
"re*a*lly*\s",
"ve*ry*\s",
"extre*mely*\s",
"su*per*\s",
"pre*tty*\s",
"the\smost\s",
"one\sof\sthe\smost\s",
"abso*lu*tely\s",
"such\sa\s",
"alwa*ys*\s",
"ju*st*\s",
"espe*cia*lly*\s",
"friggin.\s",
"fuckin.\s",
"friggin\s",
"fuckin\s",
"by\sfar\sthe\smo*st\s",
"probably\sthe\smo*st\s",
"too*\s",
"a\slittle\s",
"a\s*lo*t\s",
"more\s",
"quite\spossibly\sthe\smo*st\s",
"actually\s",
"ki*nd*\sof\s",
"freakin.\s",
"freakin\s",
"bit\s",
"currently\s",
"recently\s",
"lately\s",
"honestly\s",
"truly\s",
"unbelievably\s",
"insanely\s",
"seriously\s",
]
)
+ ")*(?:a\s|an\s)*"
)
# Additional sub-patterns used in CARE patterns
singular_subjective_pronouns = "(" + "|".join(["he", "she"]) + ")"
plural_subjective_pronouns = "(" + "|".join(["they", "you", "u"]) + ")"
singular_demonstrative_pronouns = "(" + "|".join(["that", "this"]) + ")"
plural_demonstrative_pronouns = "(" + "|".join(["these", "those"]) + ")"
beginning = r"(\.|!|but\s|however\s|oh\sno\s|oh\s|oh\sman\s|oh\ssnap\s|omg\s|wow\s|jesus|holy\scrap\s|for\ssome\sreason\s|,|^)\s*(?:funny\senough\s|holy\sshit\s|damn\s|oh\sshit\s)*"
ending = "\s*([^\s]*)\s*([^\s]*)\s*([^\s]*)"
# ending = "\s*([a-z]*)\s*([a-z]*)\s*([a-z]*)"
# Map of CARE pattern names to their respective regular expressions.
regex_name_to_pattern = {
"individual": beginning
+ "(i)(\s|\sam\s|'m\s|m\s|'ve\s|\shave\s|\shave\snever\s.een\s)"
+ exaggerator_synonyms
+ ending,
"individual_feel": beginning
+ "(i\sfeel\s)(like\s)*"
+ exaggerator_synonyms
+ ending,
"we": beginning + "(we)(\sare|'re|re|have|'ve)\s" + exaggerator_synonyms + ending,
"we_feel": beginning + "(we\sfeel\s)(like\s)" + exaggerator_synonyms + ending,
"heshe": beginning
+ singular_subjective_pronouns
+ "(\sis|'s|s)\s"
+ exaggerator_synonyms
+ ending,
"it": beginning + "(it)" + "(\sis|'s|s)\s" + exaggerator_synonyms + ending,
"theyyou": beginning
+ plural_subjective_pronouns
+ "(\sare|'re|re)\s"
+ exaggerator_synonyms
+ ending,
"this_is": beginning
+ "(this|that)\s(?:story\s|situation\s)*(is\s|was\s|\s)"
+ exaggerator_synonyms
+ ending,
"hisher_story": beginning
+ "(his|her)\s(?:story\s|situation\s)*(is\s|was\s|\s)"
+ exaggerator_synonyms
+ ending,
"noun_is": beginning
+ "(?:the\s)"
+ "([a-z']+)"
+ "\s(is)\s"
+ exaggerator_synonyms
+ ending,
"this_really": beginning
+ singular_demonstrative_pronouns
+ "\s(re*a*lly*)\s"
+ "(is\s|was\s|\s)*"
+ ending,
"this_makes_me": beginning
+ singular_demonstrative_pronouns
+ "\s(makes\sme\sfeel|made\sme|made\sme\sfeel|makes\sme)\s"
+ exaggerator_synonyms
+ ending,
"these_are": beginning
+ plural_demonstrative_pronouns
+ "\s(are|were|)\s"
+ exaggerator_synonyms
+ ending,
"these_really": beginning
+ plural_demonstrative_pronouns
+ "\s(really)"
+ "\s(are\s|were\s|)*"
+ ending,
"these_make_me": beginning
+ plural_demonstrative_pronouns
+ "\s(make\sme|make\sme\sfeel|made\sme|made\sme\sfeel)\s"
+ exaggerator_synonyms
+ ending,
"made_me": beginning
+ "(makes\sme|made\sme)\s(feel\s)*"
+ exaggerator_synonyms
+ ending,
"feeling": beginning + "()()(feeling\s)" + exaggerator_synonyms + ending,
"my_heart": beginning + "(my\sheart\sis)" + exaggerator_synonyms + ending,
"sovery": beginning
+ "()()("
+ "|".join(["soo*\s", "very\s", "extremely\s"])
+ ")+"
+ ending,
"what_a": beginning + "(what\s)(a|an)\s" + exaggerator_synonyms + ending,
"how": beginning + "()()(how\s)" + exaggerator_synonyms + ending,
"some_people": beginning
+ "(some\speople\s|humans\s|society\s)(is\s|are\s|make\sme\s)"
+ exaggerator_synonyms
+ ending,
"freeform": beginning + "()()()" + ending,
}
# Helper function to skip duplicate affects that can occur from matching multiple patterns.
def get_set(
matches: List, affects: List[str], indicators: List[str]
) -> Tuple[List[str], List[str], List[str]]:
output_matches = []
output_indicators = []
seen = set()
for i, affect in enumerate(affects):
if affect in seen:
continue
else:
seen.add(affect)
output_matches.append(matches[i])
output_indicators.append(indicators[i])
return output_matches, list(seen), output_indicators
# Function for getting a list of all matches, all affects, and all indicators from a given piece of text.
def get_regex_match_all(text: str) -> List[str]:
if type(text) == list:
sentences = text
else:
sentences = tokenizer.tokenize(text)
all_matches = []
all_affects = []
all_indicators = []
for sentence in sentences:
matches, affects, indicators = get_regex_match(sentence)
if len(affects) > 0:
matches, affects, indicators = get_set(matches, affects, indicators)
all_affects.extend(affects)
all_matches.extend(matches)
all_indicators.extend(indicators)
return all_affects
# Check that the pattern and keyword combination is not forbidden.
def is_valid_regex_pattern(regex_name: str, affect: str, keyword: str) -> bool:
if regex_name in lexicon_filtering.affect_to_prohibited_patterns[affect]:
return False
if regex_name == "freeform" and len(keyword.split(" ")) == 1:
return False
return True
# Clean the text of punctuation, numbers, and extra spaces, and make lower case.
def clean_text(text: str) -> str:
# remove numbers
text_nonum = re.sub(r"\d+", "", text)
# remove punctuations and convert characters to lower case
text_nopunct = "".join(
[
char.lower()
for char in text_nonum
if char not in string.punctuation or char == "'" or char == ","
]
)
# substitute multiple whitespace with single whitespace
# Also, removes leading and trailing whitespaces
text_no_doublespace = re.sub("\s+", " ", text_nopunct).strip()
return text_no_doublespace
# Apply regular expression matching to a single sentence.
def get_regex_match(sentence: str) -> Tuple[List[str], List[str], List[str]]:
matches = []
affects = []
indicators = []
if "but" in sentence:
sentence = sentence[sentence.index("but") + 4 :]
if "however" in sentence:
sentence = sentence[sentence.index("however") + 8 :]
sentence = clean_text(sentence)
for regex_name, regex_pattern in regex_name_to_pattern.items():
regex = re.compile(regex_pattern)
match = regex.search(sentence.lower())
if match is not None and len(match.groups()) > 0:
# Make sure that the given group is a noun if the regular expression name is 'noun_is'.
if regex_name == "noun_is":
if match.groups()[0] != "":
if nltk.pos_tag([match.groups()[0]])[0][1] != "NN":
if (
match.groups()[1] != ""
and nltk.pos_tag([match.groups()[1]])[0][1] != "NN"
):
continue
elif match.groups()[0] == "":
if (
match.groups()[1] != ""
and nltk.pos_tag([match.groups()[1]])[0][1] != "NN"
):
continue
index = 4 # This is the index of the group defining the start of the indicator phrase
if index > len(match.groups()):
continue
indicator = match.groups()[index : len(match.groups())]
indicator = [
x.rstrip().lstrip() for x in indicator if x != "" and x is not None
]
for negator in negation_words:
if negator in indicator:
joined_indicator = " ".join(indicator)
if (
"can't stop laughing" in joined_indicator
or "cannot stop laughing" in joined_indicator
):
continue
else:
indicator = []
keyword = ""
for i, word in enumerate(indicator):
if keyword in lexicon_map:
print(
is_valid_regex_pattern(
regex_name, lexicon_map[keyword], keyword
)
)
word = word.replace(",", "").rstrip().lstrip()
if word in all_AA_keys:
if word in multi_word_phrases:
two_words = " ".join(indicator[:-1])
if two_words in lexicon_map:
keyword = two_words
three_words = two_words + " " + indicator[-1]
if three_words in lexicon_map:
keyword = three_words
elif word in lexicon_map:
keyword = word
if keyword != "" and is_valid_regex_pattern(
regex_name, lexicon_map[keyword], keyword
):
matches.append(
" ".join(
[
x.rstrip().lstrip()
for x in match.groups()
if x is not None and x != "" and x != " "
]
)
)
affects.append(lexicon_map[keyword])
indicators.append(regex_name + ": " + keyword)
return matches, affects, indicators
return matches, affects, indicators
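# Usage sketch: run the full CARE pattern pipeline on a raw comment. The exact
# affects returned depend on the lexicon in lexicon_filtering.py and the patterns above.
if __name__ == "__main__":
    sample_comment = "Oh man, this is so hilarious. This made me really sad though."
    print(get_regex_match_all(sample_comment))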
| care-main | regex_pipeline.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from functools import partial
import requests
import pandas as pd
import os
from typing import Dict, List
import multiprocessing
import utils
import argparse
# Metadata parameters to save when downloading the post metadata
parameters_to_save = [
"id",
"num_comments",
"is_original_content",
"parent_id",
"link_id",
"subreddit",
"permalink",
"subreddit_type",
"category",
"url",
"submission-type",
"lang",
"title",
"selftext",
"header_title",
"submit_text",
"metadata",
]
# This function uses pushshift.io to download metadata for all posts in the CARE database. data_file should point to a csv containing the post ids in the CARE database. The parameter params_to_keep enumerates the metadata fields to save. Increase cpus_to_use for more multiprocessing.
def download_all_sub_data(
sub_ids: List[str] = None,
data_file: str = None,
cpus_to_use: int = 2,
n: int = 10,
output_file: str = None,
chunked_folder: str = None,
params_to_keep: List[str] = parameters_to_save,  # defined at the top of this module
) -> None:
if data_file is None:
data_file = "./care_db_ids_and_labels.csv"
if sub_ids is None:
assert os.path.exists(data_file)
sub_ids_df = pd.read_csv(data_file, sep="\t")
sub_ids = [x for x in list(sub_ids_df["id"]) if isinstance(x, str)]
pool = multiprocessing.Pool(cpus_to_use)
chunked_list = sorted([sub_ids[i : i + n] for i in range(0, len(sub_ids), n)])
func = partial(
download_sub_data_one_chunk,
output_file_path=chunked_folder,
chunked_list=chunked_list,
params_to_keep=params_to_keep,
)
pool.map(func, range(len(chunked_list)))
aggregate_chunks(output_file_path=output_file)
pool.close()
pool.join()
# Helper function for download_all_sub_data. By defaults it saves to care/data/chunks/post_id_metadata_{index}.json
def download_sub_data_one_chunk(
index: int,
chunked_list: List[List[str]],
attempt: int = 1,
output_file_path: str = None,
params_to_keep: List[str] = None,
) -> bool:
sub_ids = chunked_list[index]
if output_file_path is None:
output_file_path = f"./data/chunks/post_id_metadata_{index}.json"
if os.path.exists(output_file_path):
return True
if not os.path.exists(os.path.dirname(os.path.abspath(output_file_path))):
os.makedirs(os.path.dirname(os.path.abspath(output_file_path)))
if attempt == 5:
return False
try:
response = requests.get(
"https://api.pushshift.io/reddit/submission/search?ids=" + ",".join(sub_ids)
)
data = response.json()["data"]
if params_to_keep is not None:
filtered_data = []
for entry in data:
new_entry = {}
for param in params_to_keep:
if param in entry:
new_entry[param] = entry[param]
filtered_data.append(new_entry)
data = filtered_data
with open(f"{output_file_path}", "w", encoding="utf8") as fh:
fh.write(json.dumps(data) + "\n")
return True
except Exception:
return download_sub_data_one_chunk(
index, chunked_list, attempt=attempt + 1, output_file_path=output_file_path,
params_to_keep=params_to_keep,
)
# Aggregates all the downloads into one file. By default, it saves to care/data/post_id_metadata.json
def aggregate_chunks(
output_file_path: str = None, chunked_output_folder: str = None
) -> None:
if output_file_path is None:
output_file_path = f"./data/post_id_metadata.json"
if chunked_output_folder is None:
chunked_output_folder = f"./data/chunks/"
all_data = []
for file in os.listdir(chunked_output_folder):
with open(os.path.join(chunked_output_folder, file), "r") as fin:
data = json.load(fin)
all_data.extend(data)
with open(f"{output_file_path}", "w", encoding="utf8") as fh:
for example in all_data:
fh.write(json.dumps(example) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cpus",
type=int,
required=False,
default=2,
help=f"Number of cpus to use for multiprocessing.",
)
parser.add_argument(
"--n",
type=int,
required=False,
default=10,
help=f"Number of post ids for each job.",
)
parser.add_argument(
"--data_file", type=str, default=None, help="Path the to csv with post ids."
)
parser.add_argument(
"--output_file", type=str, default=None, help="Write the metadata to this file."
)
parser.add_argument(
"--chunk_dir",
type=str,
default=None,
help="Write the batch metadata to this directory. This can be deleted after aggregation.",
)
args = parser.parse_args()
download_all_sub_data(
data_file=args.data_file,
cpus_to_use=args.cpus,
n=args.n,
output_file=args.output_file,
chunked_folder=args.chunk_dir,
)
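# Usage sketch with hypothetical post ids (real ids come from care_db_ids_and_labels.csv).
# Left commented out because it issues live pushshift.io requests:
#
# download_all_sub_data(
#     sub_ids=["abc123", "def456"],  # hypothetical ids
#     cpus_to_use=1,
#     n=2,
#     output_file="./data/post_id_metadata.json",
# )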
| care-main | download_posts.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import regex_pipeline
from typing import Dict, List
from collections import Counter
import pandas as pd
import utils
# Labels a post with an affect if at least t of its comments are labeled with that affect.
def label_posts(
post_id_to_comment_texts: Dict[str, List[str]], t: int = 5
) -> pd.DataFrame:
outputs = []
for post_id, comment_texts in post_id_to_comment_texts.items():
affects = []
for comment_text in comment_texts:
comment_affects = regex_pipeline.get_regex_match_all(comment_text)
affects.extend(comment_affects)
affect_map = dict(Counter(affects))
filtered_affect_map = {}
for k, v in utils.cluster_and_filter(affect_map).items():
if v >= t:
filtered_affect_map[k] = v
if len(filtered_affect_map) > 0:
outputs.append([post_id, filtered_affect_map])
return pd.DataFrame(outputs, columns=["post_id", "affect_map"])
if __name__ == "__main__":
example_dict = {
"1": ["This is so funny!!", "Cannot stop laughing at this.", "So hilarious"]
}
print(label_posts(example_dict, t=3))
| care-main | care_predict.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict
# Clustering into seven affective responses.
CLUSTER_MAP = {
"disgusted": "angered",
"saddened": "saddened",
"amused": "amused",
"angered": "angered",
"disappointed": "saddened",
"interested": "amused",
"impressed": "approving",
"excited": "excited",
"inspired": "approving",
"annoyed": "angered",
"admiring": "approving",
"scared": "scared",
"worried": "scared",
"anxious": "scared",
"adoring": "adoring",
"approving": "approving",
"attracted": "adoring",
"entertained": "amused",
}
CORE_AFFECTS = [
"adoring",
"angered",
"amused",
"approving",
"excited",
"saddened",
"scared",
]
# This function is for clustering according to the hierarchy defined in CLUSTER_MAP and/or filtering for the affects defined in CORE_AFFECTS.
def cluster_and_filter(
affect_map: Dict[str, int], cluster: bool = True, filter: bool = True
) -> Dict[str, int]:
new_affect_map = {}
for orig_k, orig_v in affect_map.items():
if not cluster or orig_k not in CLUSTER_MAP:
k = orig_k
else:
k = CLUSTER_MAP[orig_k]
if filter and k not in CORE_AFFECTS:
continue
if k not in new_affect_map:
new_affect_map[k] = 0
new_affect_map[k] += orig_v
return new_affect_map
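# Usage sketch: cluster a raw affect count map into the seven core affective responses.
if __name__ == "__main__":
    raw_counts = {"disgusted": 2, "annoyed": 1, "impressed": 3, "entertained": 4}
    print(cluster_and_filter(raw_counts))  # {'angered': 3, 'approving': 3, 'amused': 4}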
| care-main | utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Tuple, Dict
# Map of each affect to the CARE pattern names that are prohibited for that affect.
affect_to_prohibited_patterns = {
"disgusted": [],
"saddened": ["heshe", "theyyou"],
"amused": ["theyyou", "it"],
"angered": [],
"disappointed": ["heshe", "theyyou"],
"entertained": ["individual", "individual_feel", "we", "we_feel"],
"interested": ["hesheit", "theyyou"],
"impressed": [],
"excited": ["heshe", "theyyou", "some_people"],
"inspired": [],
"annoyed": [],
"admiring": [
"individual_feel",
"we_feel",
"heshe",
"it",
"theyyou",
"this_is",
"hisher_story",
"noun_is",
"this_really",
"these_are",
"these_really",
"feeling",
"what_a",
"some_people",
],
"scared": ["theyyou", "heshe"],
"worried": [],
"anxious": [],
"adoring": [
"individual",
"individual_feel",
"we",
"we_feel",
"this_makes_me",
"these_make_me",
"made_me",
"feeling",
],
"approving": [
"individual_feel",
"we",
"we_feel",
"this_makes_me",
"these_make_me",
"made_me",
"feeling",
],
"awed": ["heshe", "theyyou", "hisher_story", "some_people"],
"attracted": [
"individual",
"individual_feel",
"it",
"we",
"we_feel",
"this_is",
"hisher_story",
"noun_is",
"this_really",
"this_makes_me",
"these_are",
"these_really",
"these_make_me",
"made_me",
"feeling",
"sovery",
"how",
"some_people",
],
}
# Map of each class to keywords. This is the inverse mapping of the CARE lexicon, as defined in the paper.
affect_to_words = {
"disgusted": [
"gross",
"grosses me out",
"disgusting",
"disgusted",
"disgusts me",
"nasty",
"disgust",
"repulsive",
"repulses me",
],
"saddened": [
"depressing",
"that really sucks",
"saddening",
"saddens me",
"sad",
"sorry for your",
"sorry for them",
"sorry to hear",
"heartbreaking",
"heartbroken",
"tragic",
"painful to watch",
"painful to see",
"hard to see",
"hard to watch",
"unfortunate",
"depressed",
"depresses me",
],
"amused": [
"hilarious",
"funny",
"cracks me up",
"laugh",
"never laughed so",
"can't stop laughing",
"cannot stop laughing",
"the funniest thing",
],
"angered": [
"why i hate",
"fake",
"mislead",
"infuriated",
"infuriating",
"infuriates me",
"infuriate",
"fed up",
"furious",
"frustrate me",
"frustrates me",
"frustrated",
"frustrating",
"mad",
"angry",
"angers me",
"pissed me off",
"pisses me off",
"fuck the",
"fuck this",
"fuck them",
],
"disappointed": [
"disappointing",
"disappointed",
"let down",
"a bummer",
"letting down",
],
"entertained": ["entertaining"],
"interested": [
"intriguing",
"intrigues me",
"interesting",
"curious to see",
"talented",
"curious to know",
"intrigued",
],
"impressed": [
"brilliant",
"impressed",
"impressive",
"proud of you",
"impressive",
"impresses me",
],
"excited": [
"happy",
"ecstatic",
"excited",
"stoked",
"exciting",
"jazzed",
"excites me",
"excite",
"looking forward to",
],
"inspired": [
"forward to trying",
"inspired",
"inspiring",
"inspiration",
"inspires me",
"uplift",
"uplifts me",
"inspire",
"creative",
"motivated",
"encouraged",
"motivates me",
"encourages me",
"motivation",
"encouragement",
],
"annoyed": [
"sick of",
"annoy",
"annoys me",
"annoying",
"annoyed",
"annoyance",
"irritates me",
"irritating",
"agitates me",
"agitated",
"agitation",
"tired of this",
"getting ridiculous",
"tired of seeing",
"tired of hearing",
],
"admiring": ["admire you", "of admiration for", "admirable"],
"scared": [
"scare me",
"scared",
"scares me",
"freaks me out",
"freak me out",
"freaky",
"creepy",
],
"worried": ["worried", "worries me", "concerning", "concerns me"],
"anxious": ["anxious", "gives me anxiety", "nervous"],
"adoring": [
"adorable",
"the cutest",
"cute",
"adorbs",
"sweet",
"cutest thing",
],
"approving": [
"love this",
"love that",
"dope",
"fabulous",
"high five",
"excellent",
"amazing",
"damn good",
"fantastic",
"epic",
"wonderful",
"awesome",
"the best",
"the greatest",
],
"awed": [
"magnificent",
"awe inspiring",
"awe-inspiring",
"spectacular",
"breathtaking",
"majestic",
"incredible",
"in awe",
"awe-inspired",
],
"attracted": ["beautiful", "gorgeous", "handsome"],
}
# Creates the word-to-affect lexicon and collects the first word of each multi-word indicator.
def get_hardcoded_lexicon() -> Tuple[Dict[str, str], List[str]]:
words_to_affect = {x: k for k, v in affect_to_words.items() for x in v}
multi_word_phrases = [k.split(" ")[0] for k in words_to_affect.keys() if " " in k]
return words_to_affect, multi_word_phrases
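# Usage sketch: the inverted lexicon maps each keyword to its affect, and the second
# return value holds the first word of every multi-word indicator.
if __name__ == "__main__":
    words_to_affect, multi_word_first_tokens = get_hardcoded_lexicon()
    print(words_to_affect["hilarious"])  # 'amused'
    print("sorry" in multi_word_first_tokens)  # True ('sorry for your', 'sorry to hear', ...)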
| care-main | lexicon_filtering.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from setuptools import setup, find_packages
with open('README.md', 'r') as f:
long_description = f.read()
with open('requirements.txt', 'r') as f:
requirements = [line.strip() for line in f]
setup(
name='access',
version='0.2',
description='Controllable Sentence Simplification',
long_description=long_description,
long_description_content_type='text/markdown',
author='Louis Martin <[email protected]>',
url='https://github.com/facebookresearch/access',
packages=find_packages(exclude=['resources']),
install_requires=requirements,
)
| access-main | setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import wraps
import multiprocessing
import random
import re
from joblib import Parallel, delayed
import torch
from access.text import to_words
from access.utils.helpers import (open_files, yield_lines, yield_lines_in_parallel, get_temp_filepath, delete_files,
get_temp_filepaths)
def apply_line_method_to_file(line_method, input_filepath):
output_filepath = get_temp_filepath()
with open(input_filepath, 'r') as input_file, open(output_filepath, 'w') as output_file:
for line in input_file:
transformed_line = line_method(line.rstrip('\n'))
if transformed_line is not None:
output_file.write(transformed_line + '\n')
return output_filepath
def replace_lrb_rrb(text):
text = re.sub(r'-lrb-', '(', text, flags=re.IGNORECASE)
text = re.sub(r'-rrb-', ')', text, flags=re.IGNORECASE)
text = re.sub(r'-lsb-', '[', text, flags=re.IGNORECASE)
text = re.sub(r'-rsb-', ']', text, flags=re.IGNORECASE)
text = re.sub(r'-lcb-', '{', text, flags=re.IGNORECASE)
text = re.sub(r'-rcb-', '}', text, flags=re.IGNORECASE)
return text
def replace_lrb_rrb_file(filepath):
return apply_line_method_to_file(replace_lrb_rrb, filepath)
def to_lrb_rrb(text):
# TODO: Very basic
text = re.sub(r'((^| ))\( ', r'\1-lrb- ', text)
text = re.sub(r' \)((^| ))', r' -rrb-\1', text)
return text
def replace_back_quotes(text):
return text.replace('`', "'")
def replace_double_quotes(text):
return text.replace("''", '"')
def normalize_quotes(text):
return replace_double_quotes(replace_back_quotes(text))
def to_lrb_rrb_file(input_filepath):
return apply_line_method_to_file(to_lrb_rrb, input_filepath)
def lowercase_file(filepath):
return apply_line_method_to_file(lambda line: line.lower(), filepath)
def concatenate_files(input_filepaths, output_filepath):
with open(output_filepath, 'w') as output_f:
for input_file in input_filepaths:
with open(input_file, 'r') as input_f:
for line in input_f:
output_f.write(line)
def split_file(input_filepath, output_filepaths, round_robin=False):
if not round_robin:
raise NotImplementedError('Splitting files is only implemented as round robin.')
with open_files(output_filepaths, 'w') as files:
# We write each line to a different file in a round robin fashion
for i, line in enumerate(yield_lines(input_filepath)):
files[i % len(output_filepaths)].write(line + '\n')
def merge_files(input_filepaths, output_filepath, round_robin=False):
if not round_robin:
return concatenate_files(input_filepaths, output_filepath)
with open(output_filepath, 'w') as f:
for lines in yield_lines_in_parallel(input_filepaths, strict=False):
for line in lines:
if line is None:
return
f.write(line + '\n')
def get_real_n_jobs(n_jobs):
n_cpus = multiprocessing.cpu_count()
if n_jobs < 0:
# Adopt same logic as joblib
n_jobs = n_cpus + 1 + n_jobs
if n_jobs > n_cpus:
print(f'Setting n_jobs={n_jobs} > n_cpus={n_cpus}, setting n_jobs={n_cpus}')
n_jobs = n_cpus
assert 0 < n_jobs <= n_cpus
return n_jobs
def get_parallel_file_pair_preprocessor(file_pair_preprocessor, n_jobs):
if n_jobs == 1:
return file_pair_preprocessor
n_jobs = get_real_n_jobs(n_jobs)
@wraps(file_pair_preprocessor)
def parallel_file_pair_preprocessor(complex_filepath, simple_filepath, output_complex_filepath,
output_simple_filepath):
temp_complex_filepaths = get_temp_filepaths(n_jobs)
temp_simple_filepaths = get_temp_filepaths(n_jobs)
split_file(complex_filepath, temp_complex_filepaths, round_robin=True)
split_file(simple_filepath, temp_simple_filepaths, round_robin=True)
preprocessed_temp_complex_filepaths = get_temp_filepaths(n_jobs)
preprocessed_temp_simple_filepaths = get_temp_filepaths(n_jobs)
tasks = [
delayed(file_pair_preprocessor)(*paths)
for paths in zip(temp_complex_filepaths, temp_simple_filepaths, preprocessed_temp_complex_filepaths,
preprocessed_temp_simple_filepaths)
]
Parallel(n_jobs=n_jobs)(tasks)
merge_files(preprocessed_temp_complex_filepaths, output_complex_filepath, round_robin=True)
merge_files(preprocessed_temp_simple_filepaths, output_simple_filepath, round_robin=True)
delete_files(temp_complex_filepaths)
delete_files(temp_simple_filepaths)
delete_files(preprocessed_temp_complex_filepaths)
delete_files(preprocessed_temp_simple_filepaths)
return parallel_file_pair_preprocessor
def word_shuffle(words, max_swap=3):
noise = torch.rand(len(words)).mul_(max_swap)
permutation = torch.arange(len(words)).float().add_(noise).sort()[1]
return [words[i] for i in permutation]
def word_dropout(words, dropout_prob=0.1):
keep = torch.rand(len(words))
dropped_out_words = [word for i, word in enumerate(words) if keep[i] > dropout_prob]
if len(dropped_out_words) == 0:
return [words[random.randint(0, len(words) - 1)]]
return dropped_out_words
def word_blank(words, blank_prob=0.1):
keep = torch.rand(len(words))
return [word if keep[i] > blank_prob else '<BLANK>' for i, word in enumerate(words)]
def add_noise(sentence):
words = to_words(sentence)
words = word_shuffle(words, max_swap=3)
words = word_dropout(words, dropout_prob=0.1)
words = word_blank(words, blank_prob=0.1)
return ' '.join(words)
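# Usage sketch for the noising helpers: shuffling, dropout and blanking are applied
# word-wise, so repeated calls on the same sentence yield different corrupted variants.
if __name__ == '__main__':
    sentence = 'the quick brown fox jumps over the lazy dog'
    print(add_noise(sentence))
    print(word_blank(to_words(sentence), blank_prob=0.5))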
| access-main | access/preprocess.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import wraps
from pathlib import Path
import shutil
import tempfile
from imohash import hashfile
from access.fairseq.base import fairseq_generate
from access.preprocessors import ComposedPreprocessor, load_preprocessors
from access.utils.helpers import count_lines
def memoize_simplifier(simplifier):
memo = {}
@wraps(simplifier)
def wrapped(complex_filepath, pred_filepath):
complex_filehash = hashfile(complex_filepath, hexdigest=True)
previous_pred_filepath = memo.get(complex_filehash)
if previous_pred_filepath is not None and Path(previous_pred_filepath).exists():
assert count_lines(complex_filepath) == count_lines(previous_pred_filepath)
# Reuse previous prediction
shutil.copyfile(previous_pred_filepath, pred_filepath)
else:
simplifier(complex_filepath, pred_filepath)
# Save prediction
memo[complex_filehash] = pred_filepath
return wrapped
def get_fairseq_simplifier(exp_dir, reload_preprocessors=False, **kwargs):
'''Method factory'''
@memoize_simplifier
def fairseq_simplifier(complex_filepath, output_pred_filepath):
# Trailing spaces for markdown formatting
print('simplifier_type="fairseq_simplifier" ')
print(f'exp_dir="{exp_dir}" ')
fairseq_generate(complex_filepath, output_pred_filepath, exp_dir, **kwargs)
preprocessors = None
if reload_preprocessors:
preprocessors = load_preprocessors(exp_dir)
if preprocessors is not None:
fairseq_simplifier = get_preprocessed_simplifier(fairseq_simplifier, preprocessors)
return fairseq_simplifier
def get_preprocessed_simplifier(simplifier, preprocessors):
composed_preprocessor = ComposedPreprocessor(preprocessors)
@memoize_simplifier
@wraps(simplifier)
def preprocessed_simplifier(complex_filepath, output_pred_filepath):
print(f'preprocessors={preprocessors}')
preprocessed_complex_filepath = tempfile.mkstemp()[1]
composed_preprocessor.encode_file(complex_filepath, preprocessed_complex_filepath)
preprocessed_output_pred_filepath = tempfile.mkstemp()[1]
simplifier(preprocessed_complex_filepath, preprocessed_output_pred_filepath)
composed_preprocessor.decode_file(preprocessed_output_pred_filepath,
output_pred_filepath,
encoder_filepath=complex_filepath)
preprocessed_simplifier.__name__ = f'{preprocessed_simplifier.__name__}_{composed_preprocessor.get_suffix()}'
return preprocessed_simplifier
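# Usage sketch (the experiment directory and file names are assumptions): wrap a trained
# fairseq model directory together with the preprocessors dumped alongside it, then
# simplify a file of complex sentences line by line.
#
# simplifier = get_fairseq_simplifier('experiments/my_exp_dir', reload_preprocessors=True)
# simplifier('complex_sentences.txt', 'predictions.txt')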
| access-main | access/simplifiers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import lru_cache
import Levenshtein
import numpy as np
from access.resources.paths import FASTTEXT_EMBEDDINGS_PATH
from access.resources.prepare import prepare_fasttext_embeddings
from access.text import (to_words, remove_punctuation_tokens, remove_stopwords, spacy_process)
from access.utils.helpers import yield_lines
@lru_cache(maxsize=1)
def get_word2rank(vocab_size=np.inf):
prepare_fasttext_embeddings()
# TODO: Decrease vocab size or load from smaller file
word2rank = {}
line_generator = yield_lines(FASTTEXT_EMBEDDINGS_PATH)
next(line_generator) # Skip the first line (header)
for i, line in enumerate(line_generator):
if (i + 1) > vocab_size:
break
word = line.split(' ')[0]
word2rank[word] = i
return word2rank
def get_rank(word):
return get_word2rank().get(word, len(get_word2rank()))
def get_log_rank(word):
return np.log(1 + get_rank(word))
def get_lexical_complexity_score(sentence):
words = to_words(remove_stopwords(remove_punctuation_tokens(sentence)))
words = [word for word in words if word in get_word2rank()]
if len(words) == 0:
return np.log(1 + len(get_word2rank())) # TODO: This is completely arbitrary
return np.quantile([get_log_rank(word) for word in words], 0.75)
def get_levenshtein_similarity(complex_sentence, simple_sentence):
return Levenshtein.ratio(complex_sentence, simple_sentence)
def get_dependency_tree_depth(sentence):
def get_subtree_depth(node):
if len(list(node.children)) == 0:
return 0
return 1 + max([get_subtree_depth(child) for child in node.children])
tree_depths = [get_subtree_depth(spacy_sentence.root) for spacy_sentence in spacy_process(sentence).sents]
if len(tree_depths) == 0:
return 0
return max(tree_depths)
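# Usage sketch: the feature extractors behind the control tokens are plain scalar
# functions of one or two sentences. Note that the word-rank and tree-depth features
# download the fastText vectors / the spaCy model on first use.
if __name__ == '__main__':
    complex_sentence = 'The incumbent was defeated in a landslide election.'
    simple_sentence = 'The leader lost the election by a lot.'
    print(get_levenshtein_similarity(complex_sentence, simple_sentence))
    print(get_lexical_complexity_score(simple_sentence))
    print(get_dependency_tree_depth(simple_sentence))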
| access-main | access/feature_extraction.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from abc import ABC
from functools import wraps, lru_cache
import hashlib
from pathlib import Path
import dill as pickle
import re
import shutil
from nevergrad.instrumentation import var
import numpy as np
import sentencepiece as spm
from access.feature_extraction import (get_lexical_complexity_score, get_levenshtein_similarity,
get_dependency_tree_depth)
from access.resources.paths import VARIOUS_DIR, get_data_filepath
from access.utils.helpers import (write_lines_in_parallel, yield_lines_in_parallel, add_dicts, get_default_args,
get_temp_filepath, safe_division, count_lines)
SPECIAL_TOKEN_REGEX = r'<[a-zA-Z\-_\d\.]+>'
PREPROCESSORS_REGISTRY = {}
def get_preprocessor_by_name(preprocessor_name):
return PREPROCESSORS_REGISTRY[preprocessor_name]
def get_preprocessors(preprocessor_kwargs):
preprocessors = []
for preprocessor_name, kwargs in preprocessor_kwargs.items():
preprocessors.append(get_preprocessor_by_name(preprocessor_name)(**kwargs))
return preprocessors
def extract_special_tokens(sentence):
'''Remove any number of token at the beginning of the sentence'''
match = re.match(fr'(^(?:{SPECIAL_TOKEN_REGEX} *)+) *(.*)$', sentence)
if match is None:
return '', sentence
special_tokens, sentence = match.groups()
return special_tokens.strip(), sentence
def remove_special_tokens(sentence):
return extract_special_tokens(sentence)[1]
def store_args(constructor):
@wraps(constructor)
def wrapped(self, *args, **kwargs):
if not hasattr(self, 'args') or not hasattr(self, 'kwargs'):
# TODO: Default args are not overwritten if provided as args
self.args = args
self.kwargs = add_dicts(get_default_args(constructor), kwargs)
return constructor(self, *args, **kwargs)
return wrapped
def dump_preprocessors(preprocessors, dir_path):
with open(Path(dir_path) / 'preprocessors.pickle', 'wb') as f:
pickle.dump(preprocessors, f)
def load_preprocessors(dir_path):
path = Path(dir_path) / 'preprocessors.pickle'
if not path.exists():
return None
with open(path, 'rb') as f:
return pickle.load(f)
class AbstractPreprocessor(ABC):
def __init_subclass__(cls, **kwargs):
'''Register all children in registry'''
super().__init_subclass__(**kwargs)
PREPROCESSORS_REGISTRY[cls.__name__] = cls
def __repr__(self):
args = getattr(self, 'args', ())
kwargs = getattr(self, 'kwargs', {})
args_repr = [repr(arg) for arg in args]
kwargs_repr = [f'{k}={repr(v)}' for k, v in sorted(kwargs.items(), key=lambda kv: kv[0])]
args_kwargs_str = ', '.join(args_repr + kwargs_repr)
return f'{self.__class__.__name__}({args_kwargs_str})'
def get_hash_string(self):
return self.__class__.__name__
def get_hash(self):
return hashlib.md5(self.get_hash_string().encode()).hexdigest()
def get_nevergrad_variables(self):
return {}
@property
def prefix(self):
return self.__class__.__name__.replace('Preprocessor', '')
def fit(self, complex_filepath, simple_filepath):
pass
def encode_sentence(self, sentence, encoder_sentence=None):
raise NotImplementedError
def decode_sentence(self, sentence, encoder_sentence=None):
raise NotImplementedError
def encode_sentence_pair(self, complex_sentence, simple_sentence):
if complex_sentence is not None:
complex_sentence = self.encode_sentence(complex_sentence)
if simple_sentence is not None:
simple_sentence = self.encode_sentence(simple_sentence)
return complex_sentence, simple_sentence
def encode_file(self, input_filepath, output_filepath, encoder_filepath=None):
if encoder_filepath is None:
# We will use an empty temporary file which will yield None for each line
encoder_filepath = get_temp_filepath(create=True)
with open(output_filepath, 'w') as f:
for input_line, encoder_line in yield_lines_in_parallel([input_filepath, encoder_filepath], strict=False):
f.write(self.encode_sentence(input_line, encoder_line) + '\n')
def decode_file(self, input_filepath, output_filepath, encoder_filepath=None):
if encoder_filepath is None:
# We will use an empty temporary file which will yield None for each line
encoder_filepath = get_temp_filepath(create=True)
with open(output_filepath, 'w') as f:
for encoder_sentence, input_sentence in yield_lines_in_parallel([encoder_filepath, input_filepath],
strict=False):
decoded_sentence = self.decode_sentence(input_sentence, encoder_sentence=encoder_sentence)
f.write(decoded_sentence + '\n')
def encode_file_pair(self, complex_filepath, simple_filepath, output_complex_filepath, output_simple_filepath):
'''Jointly encode a complex file and a simple file (can be aligned or not)'''
with write_lines_in_parallel([output_complex_filepath, output_simple_filepath], strict=False) as output_files:
for complex_line, simple_line in yield_lines_in_parallel([complex_filepath, simple_filepath], strict=False):
output_files.write(self.encode_sentence_pair(complex_line, simple_line))
class ComposedPreprocessor(AbstractPreprocessor):
@store_args
def __init__(self, preprocessors, sort=False):
if preprocessors is None:
preprocessors = []
if sort:
# Make sure preprocessors are always in the same order
preprocessors = sorted(preprocessors, key=lambda preprocessor: preprocessor.__class__.__name__)
self.preprocessors = preprocessors
def get_hash_string(self):
preprocessors_hash_strings = [preprocessor.get_hash_string() for preprocessor in self.preprocessors]
return f'ComposedPreprocessor(preprocessors={preprocessors_hash_strings})'
def get_suffix(self):
return '_'.join([p.prefix.lower() for p in self.preprocessors])
def fit(self, complex_filepath, simple_filepath):
for preprocessor in self.preprocessors:
pass
def encode_sentence(self, sentence, encoder_sentence=None):
for preprocessor in self.preprocessors:
sentence = preprocessor.encode_sentence(sentence, encoder_sentence)
return sentence
def decode_sentence(self, sentence, encoder_sentence=None):
for preprocessor in self.preprocessors:
sentence = preprocessor.decode_sentence(sentence, encoder_sentence)
return sentence
def encode_file(self, input_filepath, output_filepath, encoder_filepath=None):
for preprocessor in self.preprocessors:
intermediary_output_filepath = get_temp_filepath()
preprocessor.encode_file(input_filepath, intermediary_output_filepath, encoder_filepath)
input_filepath = intermediary_output_filepath
shutil.copyfile(input_filepath, output_filepath)
def decode_file(self, input_filepath, output_filepath, encoder_filepath=None):
for preprocessor in self.preprocessors:
intermediary_output_filepath = get_temp_filepath()
preprocessor.decode_file(input_filepath, intermediary_output_filepath, encoder_filepath)
input_filepath = intermediary_output_filepath
shutil.copyfile(input_filepath, output_filepath)
def encode_file_pair(self, complex_filepath, simple_filepath, output_complex_filepath, output_simple_filepath):
for preprocessor in self.preprocessors:
intermediary_output_complex_filepath = get_temp_filepath()
intermediary_output_simple_filepath = get_temp_filepath()
preprocessor.encode_file_pair(complex_filepath, simple_filepath, intermediary_output_complex_filepath,
intermediary_output_simple_filepath)
complex_filepath = intermediary_output_complex_filepath
simple_filepath = intermediary_output_simple_filepath
shutil.copyfile(complex_filepath, output_complex_filepath)
shutil.copyfile(simple_filepath, output_simple_filepath)
def encode_sentence_pair(self, complex_sentence, simple_sentence):
for preprocessor in self.preprocessors:
complex_sentence, simple_sentence = preprocessor.encode_sentence_pair(complex_sentence, simple_sentence)
return complex_sentence, simple_sentence
class FeaturePreprocessor(AbstractPreprocessor):
'''Prepend a computed feature at the beginning of the sentence'''
@store_args
def __init__(self, feature_name, get_feature_value, get_target_feature_value, bucket_size=0.05, noise_std=0):
self.get_feature_value = get_feature_value
self.get_target_feature_value = get_target_feature_value
self.bucket_size = bucket_size
self.noise_std = noise_std
self.feature_name = feature_name.upper()
def get_hash_string(self):
return (f'{self.__class__.__name__}(feature_name={repr(self.feature_name)}, bucket_size={self.bucket_size},'
f'noise_std={self.noise_std})')
def bucketize(self, value):
'''Round value to bucket_size to reduce the number of different values'''
return round(round(value / self.bucket_size) * self.bucket_size, 10)
def add_noise(self, value):
return value + np.random.normal(0, self.noise_std)
def get_feature_token(self, feature_value):
return f'<{self.feature_name}_{feature_value}>'
def encode_sentence(self, sentence, encoder_sentence=None):
desired_feature = self.bucketize(self.get_target_feature_value(remove_special_tokens(sentence)))
return f'{self.get_feature_token(desired_feature)} {sentence}'
def decode_sentence(self, sentence, encoder_sentence=None):
return sentence
def encode_sentence_pair(self, complex_sentence, simple_sentence):
feature = self.bucketize(
self.add_noise(
self.get_feature_value(remove_special_tokens(complex_sentence),
remove_special_tokens(simple_sentence))))
return f'{self.get_feature_token(feature)} {complex_sentence}', simple_sentence
class LevenshteinPreprocessor(FeaturePreprocessor):
@store_args
def __init__(self, target_ratio=0.8, bucket_size=0.05, noise_std=0):
self.target_ratio = target_ratio
super().__init__(self.prefix.upper(), self.get_feature_value, self.get_target_feature_value, bucket_size,
noise_std)
def get_nevergrad_variables(self):
return {'target_ratio': var.OrderedDiscrete(np.arange(0.4, 1 + 1e-6, self.bucket_size))}
def get_feature_value(self, complex_sentence, simple_sentence):
return get_levenshtein_similarity(complex_sentence, simple_sentence)
def get_target_feature_value(self, complex_sentence):
return self.target_ratio
class RatioPreprocessor(FeaturePreprocessor):
@store_args
def __init__(self, feature_extractor, target_ratio=0.8, bucket_size=0.05, noise_std=0):
self.feature_extractor = feature_extractor
self.target_ratio = target_ratio
super().__init__(self.prefix.upper(), self.get_feature_value, self.get_target_feature_value, bucket_size,
noise_std)
def get_nevergrad_variables(self):
return {'target_ratio': var.OrderedDiscrete(np.arange(0.4, 1.4 + 1e-6, self.bucket_size))}
def get_feature_value(self, complex_sentence, simple_sentence):
return min(safe_division(self.feature_extractor(simple_sentence), self.feature_extractor(complex_sentence)), 2)
def get_target_feature_value(self, complex_sentence):
return self.target_ratio
class LengthRatioPreprocessor(RatioPreprocessor):
@store_args
def __init__(self, *args, **kwargs):
super().__init__(len, *args, **kwargs)
class WordRankRatioPreprocessor(RatioPreprocessor):
@store_args
def __init__(self, *args, **kwargs):
super().__init__(get_lexical_complexity_score, *args, **kwargs)
class DependencyTreeDepthRatioPreprocessor(RatioPreprocessor):
@store_args
def __init__(self, *args, **kwargs):
super().__init__(get_dependency_tree_depth, *args, **kwargs)
class SentencePiecePreprocessor(AbstractPreprocessor):
@store_args
def __init__(self, vocab_size=10000, input_filepaths=None):
self.vocab_size = vocab_size
self.sentencepiece_model_path = VARIOUS_DIR / f'sentencepiece_model/sentencepiece_model_{self.vocab_size}.model'
self.input_filepaths = input_filepaths
if self.input_filepaths is None:
self.input_filepaths = [
get_data_filepath('wikilarge', 'train', 'complex'),
get_data_filepath('wikilarge', 'train', 'simple')
]
self.learn_sentencepiece()
@property
@lru_cache(maxsize=1)
def sp(self):
'''
We need to use a property because SentencePieceProcessor cannot be pickled:
> pickle.dumps(spm.SentencePieceProcessor())
----> TypeError: can't pickle SwigPyObject objects
'''
sp = spm.SentencePieceProcessor()
sp.Load(str(self.sentencepiece_model_path))
return sp
def get_hash_string(self):
return f'{self.__class__.__name__}(vocab_size={self.vocab_size})'
def learn_sentencepiece(self):
if self.sentencepiece_model_path.exists():
return
self.sentencepiece_model_path.parent.mkdir(parents=True, exist_ok=True)
sentencepiece_model_prefix = self.sentencepiece_model_path.parent / self.sentencepiece_model_path.stem
args_str = ' '.join([
f'--input={",".join([str(path) for path in self.input_filepaths])}',
f'--model_prefix={sentencepiece_model_prefix}',
f'--vocab_size={self.vocab_size}',
])
max_lines = 10**6
if sum([count_lines(filepath) for filepath in self.input_filepaths]) > max_lines:
args_str += f' --input_sentence_size={max_lines} --shuffle_input_sentence=true'
spm.SentencePieceTrainer.Train(args_str)
def fit(self, complex_filepath, simple_filepath):
# Args are not used
self.learn_sentencepiece()
def encode_sentence(self, sentence, encoder_sentence=None):
# TODO: Do we really need to extract the tokens
special_tokens, sentence = extract_special_tokens(sentence)
encoded_sentence = ' '.join(self.sp.EncodeAsPieces(sentence))
if special_tokens != '':
encoded_sentence = f'{special_tokens} {encoded_sentence}'
return encoded_sentence
def decode_sentence(self, sentence, encoder_sentence=None):
return self.sp.DecodePieces(sentence.split(' '))
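# Usage sketch: build control-token preprocessors from keyword arguments and prepend
# the corresponding tokens to a complex sentence (only the cheap, download-free
# preprocessors are used here).
if __name__ == '__main__':
    preprocessor_kwargs = {
        'LengthRatioPreprocessor': {'target_ratio': 0.8},
        'LevenshteinPreprocessor': {'target_ratio': 0.8},
    }
    composed = ComposedPreprocessor(get_preprocessors(preprocessor_kwargs))
    print(composed.encode_sentence('This sentence will be prefixed with control tokens .'))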
| access-main | access/preprocessors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import lru_cache
import re
from string import punctuation
from nltk.tokenize.nist import NISTTokenizer
from nltk.corpus import stopwords as nltk_stopwords
import spacy
# TODO: #language_specific
stopwords = set(nltk_stopwords.words('english'))
@lru_cache(maxsize=100) # To speed up subsequent calls
def word_tokenize(sentence):
tokenizer = NISTTokenizer()
sentence = ' '.join(tokenizer.tokenize(sentence))
# Rejoin special tokens that were tokenized by mistake: e.g. "<PERSON_1>" -> "< PERSON _ 1 >"
for match in re.finditer(r'< (?:[A-Z]+ _ )+\d+ >', sentence):
sentence = sentence.replace(match.group(), ''.join(match.group().split()))
return sentence
def to_words(sentence):
return sentence.split()
def remove_punctuation_characters(text):
return ''.join([char for char in text if char not in punctuation])
@lru_cache(maxsize=1000)
def is_punctuation(word):
return remove_punctuation_characters(word) == ''
@lru_cache(maxsize=100)
def remove_punctuation_tokens(text):
return ' '.join([w for w in to_words(text) if not is_punctuation(w)])
def remove_stopwords(text):
return ' '.join([w for w in to_words(text) if w.lower() not in stopwords])
@lru_cache(maxsize=1)
def get_spacy_model():
model = 'en_core_web_sm'
if not spacy.util.is_package(model):
spacy.cli.download(model)
spacy.cli.link(model, model, force=True, model_path=spacy.util.get_package_path(model))
return spacy.load(model)  # python -m spacy download en_core_web_sm
@lru_cache(maxsize=10**6)
def spacy_process(text):
return get_spacy_model()(str(text))
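# Usage sketch (requires the relevant NLTK data to be installed): NIST tokenization
# keeps special placeholder tokens such as <PERSON_1> intact, and the helpers below
# operate on the already-tokenized string.
if __name__ == '__main__':
    sentence = '<PERSON_1> quickly walked to the station , did he not ?'
    tokenized = word_tokenize(sentence)
    print(tokenized)
    print(remove_punctuation_tokens(tokenized))
    print(remove_stopwords(tokenized))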
| access-main | access/text.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from itertools import product
from pathlib import Path
REPO_DIR = Path(__file__).resolve().parent.parent.parent
EXP_DIR = REPO_DIR / 'experiments'
RESOURCES_DIR = REPO_DIR / 'resources'
DATASETS_DIR = RESOURCES_DIR / 'datasets'
VARIOUS_DIR = RESOURCES_DIR / 'various'
FASTTEXT_EMBEDDINGS_PATH = VARIOUS_DIR / 'fasttext-vectors/wiki.en.vec'
MODELS_DIR = RESOURCES_DIR / 'models'
BEST_MODEL_DIR = MODELS_DIR / 'best_model'
LANGUAGES = ['complex', 'simple']
PHASES = ['train', 'valid', 'test']
def get_dataset_dir(dataset):
return DATASETS_DIR / dataset
def get_data_filepath(dataset, phase, language, i=None):
suffix = '' # Create suffix e.g. for multiple references
if i is not None:
suffix = f'.{i}'
filename = f'{dataset}.{phase}.{language}{suffix}'
return get_dataset_dir(dataset) / filename
def get_filepaths_dict(dataset):
return {(phase, language): get_data_filepath(dataset, phase, language)
for phase, language in product(PHASES, LANGUAGES)}
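# Usage sketch: dataset files follow the <dataset>.<phase>.<language> naming scheme
# under resources/datasets/<dataset>/.
if __name__ == '__main__':
    print(get_data_filepath('wikilarge', 'train', 'complex'))
    # e.g. .../resources/datasets/wikilarge/wikilarge.train.complex
    print(get_filepaths_dict('wikilarge')[('valid', 'simple')])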
| access-main | access/resources/paths.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import hashlib
from pathlib import Path
from access.preprocess import get_parallel_file_pair_preprocessor
from access.preprocessors import dump_preprocessors, load_preprocessors
from access.resources.paths import PHASES, get_dataset_dir, get_data_filepath, get_filepaths_dict
from access.utils.helpers import count_lines, read_lines, create_directory_or_skip
def yield_indexes_of_lines(filepath, lines):
lines = set(lines)
with Path(filepath).open('r') as f:
for idx, line in enumerate(f):
if line.strip('\n') in lines:
yield idx
def sort_files_by_line_count(filepaths):
return sorted(filepaths, key=lambda filepath: count_lines(filepath))
def has_lines_in_common(filepath1, filepath2):
[smallest_filepath, largest_filepath] = sort_files_by_line_count([filepath1, filepath2])
for idx in yield_indexes_of_lines(largest_filepath, read_lines(smallest_filepath)):
return True
return False
def get_preprocessed_dataset_name(dataset, preprocessor):
return '_' + hashlib.md5((dataset + preprocessor.get_hash()).encode()).hexdigest()
def create_preprocessed_dataset_one_preprocessor(dataset, preprocessor, n_jobs):
new_dataset = get_preprocessed_dataset_name(dataset, preprocessor)
with create_directory_or_skip(get_dataset_dir(new_dataset)):
print(f'Creating preprocessed dataset with {preprocessor}: {dataset} -> {new_dataset}')
new_dataset_dir = get_dataset_dir(new_dataset)
filepaths_dict = get_filepaths_dict(dataset)
new_filepaths_dict = get_filepaths_dict(new_dataset)
for phase in PHASES:
if not filepaths_dict[phase, 'complex'].exists() or not filepaths_dict[phase, 'simple'].exists():
continue
parallel_file_pair_preprocessor = get_parallel_file_pair_preprocessor(
preprocessor.encode_file_pair,
n_jobs=n_jobs,
)
parallel_file_pair_preprocessor(filepaths_dict[phase, 'complex'], filepaths_dict[phase, 'simple'],
new_filepaths_dict[phase, 'complex'], new_filepaths_dict[phase, 'simple'])
previous_preprocessors = load_preprocessors(get_dataset_dir(dataset))
if previous_preprocessors is not None:
preprocessors = previous_preprocessors + [preprocessor]
else:
preprocessors = [preprocessor]
dump_preprocessors(preprocessors, new_dataset_dir)
with open(new_dataset_dir / 'original_dataset', 'w') as f:
f.write(dataset + '\n')
return new_dataset
def create_preprocessed_dataset(dataset, preprocessors, n_jobs=1):
for preprocessor in preprocessors:
# Fit preprocessor on input dataset
preprocessor.fit(get_data_filepath(dataset, 'train', 'complex'), get_data_filepath(dataset, 'train', 'simple'))
dataset = create_preprocessed_dataset_one_preprocessor(dataset, preprocessor, n_jobs)
return dataset
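# Usage sketch (assumes the wikilarge dataset is already present on disk): chain
# several fitted preprocessors over an aligned dataset; each step creates a new
# hashed dataset directory under resources/datasets/.
#
# from access.preprocessors import LengthRatioPreprocessor, LevenshteinPreprocessor
# preprocessed_dataset = create_preprocessed_dataset(
#     'wikilarge',
#     [LengthRatioPreprocessor(target_ratio=0.8), LevenshteinPreprocessor(target_ratio=0.8)],
#     n_jobs=1,
# )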
| access-main | access/resources/datasets.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import bz2
import gzip
import os
from pathlib import Path
import shutil
import sys
import tarfile
import tempfile
import time
from urllib.request import urlretrieve
import zipfile
import git
from tqdm import tqdm
def reporthook(count, block_size, total_size):
# Download progress bar
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size_mb = count * block_size / (1024 * 1024)
speed = progress_size_mb / duration
percent = int(count * block_size * 100 / total_size)
msg = f'\r... {percent}% - {int(progress_size_mb)} MB - {speed:.2f} MB/s - {int(duration)}s'
sys.stdout.write(msg)
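# download() below passes this function as the reporthook argument of
# urllib.request.urlretrieve, which calls it once when the connection is opened
# (count == 0, used here to record start_time) and then after every block read
# as reporthook(count, block_size, total_size). Illustrative call (URL and paths
# are placeholders):
#
#     urlretrieve('https://example.com/archive.tar.gz', '/tmp/archive.tar.gz', reporthook)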
def download(url, destination_path):
print('Downloading...')
try:
urlretrieve(url, destination_path, reporthook)
sys.stdout.write('\n')
except (Exception, KeyboardInterrupt, SystemExit):
print('Rolling back: remove partially downloaded file')
os.remove(destination_path)
raise
def download_and_extract(url):
tmp_dir = Path(tempfile.mkdtemp())
compressed_filename = url.split('/')[-1]
compressed_filepath = tmp_dir / compressed_filename
download(url, compressed_filepath)
print('Extracting...')
return extract(compressed_filepath, tmp_dir)
def extract(filepath, output_dir):
# Infer extract method based on extension
extensions_to_methods = {
'.tar.gz': untar,
'.tar.bz2': untar,
'.tgz': untar,
'.zip': unzip,
'.gz': ungzip,
'.bz2': unbz2,
}
def get_extension(filename, extensions):
possible_extensions = [ext for ext in extensions if filename.endswith(ext)]
if len(possible_extensions) == 0:
raise Exception(f'File {filename} has an unknown extension')
# Take the longest (.tar.gz should take precedence over .gz)
return max(possible_extensions, key=lambda ext: len(ext))
filename = os.path.basename(filepath)
extension = get_extension(filename, list(extensions_to_methods))
extract_method = extensions_to_methods[extension]
# Extract files in a temporary dir then move the extracted item back to
    # the output dir in order to get the details of what was extracted
tmp_extract_dir = tempfile.mkdtemp()
# Extract
extract_method(filepath, output_dir=tmp_extract_dir)
extracted_items = os.listdir(tmp_extract_dir)
output_paths = []
for name in extracted_items:
extracted_path = os.path.join(tmp_extract_dir, name)
output_path = os.path.join(output_dir, name)
move_with_overwrite(extracted_path, output_path)
output_paths.append(output_path)
return output_paths
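# Illustrative use of extract() (paths are placeholders):
#
#     paths = extract('/tmp/data-simplification.tar.bz2', '/tmp/output')
#
# paths lists the top-level items that were extracted: extraction happens in a
# scratch directory first, and each item is then moved into output_dir so the
# caller knows exactly what the archive contained.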
def move_with_overwrite(source_path, target_path):
if os.path.isfile(target_path):
os.remove(target_path)
if os.path.isdir(target_path) and os.path.isdir(source_path):
shutil.rmtree(target_path)
shutil.move(source_path, target_path)
def untar(compressed_path, output_dir):
with tarfile.open(compressed_path) as f:
f.extractall(output_dir)
def unzip(compressed_path, output_dir):
with zipfile.ZipFile(compressed_path, 'r') as f:
f.extractall(output_dir)
def ungzip(compressed_path, output_dir):
filename = os.path.basename(compressed_path)
assert filename.endswith('.gz')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_path = os.path.join(output_dir, filename[:-3])
with gzip.open(compressed_path, 'rb') as f_in:
with open(output_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def unbz2(compressed_path, output_dir):
extract_filename = os.path.basename(compressed_path).replace('.bz2', '')
extract_path = os.path.join(output_dir, extract_filename)
with bz2.BZ2File(compressed_path, 'rb') as compressed_file, open(extract_path, 'wb') as extract_file:
for data in tqdm(iter(lambda: compressed_file.read(1024 * 1024), b'')):
extract_file.write(data)
def add_newline_at_end_of_file(file_path):
with open(file_path, 'r') as f:
last_character = f.readlines()[-1][-1]
if last_character == '\n':
return
print(f'Adding newline at the end of {file_path}')
with open(file_path, 'a') as f:
f.write('\n')
def git_clone(url, output_dir, overwrite=True):
    if Path(output_dir).exists():
        if not overwrite:
            return
        shutil.rmtree(output_dir)
    git.Repo.clone_from(url, output_dir)
def replace_lrb_rrb_file(filepath):
tmp_filepath = filepath + '.tmp'
with open(filepath, 'r') as input_file, open(tmp_filepath, 'w') as output_file:
for line in input_file:
output_file.write(line.replace('-lrb-', '(').replace('-rrb-', ')'))
os.rename(tmp_filepath, filepath)
| access-main | access/resources/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from glob import glob
import os
from pathlib import Path
import shutil
import tempfile
import numpy as np
from access.text import word_tokenize
from access.utils.helpers import (yield_lines_in_parallel, write_lines_in_parallel, create_directory_or_skip,
lock_directory)
from access.preprocess import replace_lrb_rrb, replace_lrb_rrb_file, normalize_quotes
from access.resources.utils import download_and_extract, add_newline_at_end_of_file, git_clone
from access.resources.paths import (FASTTEXT_EMBEDDINGS_PATH, get_dataset_dir, get_data_filepath, PHASES, MODELS_DIR,
BEST_MODEL_DIR)
def prepare_wikilarge():
dataset = 'wikilarge'
with create_directory_or_skip(get_dataset_dir(dataset)):
url = 'https://github.com/louismartin/dress-data/raw/master/data-simplification.tar.bz2'
extracted_path = download_and_extract(url)[0]
# Only rename files and put them in local directory architecture
for phase in PHASES:
for (old_language_name, new_language_name) in [('src', 'complex'), ('dst', 'simple')]:
old_path_glob = os.path.join(extracted_path, dataset, f'*.ori.{phase}.{old_language_name}')
globs = glob(old_path_glob)
assert len(globs) == 1
old_path = globs[0]
new_path = get_data_filepath(dataset, phase, new_language_name)
shutil.copyfile(old_path, new_path)
shutil.move(replace_lrb_rrb_file(new_path), new_path)
add_newline_at_end_of_file(new_path)
return dataset
def prepare_turkcorpus_lower():
dataset = 'turkcorpus_lower'
with create_directory_or_skip(get_dataset_dir(dataset)):
url = 'https://github.com/cocoxu/simplification.git'
output_dir = Path(tempfile.mkdtemp())
git_clone(url, output_dir)
print(output_dir)
print('Processing...')
# Only rename files and put them in local directory architecture
turkcorpus_lower_dir = output_dir / 'data/turkcorpus'
print(turkcorpus_lower_dir)
for (old_phase, new_phase) in [('test', 'test'), ('tune', 'valid')]:
for (old_language_name, new_language_name) in [('norm', 'complex'), ('simp', 'simple')]:
old_path = turkcorpus_lower_dir / f'{old_phase}.8turkers.tok.{old_language_name}'
new_path = get_data_filepath('turkcorpus_lower', new_phase, new_language_name)
shutil.copyfile(old_path, new_path)
add_newline_at_end_of_file(new_path)
shutil.move(replace_lrb_rrb_file(new_path), new_path)
for i in range(8):
old_path = turkcorpus_lower_dir / f'{old_phase}.8turkers.tok.turk.{i}'
new_path = get_data_filepath('turkcorpus_lower', new_phase, 'simple.turk', i=i)
shutil.copyfile(old_path, new_path)
add_newline_at_end_of_file(new_path)
shutil.move(replace_lrb_rrb_file(new_path), new_path)
print('Done.')
return dataset
def prepare_turkcorpus():
dataset = 'turkcorpus'
with create_directory_or_skip(get_dataset_dir(dataset)):
# Import here to avoid circular imports
from access.feature_extraction import get_levenshtein_similarity
prepare_turkcorpus_lower()
url = 'https://github.com/cocoxu/simplification.git'
output_dir = Path(tempfile.mkdtemp())
git_clone(url, output_dir)
print('Processing...')
# Only rename files and put them in local directory architecture
turkcorpus_truecased_dir = output_dir / 'data/turkcorpus/truecased'
for (old_phase, new_phase) in [('test', 'test'), ('tune', 'valid')]:
# (1) read the .tsv for which each line is tab separated:
# `idx, complex_sentence, *turk_sentences = line.split('\t')`
# (2) replace lrb and rrb, tokenize
# (3) Turk sentences are shuffled for each sample so need to realign them with turkcorpus lower
tsv_filepath = turkcorpus_truecased_dir / f'{old_phase}.8turkers.organized.tsv'
output_complex_filepath = get_data_filepath(dataset, new_phase, 'complex')
output_ref_filepaths = [get_data_filepath(dataset, new_phase, 'simple.turk', i) for i in range(8)]
# These files will be used to reorder the shuffled ref sentences
ordered_ref_filepaths = [
get_data_filepath('turkcorpus_lower', new_phase, 'simple.turk', i) for i in range(8)
]
with write_lines_in_parallel([output_complex_filepath] + output_ref_filepaths) as files:
input_filepaths = [tsv_filepath] + ordered_ref_filepaths
for tsv_line, *ordered_ref_sentences in yield_lines_in_parallel(input_filepaths):
sample_id, complex_sentence, *shuffled_ref_sentences = [
word_tokenize(normalize_quotes(replace_lrb_rrb(s))) for s in tsv_line.split('\t')
]
reordered_sentences = []
for ordered_ref_sentence in ordered_ref_sentences:
# Find the position of the ref_sentence in the shuffled sentences
similarities = [
get_levenshtein_similarity(ordered_ref_sentence.replace(' ', ''),
shuffled_ref_sentence.lower().replace(' ', ''))
for shuffled_ref_sentence in shuffled_ref_sentences
]
idx = np.argmax(similarities)
# A few sentences have differing punctuation marks
assert similarities[idx] > 0.98, \
f'{ordered_ref_sentence} != {shuffled_ref_sentences[idx].lower()} {similarities[idx]:.2f}'
reordered_sentences.append(shuffled_ref_sentences.pop(idx))
assert len(shuffled_ref_sentences) == 0
assert len(reordered_sentences) == 8
files.write([complex_sentence] + reordered_sentences)
return dataset
def prepare_fasttext_embeddings():
FASTTEXT_EMBEDDINGS_PATH.parent.mkdir(parents=True, exist_ok=True)
with lock_directory(FASTTEXT_EMBEDDINGS_PATH.parent):
if FASTTEXT_EMBEDDINGS_PATH.exists():
return
url = 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.en.300.vec.gz'
extracted_path = download_and_extract(url)[0]
shutil.move(extracted_path, FASTTEXT_EMBEDDINGS_PATH)
def prepare_models():
MODELS_DIR.mkdir(parents=True, exist_ok=True)
if not BEST_MODEL_DIR.exists():
url = 'http://dl.fbaipublicfiles.com/access/best_model.tar.gz'
extracted_path = download_and_extract(url)[0]
shutil.move(extracted_path, BEST_MODEL_DIR)
all_parameters_model_dir = MODELS_DIR / 'all_parameters_model'
if not all_parameters_model_dir.exists():
url = 'http://dl.fbaipublicfiles.com/access/all_parameters_model.tar.gz'
extracted_path = download_and_extract(url)[0]
shutil.move(extracted_path, all_parameters_model_dir)
return BEST_MODEL_DIR
| access-main | access/resources/prepare.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from collections import defaultdict
from functools import lru_cache
import shutil
from nevergrad.instrumentation import Instrumentation
from nevergrad.optimization import optimizerlib
import re
from access.evaluation.general import evaluate_simplifier_on_turkcorpus
from access.evaluation.utils import combine_metrics
from access.fairseq.base import (fairseq_preprocess, fairseq_train, fairseq_generate, get_fairseq_exp_dir,
)
from access.resources.datasets import has_lines_in_common
from access.preprocessors import get_preprocessors, get_preprocessor_by_name
from access.resources.datasets import create_preprocessed_dataset
from access.resources.paths import get_data_filepath, get_dataset_dir
from access.simplifiers import get_fairseq_simplifier, get_preprocessed_simplifier
from access.utils.training import (print_method_name, print_args, print_result, print_running_time,
)
from access.utils.helpers import get_allowed_kwargs
def check_dataset(dataset):
# Sanity check with evaluation dataset
assert not has_lines_in_common(get_data_filepath(dataset, 'train', 'complex'),
get_data_filepath('turkcorpus', 'valid', 'complex'))
assert not has_lines_in_common(get_data_filepath(dataset, 'train', 'complex'),
get_data_filepath('turkcorpus', 'test', 'complex'))
def prepare_exp_dir():
exp_dir = get_fairseq_exp_dir()
if exp_dir.exists():
# Remove exp dir to prevent conflicts with requeue and non deterministic args
# https://github.com/fairinternal/dfoptim/issues/126 #private
shutil.rmtree(exp_dir)
exp_dir.mkdir(parents=True)
return exp_dir
def get_simplifier(exp_dir, preprocessors_kwargs, generate_kwargs):
# TODO: Take kwargs as input and separate between get_preprocessors kwargs and generate_kwargs
preprocessors = get_preprocessors(preprocessors_kwargs)
simplifier = get_fairseq_simplifier(exp_dir, **generate_kwargs)
return get_preprocessed_simplifier(simplifier, preprocessors=preprocessors)
def find_best_parametrization(exp_dir, metrics_coefs, preprocessors_kwargs, parametrization_budget=64):
@lru_cache()
def evaluate_parametrization(**instru_kwargs):
# Note that we use default generate kwargs instead of provided one because they are faster
preprocessors_kwargs = instru_kwargs_to_preprocessors_kwargs(instru_kwargs)
simplifier = get_simplifier(exp_dir, preprocessors_kwargs=preprocessors_kwargs, generate_kwargs={})
scores = evaluate_simplifier_on_turkcorpus(simplifier, phase='valid')
return combine_metrics(scores['BLEU'], scores['SARI'], scores['FKGL'], metrics_coefs)
def preprocessors_kwargs_to_instru_kwargs(preprocessors_kwargs):
instru_kwargs = {}
for preprocessor_name, preprocessor_kwargs in preprocessors_kwargs.items():
assert '_' not in preprocessor_name
preprocessor = get_preprocessor_by_name(preprocessor_name)(**preprocessor_kwargs)
# First we set the values from preprocessors_kwargs which are constant
for kwarg_name, kwarg_value in preprocessor_kwargs.items():
instru_kwargs[f'{preprocessor_name}_{kwarg_name}'] = kwarg_value
# Then we overwrite some of these values with nevergrad variables when necessary
for kwarg_name, kwarg_value in preprocessor.get_nevergrad_variables().items():
instru_kwargs[f'{preprocessor_name}_{kwarg_name}'] = kwarg_value
return instru_kwargs
def instru_kwargs_to_preprocessors_kwargs(instru_kwargs):
preprocessors_kwargs = defaultdict(dict)
for key, value in instru_kwargs.items():
preprocessor_name, kwarg_name = re.match(r'([a-zA-Z0-9]+)_([a-z0-9_]+)', key).groups()
preprocessors_kwargs[preprocessor_name][kwarg_name] = value
return dict(preprocessors_kwargs)
instru_kwargs = preprocessors_kwargs_to_instru_kwargs(preprocessors_kwargs)
instru = Instrumentation(**instru_kwargs)
if instru.dimension == 0:
return preprocessors_kwargs
    # No need to search a lot when there are only a few parameters
parametrization_budget = min(32**instru.dimension, parametrization_budget)
optimizer = optimizerlib.ScrHammersleySearch(instrumentation=instru, budget=parametrization_budget, num_workers=1)
recommendation = optimizer.optimize(evaluate_parametrization, verbosity=0)
return instru_kwargs_to_preprocessors_kwargs(recommendation.kwargs)
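# Naming convention used by the two converters inside find_best_parametrization
# (values are illustrative):
#
#     preprocessors_kwargs = {'LengthRatioPreprocessor': {'target_ratio': 0.8}}
#     instru_kwargs        = {'LengthRatioPreprocessor_target_ratio': 0.8}
#
# i.e. keys are flattened to '<PreprocessorName>_<kwarg_name>', which is why the
# assert above requires preprocessor class names to contain no underscores.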
def check_and_resolve_args(kwargs):
if kwargs.get('diverse_beam_groups_ratio', None) is not None:
diverse_beam_groups = max(int(kwargs['beam'] * kwargs['diverse_beam_groups_ratio']), 1)
print(f'diverse_beam_groups={diverse_beam_groups}')
assert kwargs['beam'] % diverse_beam_groups == 0
kwargs['diverse_beam_groups'] = diverse_beam_groups
return kwargs
@print_method_name
@print_args
@print_result
@print_running_time
def fairseq_train_and_evaluate(dataset, metrics_coefs=[1, 1, 1], parametrization_budget=64, **kwargs):
check_dataset(dataset)
kwargs = check_and_resolve_args(kwargs)
exp_dir = prepare_exp_dir()
preprocessors_kwargs = kwargs.get('preprocessors_kwargs', {})
preprocessors = get_preprocessors(preprocessors_kwargs)
if len(preprocessors) > 0:
dataset = create_preprocessed_dataset(dataset, preprocessors, n_jobs=1)
shutil.copy(get_dataset_dir(dataset) / 'preprocessors.pickle', exp_dir)
preprocessed_dir = fairseq_preprocess(dataset)
train_kwargs = get_allowed_kwargs(fairseq_train, preprocessed_dir, exp_dir, **kwargs)
fairseq_train(preprocessed_dir, exp_dir=exp_dir, **train_kwargs)
# Evaluation
generate_kwargs = get_allowed_kwargs(fairseq_generate, 'complex_filepath', 'pred_filepath', exp_dir, **kwargs)
recommended_preprocessors_kwargs = find_best_parametrization(exp_dir, metrics_coefs, preprocessors_kwargs,
parametrization_budget)
print(f'recommended_preprocessors_kwargs={recommended_preprocessors_kwargs}')
simplifier = get_simplifier(exp_dir, recommended_preprocessors_kwargs, generate_kwargs)
scores = evaluate_simplifier_on_turkcorpus(simplifier, phase='valid')
print(f'scores={scores}')
score = combine_metrics(scores['BLEU'], scores['SARI'], scores['FKGL'], metrics_coefs)
return score
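# Minimal call sketch (hyperparameter values are illustrative; see
# scripts/train.py for a full configuration):
#
#     score = fairseq_train_and_evaluate(
#         dataset='wikilarge',
#         arch='transformer',
#         lr=0.00011,
#         max_epoch=100,
#         preprocessors_kwargs={'LengthRatioPreprocessor': {'target_ratio': 0.8}},
#     )
#
# The returned score combines BLEU, SARI and FKGL via combine_metrics, so lower
# values are better.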
| access-main | access/fairseq/main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from collections import defaultdict
import os
from pathlib import Path
import random
import re
import shutil
import tempfile
import time
from fairseq import options
from fairseq_cli import preprocess, train, generate
from access.resources.paths import get_dataset_dir, EXP_DIR
from access.utils.helpers import (log_stdout, lock_directory, create_directory_or_skip, yield_lines,
write_lines)
def get_fairseq_exp_dir(job_id=None):
if job_id is not None:
dir_name = f'slurmjob_{job_id}'
else:
dir_name = f'local_{int(time.time() * 1000)}'
    return Path(EXP_DIR) / 'fairseq' / dir_name
def fairseq_preprocess(dataset):
dataset_dir = get_dataset_dir(dataset)
with lock_directory(dataset_dir):
preprocessed_dir = dataset_dir / 'fairseq_preprocessed'
with create_directory_or_skip(preprocessed_dir):
preprocessing_parser = options.get_preprocessing_parser()
preprocess_args = preprocessing_parser.parse_args([
'--source-lang',
'complex',
'--target-lang',
'simple',
'--trainpref',
os.path.join(dataset_dir, f'{dataset}.train'),
'--validpref',
os.path.join(dataset_dir, f'{dataset}.valid'),
'--testpref',
os.path.join(dataset_dir, f'{dataset}.test'),
'--destdir',
str(preprocessed_dir),
'--output-format',
'raw',
])
preprocess.main(preprocess_args)
return preprocessed_dir
def fairseq_train(
preprocessed_dir,
exp_dir,
ngpus=None,
max_tokens=2000,
arch='fconv_iwslt_de_en',
pretrained_emb_path=None,
embeddings_dim=None,
# Transformer (decoder is the same as encoder for now)
encoder_embed_dim=512,
encoder_layers=6,
encoder_attention_heads=8,
# encoder_decoder_dim_ratio=1,
# share_embeddings=True,
max_epoch=50,
warmup_updates=None,
lr=0.1,
min_lr=1e-9,
dropout=0.2,
label_smoothing=0.1,
lr_scheduler='fixed',
weight_decay=0.0001,
criterion='label_smoothed_cross_entropy',
optimizer='nag',
validations_before_sari_early_stopping=10,
fp16=False):
exp_dir = Path(exp_dir)
with log_stdout(exp_dir / 'fairseq_train.stdout'):
preprocessed_dir = Path(preprocessed_dir)
exp_dir.mkdir(exist_ok=True, parents=True)
# Copy dictionaries to exp_dir for generation
shutil.copy(preprocessed_dir / 'dict.complex.txt', exp_dir)
shutil.copy(preprocessed_dir / 'dict.simple.txt', exp_dir)
train_parser = options.get_training_parser()
# if share_embeddings:
# assert encoder_decoder_dim_ratio == 1
args = [
'--task',
'translation',
preprocessed_dir,
'--raw-text',
'--source-lang',
'complex',
'--target-lang',
'simple',
'--save-dir',
os.path.join(exp_dir, 'checkpoints'),
'--clip-norm',
0.1,
'--criterion',
criterion,
'--no-epoch-checkpoints',
'--save-interval-updates',
5000, # Validate every n updates
'--validations-before-sari-early-stopping',
validations_before_sari_early_stopping,
'--arch',
arch,
# '--decoder-out-embed-dim', int(embeddings_dim * encoder_decoder_dim_ratio), # Output dim of decoder
'--max-tokens',
max_tokens,
'--max-epoch',
max_epoch,
'--lr-scheduler',
lr_scheduler,
'--dropout',
dropout,
'--lr',
lr,
'--lr-shrink',
0.5, # For reduce lr on plateau scheduler
'--min-lr',
min_lr,
'--weight-decay',
weight_decay,
'--optimizer',
optimizer,
'--label-smoothing',
label_smoothing,
'--seed',
random.randint(1, 1000),
# '--force-anneal', '200',
# '--distributed-world-size', '1',
]
if arch == 'transformer':
args.extend([
'--encoder-embed-dim',
encoder_embed_dim,
'--encoder-ffn-embed-dim',
4 * encoder_embed_dim,
'--encoder-layers',
encoder_layers,
'--encoder-attention-heads',
encoder_attention_heads,
'--decoder-layers',
encoder_layers,
'--decoder-attention-heads',
encoder_attention_heads,
])
        if pretrained_emb_path is not None:
            args.extend(['--encoder-embed-path', pretrained_emb_path])
            args.extend(['--decoder-embed-path', pretrained_emb_path])
if embeddings_dim is not None:
args.extend(['--encoder-embed-dim', embeddings_dim]) # Input and output dim of encoder
args.extend(['--decoder-embed-dim', embeddings_dim]) # Input dim of decoder
if ngpus is not None:
args.extend(['--distributed-world-size', ngpus])
# if share_embeddings:
# args.append('--share-input-output-embed')
if fp16:
args.append('--fp16')
if warmup_updates is not None:
args.extend(['--warmup-updates', warmup_updates])
args = [str(arg) for arg in args]
train_args = options.parse_args_and_arch(train_parser, args)
train.main(train_args)
def _fairseq_generate(complex_filepath,
output_pred_filepath,
checkpoint_paths,
complex_dictionary_path,
simple_dictionary_path,
beam=5,
hypothesis_num=1,
lenpen=1.,
diverse_beam_groups=None,
diverse_beam_strength=0.5,
sampling=False,
batch_size=128):
# exp_dir must contain checkpoints/checkpoint_best.pt, and dict.{complex,simple}.txt
# First copy input complex file to exp_dir and create dummy simple file
tmp_dir = Path(tempfile.mkdtemp())
new_complex_filepath = tmp_dir / 'tmp.complex-simple.complex'
dummy_simple_filepath = tmp_dir / 'tmp.complex-simple.simple'
shutil.copy(complex_filepath, new_complex_filepath)
shutil.copy(complex_filepath, dummy_simple_filepath)
shutil.copy(complex_dictionary_path, tmp_dir / 'dict.complex.txt')
shutil.copy(simple_dictionary_path, tmp_dir / 'dict.simple.txt')
generate_parser = options.get_generation_parser()
args = [
tmp_dir,
'--path',
':'.join([str(path) for path in checkpoint_paths]),
'--beam',
beam,
'--nbest',
hypothesis_num,
'--lenpen',
lenpen,
'--diverse-beam-groups',
diverse_beam_groups if diverse_beam_groups is not None else -1,
'--diverse-beam-strength',
diverse_beam_strength,
'--batch-size',
batch_size,
'--raw-text',
'--print-alignment',
'--gen-subset',
'tmp',
# We don't want to reload pretrained embeddings
'--model-overrides',
{
'encoder_embed_path': None,
'decoder_embed_path': None
},
]
if sampling:
args.extend([
'--sampling',
'--sampling-topk',
10,
])
args = [str(arg) for arg in args]
generate_args = options.parse_args_and_arch(generate_parser, args)
out_filepath = tmp_dir / 'generation.out'
with log_stdout(out_filepath, mute_stdout=True):
# evaluate model in batch mode
generate.main(generate_args)
# Retrieve translations
def parse_all_hypotheses(out_filepath):
hypotheses_dict = defaultdict(list)
for line in yield_lines(out_filepath):
match = re.match(r'^H-(\d+)\t-?\d+\.\d+\t(.*)$', line)
if match:
sample_id, hypothesis = match.groups()
hypotheses_dict[int(sample_id)].append(hypothesis)
# Sort in original order
return [hypotheses_dict[i] for i in range(len(hypotheses_dict))]
all_hypotheses = parse_all_hypotheses(out_filepath)
predictions = [hypotheses[hypothesis_num - 1] for hypotheses in all_hypotheses]
write_lines(predictions, output_pred_filepath)
os.remove(dummy_simple_filepath)
os.remove(new_complex_filepath)
def fairseq_generate(complex_filepath,
output_pred_filepath,
exp_dir,
beam=1,
hypothesis_num=1,
lenpen=1.,
diverse_beam_groups=None,
diverse_beam_strength=0.5,
sampling=False,
batch_size=128):
exp_dir = Path(exp_dir)
checkpoint_path = exp_dir / 'checkpoints/checkpoint_best.pt'
assert checkpoint_path.exists(), f'Generation failed, no checkpoint at {checkpoint_path}'
complex_dictionary_path = exp_dir / 'dict.complex.txt'
simple_dictionary_path = exp_dir / 'dict.simple.txt'
_fairseq_generate(complex_filepath,
output_pred_filepath, [checkpoint_path],
complex_dictionary_path=complex_dictionary_path,
simple_dictionary_path=simple_dictionary_path,
beam=beam,
hypothesis_num=hypothesis_num,
lenpen=lenpen,
diverse_beam_groups=diverse_beam_groups,
diverse_beam_strength=diverse_beam_strength,
sampling=sampling,
batch_size=batch_size)
| access-main | access/fairseq/base.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from contextlib import contextmanager, AbstractContextManager
from fcntl import flock, LOCK_EX, LOCK_UN
import inspect
import io
from itertools import zip_longest
from pathlib import Path
import shutil
import sys
import tempfile
import numpy as np
@contextmanager
def open_files(filepaths, mode='r'):
files = []
try:
files = [Path(filepath).open(mode) for filepath in filepaths]
yield files
finally:
[f.close() for f in files]
def yield_lines_in_parallel(filepaths, strip=True, strict=True, n_lines=float('inf')):
assert type(filepaths) == list
with open_files(filepaths) as files:
for i, parallel_lines in enumerate(zip_longest(*files)):
if i >= n_lines:
break
if None in parallel_lines:
assert not strict, f'Files don\'t have the same number of lines: {filepaths}, use strict=False'
if strip:
parallel_lines = [l.rstrip('\n') if l is not None else None for l in parallel_lines]
yield parallel_lines
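# Illustrative use (file names are placeholders):
#
#     for complex_line, simple_line in yield_lines_in_parallel(['a.complex', 'a.simple']):
#         print(complex_line, '->', simple_line)
#
# With strict=True (the default) a mismatch in line counts raises an
# AssertionError instead of silently yielding None for the exhausted file.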
class FilesWrapper:
'''Write to multiple open files at the same time'''
def __init__(self, files, strict=True):
self.files = files
self.strict = strict # Whether to raise an exception when a line is None
def write(self, lines):
assert len(lines) == len(self.files)
for line, f in zip(lines, self.files):
if line is None:
assert not self.strict
continue
f.write(line.rstrip('\n') + '\n')
@contextmanager
def write_lines_in_parallel(filepaths, strict=True):
with open_files(filepaths, 'w') as files:
yield FilesWrapper(files, strict=strict)
def write_lines(lines, filepath):
filepath = Path(filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
with filepath.open('w') as f:
for line in lines:
f.write(line + '\n')
def yield_lines(filepath, n_lines=float('inf'), prop=1):
if prop < 1:
assert n_lines == float('inf')
n_lines = int(prop * count_lines(filepath))
with open(filepath, 'r') as f:
for i, l in enumerate(f):
if i >= n_lines:
break
yield l.rstrip('\n')
def read_lines(filepath, n_lines=float('inf'), prop=1):
return list(yield_lines(filepath, n_lines, prop))
def count_lines(filepath):
n_lines = 0
with Path(filepath).open() as f:
for l in f:
n_lines += 1
return n_lines
@contextmanager
def open_with_lock(filepath, mode):
with open(filepath, mode) as f:
flock(f, LOCK_EX)
yield f
flock(f, LOCK_UN)
def get_lockfile_path(path):
path = Path(path)
if path.is_dir():
return path / '.lockfile'
if path.is_file():
return path.parent / f'.{path.name}.lockfile'
@contextmanager
def lock_directory(dir_path):
# TODO: Locking a directory should lock all files in that directory
# Right now if we lock foo/, someone else can lock foo/bar.txt
# TODO: Nested with lock_directory() should not be blocking
    assert Path(dir_path).exists(), f'Directory does not exist: {dir_path}'
lockfile_path = get_lockfile_path(dir_path)
with open_with_lock(lockfile_path, 'w'):
yield
def safe_division(a, b):
if b == 0:
return 0
return a / b
def harmonic_mean(values, coefs=None):
if 0 in values:
return 0
values = np.array(values)
if coefs is None:
coefs = np.ones(values.shape)
coefs = np.array(coefs)
return np.sum(coefs) / np.dot(coefs, 1 / values)
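# Worked example: harmonic_mean([1, 4], coefs=[1, 1]) == 2 / (1/1 + 1/4) == 1.6,
# and any zero in values short-circuits the result to 0.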
@contextmanager
def mute(mute_stdout=True, mute_stderr=True):
save_stdout = sys.stdout
save_stderr = sys.stderr
if mute_stdout:
sys.stdout = io.StringIO()
if mute_stderr:
sys.stderr = io.StringIO()
try:
yield
finally:
sys.stdout = save_stdout
sys.stderr = save_stderr
@contextmanager
def log_stdout(filepath, mute_stdout=False):
'''Context manager to write both to stdout and to a file'''
class MultipleStreamsWriter:
def __init__(self, streams):
self.streams = streams
def write(self, message):
for stream in self.streams:
stream.write(message)
def flush(self):
for stream in self.streams:
stream.flush()
save_stdout = sys.stdout
log_file = open(filepath, 'w')
if mute_stdout:
sys.stdout = MultipleStreamsWriter([log_file]) # Write to file only
else:
sys.stdout = MultipleStreamsWriter([save_stdout, log_file]) # Write to both stdout and file
try:
yield
finally:
sys.stdout = save_stdout
log_file.close()
def add_dicts(*dicts):
return {k: v for dic in dicts for k, v in dic.items()}
def get_default_args(func):
signature = inspect.signature(func)
return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
def get_allowed_kwargs(func, *args, **kwargs):
    expected_args = inspect.getfullargspec(func).args
allowed_kwargs = expected_args[len(args):]
return {k: v for k, v in kwargs.items() if k in allowed_kwargs}
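# Example: for def f(a, b, c=1, d=2), get_allowed_kwargs(f, 10, c=3, x=4)
# returns {'c': 3}; the positional slot already filled ('a') is excluded and
# unknown keys ('x') are dropped.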
class SkipWithBlock(Exception):
pass
class create_directory_or_skip(AbstractContextManager):
'''Context manager for creating a new directory (with rollback and skipping with block if exists)
In order to skip the execution of the with block if the dataset already exists, this context manager uses deep
magic from https://stackoverflow.com/questions/12594148/skipping-execution-of-with-block
'''
def __init__(self, dir_path, overwrite=False):
self.dir_path = Path(dir_path)
self.overwrite = overwrite
def __enter__(self):
if self.dir_path.exists():
self.directory_lock = lock_directory(self.dir_path)
self.directory_lock.__enter__()
files_in_directory = list(self.dir_path.iterdir())
if set(files_in_directory) in [set([]), set([self.dir_path / '.lockfile'])]:
# TODO: Quick hack to remove empty directories
self.directory_lock.__exit__(None, None, None)
print(f'Removing empty directory {self.dir_path}')
shutil.rmtree(self.dir_path)
else:
# Deep magic hack to skip the execution of the code inside the with block
# We set the trace to a dummy function
sys.settrace(lambda *args, **keys: None)
# Get the calling frame (sys._getframe(0) is the current frame)
frame = sys._getframe(1)
# Set the calling frame's trace to the one that raises the special exception
frame.f_trace = self.trace
return
print(f'Creating {self.dir_path}...')
self.dir_path.mkdir(parents=True, exist_ok=True)
self.directory_lock = lock_directory(self.dir_path)
self.directory_lock.__enter__()
def trace(self, frame, event, arg):
# This method is called when a new local scope is entered, i.e. right when the code in the with block begins
# The exception will therefore be caught by the __exit__()
raise SkipWithBlock()
def __exit__(self, type, value, traceback):
self.directory_lock.__exit__(type, value, traceback)
if type is not None:
if issubclass(type, SkipWithBlock):
return True # Suppress special SkipWithBlock exception
if issubclass(type, BaseException):
# Rollback
print(f'Error: Rolling back creation of directory {self.dir_path}')
shutil.rmtree(self.dir_path)
return False # Reraise the exception
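# Typical use of create_directory_or_skip (path and body are placeholders):
#
#     with create_directory_or_skip('/tmp/my_dataset'):
#         build_dataset()  # skipped entirely if the directory already exists and is non-empty
#
# If an exception escapes the block, the directory is removed again (rollback),
# so callers never observe a half-built directory.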
def get_temp_filepath(create=False):
temp_filepath = Path(tempfile.mkstemp()[1])
if not create:
temp_filepath.unlink()
return temp_filepath
def get_temp_filepaths(n_filepaths, create=False):
return [get_temp_filepath(create=create) for _ in range(n_filepaths)]
def delete_files(filepaths):
for filepath in filepaths:
filepath = Path(filepath)
assert filepath.is_file()
filepath.unlink()
| access-main | access/utils/helpers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# TODO: Move to utils/training.py
from functools import wraps
import time
def print_method_name(func):
'''Decorator to print method name for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
print(f"method_name='{func.__name__}'")
return func(*args, **kwargs)
return wrapped_func
def print_args(func):
'''Decorator to print arguments of method for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
print(f'args={args}')
print(f'kwargs={kwargs}')
return func(*args, **kwargs)
return wrapped_func
def print_result(func):
'''Decorator to print result of method for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
result = func(*args, **kwargs)
print(f'result={result}')
return result
return wrapped_func
def print_running_time(func):
'''Decorator to print running time of method for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
print(f'running_time={time.time() - start_time}')
return result
return wrapped_func
| access-main | access/utils/training.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from access.utils.helpers import harmonic_mean
# Transforms take a value and cast it to a score between 0 and 1, the higher the better
def bleu_transform(bleu):
min_bleu = 0
max_bleu = 100
bleu = max(bleu, min_bleu)
bleu = min(bleu, max_bleu)
return (bleu - min_bleu) / (max_bleu - min_bleu)
def sari_transform(sari):
min_sari = 0
max_sari = 60
sari = max(sari, min_sari)
sari = min(sari, max_sari)
return (sari - min_sari) / (max_sari - min_sari)
def fkgl_transform(fkgl):
min_fkgl = 0
max_fkgl = 20
fkgl = max(fkgl, min_fkgl)
fkgl = min(fkgl, max_fkgl)
return 1 - (fkgl - min_fkgl) / (max_fkgl - min_fkgl)
def combine_metrics(bleu, sari, fkgl, coefs):
# Combine into a score between 0 and 1, LOWER the better
assert len(coefs) == 3
return 1 - harmonic_mean([bleu_transform(bleu), sari_transform(sari), fkgl_transform(fkgl)], coefs)
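# Worked example: bleu=50, sari=30 and fkgl=10 each transform to 0.5, so with
# coefs=[1, 1, 1] the harmonic mean is 0.5 and the combined score is
# 1 - 0.5 = 0.5 (lower is better).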
| access-main | access/evaluation/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from easse.cli import evaluate_system_output
from access.preprocess import lowercase_file, to_lrb_rrb_file
from access.resources.paths import get_data_filepath
from access.utils.helpers import mute, get_temp_filepath
'''A simplifier is a method with signature: simplifier(complex_filepath, output_pred_filepath)'''
def get_prediction_on_turkcorpus(simplifier, phase):
source_filepath = get_data_filepath('turkcorpus', phase, 'complex')
pred_filepath = get_temp_filepath()
with mute():
simplifier(source_filepath, pred_filepath)
return pred_filepath
def evaluate_simplifier_on_turkcorpus(simplifier, phase):
pred_filepath = get_prediction_on_turkcorpus(simplifier, phase)
pred_filepath = lowercase_file(pred_filepath)
pred_filepath = to_lrb_rrb_file(pred_filepath)
return evaluate_system_output(f'turkcorpus_{phase}_legacy',
sys_sents_path=pred_filepath,
metrics=['bleu', 'sari_legacy', 'fkgl'],
quality_estimation=True)
| access-main | access/evaluation/general.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import fileinput
from access.preprocessors import get_preprocessors
from access.resources.prepare import prepare_models
from access.simplifiers import get_fairseq_simplifier, get_preprocessed_simplifier
from access.text import word_tokenize
from access.utils.helpers import yield_lines, write_lines, get_temp_filepath, mute
if __name__ == '__main__':
# Usage: python generate.py < my_file.complex
# Read from stdin
source_filepath = get_temp_filepath()
write_lines([word_tokenize(line) for line in fileinput.input()], source_filepath)
# Load best model
best_model_dir = prepare_models()
recommended_preprocessors_kwargs = {
'LengthRatioPreprocessor': {'target_ratio': 0.95},
'LevenshteinPreprocessor': {'target_ratio': 0.75},
'WordRankRatioPreprocessor': {'target_ratio': 0.75},
'SentencePiecePreprocessor': {'vocab_size': 10000},
}
preprocessors = get_preprocessors(recommended_preprocessors_kwargs)
simplifier = get_fairseq_simplifier(best_model_dir, beam=8)
simplifier = get_preprocessed_simplifier(simplifier, preprocessors=preprocessors)
# Simplify
pred_filepath = get_temp_filepath()
with mute():
simplifier(source_filepath, pred_filepath)
for line in yield_lines(pred_filepath):
print(line)
| access-main | scripts/generate.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from access.fairseq.main import fairseq_train_and_evaluate
from access.resources.prepare import prepare_wikilarge, prepare_turkcorpus
if __name__ == '__main__':
print('Training a model from scratch')
prepare_wikilarge()
prepare_turkcorpus()
kwargs = {
'arch': 'transformer',
'warmup_updates': 4000,
'parametrization_budget': 256,
'beam': 8,
'dataset': 'wikilarge',
'dropout': 0.2,
'fp16': False,
'label_smoothing': 0.54,
'lr': 0.00011,
'lr_scheduler': 'fixed',
'max_epoch': 100,
'max_tokens': 5000,
'metrics_coefs': [0, 1, 0],
'optimizer': 'adam',
'preprocessors_kwargs': {
'LengthRatioPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'LevenshteinPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'WordRankRatioPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'DependencyTreeDepthRatioPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'SentencePiecePreprocessor': {
'vocab_size': 10000
}
}
}
fairseq_train_and_evaluate(**kwargs)
| access-main | scripts/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from access.evaluation.general import evaluate_simplifier_on_turkcorpus
from access.preprocessors import get_preprocessors
from access.resources.prepare import prepare_turkcorpus, prepare_models
from access.simplifiers import get_fairseq_simplifier, get_preprocessed_simplifier
if __name__ == '__main__':
print('Evaluating pretrained model')
prepare_turkcorpus()
best_model_dir = prepare_models()
recommended_preprocessors_kwargs = {
'LengthRatioPreprocessor': {'target_ratio': 0.95},
'LevenshteinPreprocessor': {'target_ratio': 0.75},
'WordRankRatioPreprocessor': {'target_ratio': 0.75},
'SentencePiecePreprocessor': {'vocab_size': 10000},
}
preprocessors = get_preprocessors(recommended_preprocessors_kwargs)
simplifier = get_fairseq_simplifier(best_model_dir, beam=8)
simplifier = get_preprocessed_simplifier(simplifier, preprocessors=preprocessors)
print(evaluate_simplifier_on_turkcorpus(simplifier, phase='test'))
| access-main | scripts/evaluate.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import click
from covid19_spread.data.usa import us_recurring
@click.group()
def cli():
pass
REGIONS = {"us": us_recurring.USARRecurring}
@cli.command()
@click.argument("region", type=click.Choice(REGIONS.keys()))
def install(region):
mod = REGIONS[region]()
mod.install()
@cli.command()
@click.argument("region", type=click.Choice(REGIONS.keys()))
def run(region):
mod = REGIONS[region]()
mod.refresh()
if __name__ == "__main__":
cli()
| covid19_spread-main | recurring.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import click
from covid19_spread.data.usa.convert import main as us_convert, SOURCES as US_SOURCES
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
@click.group()
def cli():
pass
@cli.command()
@click.option("--metric", default="cases", type=click.Choice(["cases", "deaths"]))
@click.option("--with-features", is_flag=True)
@click.option("--source", default="nyt", type=click.Choice(US_SOURCES.keys()))
@click.option("--resolution", default="county", type=click.Choice(["county", "state"]))
def us(metric, with_features, source, resolution):
us_convert(metric, with_features, source, resolution)
if __name__ == "__main__":
cli()
| covid19_spread-main | prepare_data.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup, find_packages
setup(
name="covid19_spread",
version="0.1",
py_modules=["covid19_spread"],
install_requires=["Click",],
packages=find_packages(),
entry_points="""
[console_scripts]
cv=cv:cli
prepare-data=prepare_data:cli
recurring=recurring:cli
""",
)
| covid19_spread-main | setup.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import click
import importlib
import itertools
import json
import pandas as pd
import os
import random
import shutil
import submitit
import tempfile
import torch as th
import re
import yaml
from argparse import Namespace
from datetime import datetime
from functools import partial
from glob import glob, iglob
from typing import Dict, Any, List, Optional
from contextlib import nullcontext, ExitStack
from covid19_spread import common
from covid19_spread import metrics
from covid19_spread.lib import cluster
from covid19_spread.lib.click_lib import DefaultGroup
from covid19_spread.lib.slurm_pool_executor import (
SlurmPoolExecutor,
JobStatus,
TransactionManager,
)
from covid19_spread.lib.slack import post_slack_message
from submitit.helpers import RsyncSnapshot
from covid19_spread.cross_val import load_config
import sqlite3
from ax.service.ax_client import AxClient
from ax.exceptions.generation_strategy import MaxParallelismReachedException
import time
import queue
import threading
def set_dict(d: Dict[str, Any], keys: List[str], v: Any):
"""
update a dict using a nested list of keys.
Ex:
x = {'a': {'b': {'c': 2}}}
set_dict(x, ['a', 'b'], 4) == {'a': {'b': 4}}
"""
if len(keys) > 0:
d[keys[0]] = set_dict(d[keys[0]], keys[1:], v)
return d
else:
return v
def mk_executor(
name: str, folder: str, extra_params: Dict[str, Any], ex=SlurmPoolExecutor, **kwargs
):
executor = (ex or submitit.AutoExecutor)(folder=folder, **kwargs)
executor.update_parameters(
job_name=name,
partition=cluster.PARTITION,
gpus_per_node=extra_params.get("gpus", 0),
cpus_per_task=extra_params.get("cpus", 3),
mem=f'{cluster.MEM_GB(extra_params.get("memgb", 20))}GB',
array_parallelism=extra_params.get("array_parallelism", 100),
time=extra_params.get("timeout", 12 * 60),
)
return executor
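# Minimal sketch of building an executor (folder and resource values are illustrative):
#
#     executor = mk_executor('cv_us', '/checkpoint/me/logs', {'gpus': 1, 'memgb': 20})
#
# By default this constructs a SlurmPoolExecutor; passing ex=submitit.AutoExecutor
# falls back to plain submitit scheduling, as the prediction_interval command does.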
def ensemble(basedirs, cfg, module, prefix, outdir):
def _path(x):
return os.path.join(basedir, prefix + x)
means = []
stds = []
mean_deltas = []
kwargs = {"index_col": "date", "parse_dates": ["date"]}
stdfile = "std_closed_form.csv"
meanfile = "mean_closed_form.csv"
for basedir in basedirs:
if os.path.exists(_path(cfg["validation"]["output"])):
means.append(pd.read_csv(_path(cfg["validation"]["output"]), **kwargs))
if os.path.exists(_path(stdfile)):
stds.append(pd.read_csv(_path(stdfile), **kwargs))
mean_deltas.append(pd.read_csv(_path(meanfile), **kwargs))
if len(stds) > 0:
# Average the variance, and take square root
std = pd.concat(stds).pow(2).groupby(level=0).mean().pow(0.5)
std.to_csv(os.path.join(outdir, prefix + stdfile))
mean_deltas = pd.concat(mean_deltas).groupby(level=0).mean()
mean_deltas.to_csv(os.path.join(outdir, prefix + meanfile))
assert len(means) > 0, "All ensemble jobs failed!!!!"
mod = importlib.import_module("covid19_spread." + module).CV_CLS()
if len(stds) > 0:
pred_interval = cfg.get("prediction_interval", {})
piv = mod.run_prediction_interval(
os.path.join(outdir, prefix + meanfile),
os.path.join(outdir, prefix + stdfile),
pred_interval.get("intervals", [0.99, 0.95, 0.8]),
)
piv.to_csv(os.path.join(outdir, prefix + "piv.csv"), index=False)
mean = pd.concat(means).groupby(level=0).median()
outfile = os.path.join(outdir, prefix + cfg["validation"]["output"])
mean.to_csv(outfile, index_label="date")
# -- metrics --
metric_args = cfg[module].get("metrics", {})
df_val, json_val = mod.compute_metrics(
cfg[module]["data"], outfile, None, metric_args
)
df_val.to_csv(os.path.join(outdir, prefix + "metrics.csv"))
with open(os.path.join(outdir, prefix + "metrics.json"), "w") as fout:
json.dump(json_val, fout)
print(df_val)
def run_cv(
module: str,
basedir: str,
cfg: Dict[str, Any],
prefix="",
basedate=None,
executor=None,
test_run: bool = False, # is this a test or validation run?
):
"""Runs cross validaiton for one set of hyperaparmeters"""
try:
basedir = basedir.replace("%j", submitit.JobEnvironment().job_id)
except Exception:
pass # running locally, basedir is fine...
os.makedirs(basedir, exist_ok=True)
print(f"CWD = {os.getcwd()}")
def _path(path):
return os.path.join(basedir, path)
log_configs(cfg, module, _path(prefix + f"{module}.yml"))
n_models = cfg[module]["train"].get("n_models", 1)
if n_models > 1:
launcher = map if executor is None else executor.map_array
fn = partial(
run_cv,
module,
prefix=prefix,
basedate=basedate,
executor=executor,
test_run=test_run,
)
configs = [
set_dict(copy.deepcopy(cfg), [module, "train", "n_models"], 1)
for _ in range(n_models)
]
basedirs = [os.path.join(basedir, f"job_{i}") for i in range(n_models)]
with ExitStack() as stack:
if executor is not None:
stack.enter_context(executor.set_folder(os.path.join(basedir, "%j")))
jobs = list(launcher(fn, basedirs, configs))
launcher = (
ensemble
if executor is None
else partial(executor.submit_dependent, jobs, ensemble)
)
ensemble_job = launcher(basedirs, cfg, module, prefix, basedir)
if executor is not None:
                # Whatever jobs depend on "this" job should be extended to the newly created jobs
executor.extend_dependencies(jobs + [ensemble_job])
return jobs + [ensemble_job]
# setup input/output paths
dset = cfg[module]["data"]
val_in = _path(prefix + "filtered_" + os.path.basename(dset))
val_test_key = "test" if test_run else "validation"
val_out = _path(prefix + cfg[val_test_key]["output"])
cfg[module]["train"]["fdat"] = val_in
mod = importlib.import_module("covid19_spread." + module).CV_CLS()
# -- store configs to reproduce results --
log_configs(cfg, module, _path(prefix + f"{module}.yml"))
ndays = 0 if test_run else cfg[val_test_key]["days"]
if basedate is not None:
# If we want to train from a particular basedate, then also subtract
        # out the difference in days. Ex: if ground truth contains data up to 5/20/2020
# but the basedate is 5/10/2020, then drop an extra 10 days in addition to validation.days
gt = metrics.load_ground_truth(dset)
assert gt.index.max() >= basedate
ndays += (gt.index.max() - basedate).days
filter_validation_days(dset, val_in, ndays)
# apply data pre-processing
preprocessed = _path(prefix + "preprocessed_" + os.path.basename(dset))
mod.preprocess(val_in, preprocessed, cfg[module].get("preprocess", {}))
mod.setup_tensorboard(basedir)
# setup logging
train_params = Namespace(**cfg[module]["train"])
n_models = getattr(train_params, "n_models", 1)
print(f"Training {n_models} models")
# -- train --
model = mod.run_train(
preprocessed, train_params, _path(prefix + cfg[module]["output"])
)
# -- simulate --
with th.no_grad():
sim_params = cfg[module].get("simulate", {})
# Returns the number of new cases for each day
df_forecast_deltas = mod.run_simulate(
preprocessed,
train_params,
model,
sim_params=sim_params,
days=cfg[val_test_key]["days"],
)
df_forecast = common.rebase_forecast_deltas(val_in, df_forecast_deltas)
mod.tb_writer.close()
print(f"Storing validation in {val_out}")
df_forecast.to_csv(val_out, index_label="date")
# -- metrics --
metric_args = cfg[module].get("metrics", {})
df_val, json_val = mod.compute_metrics(
cfg[module]["data"], val_out, model, metric_args
)
df_val.to_csv(_path(prefix + "metrics.csv"))
with open(_path(prefix + "metrics.json"), "w") as fout:
json.dump(json_val, fout)
print(df_val)
# -- prediction interval --
if "prediction_interval" in cfg and prefix == "final_model_":
try:
with th.no_grad():
# FIXME: refactor to use rebase_forecast_deltas
gt = metrics.load_ground_truth(val_in)
basedate = gt.index.max()
prev_day = gt.loc[[basedate]]
pred_interval = cfg.get("prediction_interval", {})
df_std, df_mean = mod.run_standard_deviation(
preprocessed,
train_params,
pred_interval.get("nsamples", 100),
pred_interval.get("intervals", [0.99, 0.95, 0.8]),
prev_day.values.T,
model,
pred_interval.get("batch_size", 8),
closed_form=True,
)
df_std.to_csv(_path(f"{prefix}std_closed_form.csv"), index_label="date")
df_mean.to_csv(
_path(f"{prefix}mean_closed_form.csv"), index_label="date"
)
piv = mod.run_prediction_interval(
_path(f"{prefix}mean_closed_form.csv"),
_path(f"{prefix}std_closed_form.csv"),
pred_interval.get("intervals", [0.99, 0.95, 0.8]),
)
piv.to_csv(_path(f"{prefix}piv.csv"), index=False)
except NotImplementedError:
pass # naive...
def filter_validation_days(dset: str, val_in: str, validation_days: int):
"""Filters validation days and writes output to val_in path"""
if dset.endswith(".csv"):
common.drop_k_days_csv(dset, val_in, validation_days)
elif dset.endswith(".h5"):
common.drop_k_days(dset, val_in, validation_days)
else:
raise RuntimeError(f"Unrecognized dataset extension: {dset}")
def load_model(model_pth, cv, args):
chkpnt = th.load(model_pth)
cv.initialize(args)
cv.func.load_state_dict(chkpnt)
return cv.func
def copy_assets(cfg, dir):
if isinstance(cfg, dict):
return {k: copy_assets(v, dir) for k, v in cfg.items()}
elif isinstance(cfg, list):
return [copy_assets(x, dir) for x in cfg]
elif isinstance(cfg, str) and os.path.exists(cfg):
new_pth = os.path.join(dir, "assets", os.path.basename(cfg))
shutil.copy(cfg, new_pth)
return new_pth
else:
return cfg
def log_configs(cfg: Dict[str, Any], module: str, path: str):
"""Logs configs for job for reproducibility"""
with open(path, "w") as f:
yaml.dump(cfg[module], f)
def run_best(config, module, remote, basedir, basedate=None, executor=None):
mod = importlib.import_module("covid19_spread." + module).CV_CLS()
sweep_config = load_config(os.path.join(basedir, "cfg.yml"))
best_runs = mod.model_selection(basedir, config=sweep_config[module], module=module)
if remote and executor is None:
executor = mk_executor(
"model_selection", basedir, config[module].get("resources", {})
)
with open(os.path.join(basedir, "model_selection.json"), "w") as fout:
json.dump([x._asdict() for x in best_runs], fout)
cfg = copy.deepcopy(config)
best_runs_df = pd.DataFrame(best_runs)
def run_cv_and_copy_results(tags, module, pth, cfg, prefix):
try:
jobs = run_cv(
module,
pth,
cfg,
prefix=prefix,
basedate=basedate,
executor=executor,
test_run=True,
)
def rest():
for tag in tags:
shutil.copy(
os.path.join(pth, f'final_model_{cfg["validation"]["output"]}'),
os.path.join(
os.path.dirname(pth), f"forecasts/forecast_{tag}.csv"
),
)
if "prediction_interval" in cfg:
piv_pth = os.path.join(
pth,
f'final_model_{cfg["prediction_interval"]["output_std"]}',
)
if os.path.exists(piv_pth):
shutil.copy(
piv_pth,
os.path.join(
os.path.dirname(pth), f"forecasts/std_{tag}.csv"
),
)
if cfg[module]["train"].get("n_models", 1) > 1 and executor is not None:
executor.submit_dependent(jobs, rest)
else:
rest()
except Exception as e:
msg = f"*Final run failed for {tags}*\nbasedir = {basedir}\nException was: {e}"
post_slack_message(channel="#cron_errors", text=msg)
raise e
for pth, tags in best_runs_df.groupby("pth")["name"].agg(list).items():
os.makedirs(os.path.join(os.path.dirname(pth), "forecasts"), exist_ok=True)
name = ",".join(tags)
print(f"Starting {name}: {pth}")
job_config = load_config(os.path.join(pth, module + ".yml"))
if "test" in cfg:
job_config["train"]["test_on"] = cfg["test"]["days"]
cfg[module] = job_config
launcher = run_cv_and_copy_results
if remote:
launcher = partial(executor.submit, run_cv_and_copy_results)
with executor.set_folder(pth) if remote else nullcontext():
launcher(tags, module, pth, cfg, "final_model_")
@click.group(cls=DefaultGroup, default_command="cv")
def cli():
pass
@cli.command()
@click.argument("chkpnts", nargs=-1)
@click.option("-remote", is_flag=True)
@click.option("-nsamples", type=click.INT)
@click.option("-batchsize", type=int)
@click.option("-closed-form", is_flag=True)
def prediction_interval(chkpnts, remote, nsamples, batchsize, closed_form):
def f(chkpnt_pth):
prefix = "final_model_" if "final_model_" in chkpnt_pth else ""
chkpnt = th.load(chkpnt_pth)
job_pth = os.path.dirname(chkpnt_pth)
cfg_pth = os.path.join(job_pth, "../cfg.yml")
if not os.path.exists(cfg_pth):
cfg_pth = os.path.join(job_pth, "../../cfg.yml")
cfg = load_config(cfg_pth)
module = cfg["this_module"]
job_config = load_config(os.path.join(job_pth, f"{prefix}{module}.yml"))
opt = Namespace(**job_config["train"])
mod = importlib.import_module("covid19_spread." + module).CV_CLS()
new_cases, regions, basedate, device = mod.initialize(opt)
model = mod.func
model.load_state_dict(chkpnt)
dset = os.path.join(
job_pth, prefix + "preprocessed_" + os.path.basename(job_config["data"])
)
val_in = os.path.join(
job_pth, prefix + "filtered_" + os.path.basename(job_config["data"])
)
gt = metrics.load_ground_truth(val_in)
prev_day = gt.loc[[pd.to_datetime(basedate)]]
pred_interval = cfg.get("prediction_interval", {})
df_std, df_mean = mod.run_standard_deviation(
dset,
opt,
nsamples or pred_interval.get("nsamples", 100),
pred_interval.get("intervals", [0.99, 0.95, 0.8]),
prev_day.values.T,
model,
batchsize or pred_interval.get("batch_size", 8),
closed_form=closed_form,
)
suffix = "_closed_form" if closed_form else ""
df_std.to_csv(
os.path.join(job_pth, f"{prefix}std{suffix}.csv"), index_label="date"
)
df_mean.to_csv(
os.path.join(job_pth, f"{prefix}mean{suffix}.csv"), index_label="date"
)
pred_intervals = mod.run_prediction_interval(
os.path.join(job_pth, f"{prefix}mean{suffix}.csv"),
os.path.join(job_pth, f"{prefix}std{suffix}.csv"),
pred_interval.get("intervals", [0.99, 0.95, 0.8]),
)
pred_intervals.to_csv(
os.path.join(job_pth, f"{prefix}piv{suffix}.csv"), index=False
)
if remote:
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
folder = os.path.expanduser(f"~/.covid19/logs/{now}")
extra_params = {"gpus": 1, "cpus": 2, "memgb": 20, "timeout": 3600}
ex = mk_executor(
"prediction_interval", folder, extra_params, ex=submitit.AutoExecutor
)
ex.map_array(f, chkpnts)
print(folder)
else:
list(map(f, chkpnts))
@cli.command()
@click.argument("sweep_dirs", nargs=-1)
@click.argument("module")
@click.option("-remote", is_flag=True)
@click.option("-basedate", type=click.DateTime(), default=None)
def model_selection(sweep_dirs, module, remote, basedate):
executor = None
for sweep_dir in sweep_dirs:
cfg = load_config(os.path.join(sweep_dir, "cfg.yml"))
if executor is None:
executor = mk_executor(
"model_selection", sweep_dir, cfg[module].get("resources", {})
)
match = re.search(r"\d{4}-\d{2}-\d{2}", os.path.basename(sweep_dir))
if basedate is None and match:
basedate = pd.to_datetime(match.group(0))
run_best(cfg, module, remote, sweep_dir, basedate, executor=executor)
executor.launch(sweep_dir + "/workers", workers=4)
@cli.command()
@click.argument("config_pth")
@click.argument("module")
@click.option("-validate-only", type=click.BOOL, default=False)
@click.option("-remote", is_flag=True)
@click.option("-array-parallelism", type=click.INT, default=20)
@click.option("-max-jobs", type=click.INT, default=200)
@click.option("-basedir", default=None, help="Path to sweep base directory")
@click.option("-basedate", type=click.DateTime(), help="Date to treat as last date")
@click.option("-ablation", is_flag=True)
def cv(
config_pth: str,
module: str,
validate_only: bool,
remote: bool,
array_parallelism: int,
max_jobs: int,
basedir: str,
basedate: Optional[datetime] = None,
executor=None,
ablation=False,
):
"""
Run cross validation pipeline for a given module.
"""
# FIXME: This is a hack...
in_backfill = executor is not None
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
user = cluster.USER
cfg = load_config(config_pth)
region = cfg["region"]
cfg["this_module"] = module
if basedir is None:
if remote:
basedir = f"{cluster.FS}/{user}/covid19/forecasts/{region}/{now}"
else:
basedir = f"/tmp/{user}/covid19/forecasts/{region}/{now}"
os.makedirs(basedir, exist_ok=True)
if not in_backfill:
# Copy any asset files into `basedir/assets`
os.makedirs(os.path.join(basedir, "assets"))
cfg[module] = copy_assets(cfg[module], basedir)
# Copy the dataset into the basedir
shutil.copy(cfg[module]["data"], basedir)
cfg[module]["data"] = os.path.join(basedir, os.path.basename(cfg[module]["data"]))
with open(os.path.join(basedir, "cfg.yml"), "w") as fout:
yaml.dump(cfg, fout)
# if we are running an ablation, create new time features from ablation field
    # each list entry is assumed to be a single ablation
# all features in one list entry will be dropped from the full features to
# perform the ablation
if ablation:
feats = []
if not any([len(x) == 0 for x in cfg[module]["train"]["ablation"]]):
# Add a baseline ablation that uses all time features by default
cfg[module]["train"]["ablation"].append([])
all_feats = set(cfg[module]["train"]["time_features"][0])
for x in cfg[module]["train"]["ablation"]:
feats.append(list(all_feats - set(x)))
cfg[module]["train"]["time_features"] = feats
cfgs = []
sweep_params = [
([module, "train", k], v)
for k, v in cfg[module]["train"].items()
if isinstance(v, list)
]
sweep_params.extend(
[
([module, "preprocess", k], v)
for k, v in cfg[module].get("preprocess", {}).items()
if isinstance(v, list)
]
)
if len(sweep_params) == 0:
cfgs.append(cfg)
else:
random.seed(0)
keys, values = zip(*sweep_params)
for vals in itertools.product(*values):
clone = copy.deepcopy(cfg)
[set_dict(clone, ks, vs) for ks, vs in zip(keys, vals)]
cfgs.append(clone)
random.shuffle(cfgs)
cfgs = cfgs[:max_jobs]
print(f"Launching {len(cfgs)} jobs")
if remote:
extra = cfg[module].get("resources", {})
if executor is None:
executor = mk_executor(
f"cv_{region}",
basedir + "/%j",
{**extra, "array_parallelism": array_parallelism},
)
launcher = executor.map_array
else:
launcher = map
basedirs = [os.path.join(basedir, f"job_{i}") for i in range(len(cfgs))]
with ExitStack() as stack:
if not in_backfill:
stack.enter_context(
RsyncSnapshot(
snapshot_dir=basedir + "/snapshot",
exclude=["notebooks/*", "tests/*"],
)
)
jobs = list(
launcher(
partial(
run_cv, module, basedate=basedate, executor=executor, test_run=False
),
basedirs,
cfgs,
)
)
# Find the best model and retrain on the full dataset
launcher = (
partial(
executor.submit_dependent,
jobs,
run_best,
executor=copy.deepcopy(executor),
)
if remote
else run_best
)
if not validate_only:
job = launcher(cfg, module, remote, basedir, basedate=basedate)
jobs.append(job)
if remote:
executor.launch(basedir + "/workers", array_parallelism)
print(basedir)
return basedir, jobs
@cli.command()
@click.argument("config_pth")
@click.argument("module")
@click.option("-period", type=int, help="Number of days for sliding window")
@click.option(
"-start-date", type=click.DateTime(), default="2020-04-01", help="Start date"
)
@click.option("-dates", default=None, multiple=True, type=click.DateTime())
@click.option("-validate-only", type=click.BOOL, default=False, is_flag=True)
@click.option("-remote", is_flag=True)
@click.option("-array-parallelism", type=click.INT, default=20)
@click.option("-max-jobs", type=click.INT, default=200)
@click.option("-ablation", is_flag=True)
@click.pass_context
def backfill(
ctx: click.Context,
config_pth: str,
module: str,
period: Optional[int] = None,
start_date: Optional[datetime.date] = None,
dates: Optional[List[datetime.date]] = None,
validate_only: bool = False,
remote: bool = False,
array_parallelism: int = 20,
max_jobs: int = 200,
ablation: bool = False,
):
"""
Run the cross validation pipeline over multiple time points.
"""
config = common.mk_absolute_paths(load_config(config_pth))
# allow to set backfill dates in config (function argument overrides)
if not dates and "backfill" in config:
dates = list(pd.to_datetime(config["backfill"]))
assert (
dates is not None or period is not None
), "Must specify either dates or period"
gt = metrics.load_ground_truth(config[module]["data"])
if not dates:
assert period is not None
dates = pd.date_range(
start=start_date, end=gt.index.max(), freq=f"{period}D", closed="left"
)
print(
"Running backfill for "
+ ", ".join(map(lambda x: x.strftime("%Y-%m-%d"), dates))
)
# setup experiment environment
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
experiment_id = f'{config["region"]}/{now}'
basedir = f"{cluster.FS}/{cluster.USER}/covid19/forecasts/{experiment_id}"
# setup executor
extra_params = config[module].get("resources", {})
executor = mk_executor(
f'backfill_{config["region"]}',
basedir,
{**extra_params, "array_parallelism": array_parallelism},
)
print(f"Backfilling in {basedir}")
# Copy any asset files into `basedir/assets`
os.makedirs(os.path.join(basedir, "assets"))
config[module] = copy_assets(config[module], basedir)
with RsyncSnapshot(
snapshot_dir=basedir + "/snapshot", exclude=["notebooks/*", "tests/*"],
), tempfile.NamedTemporaryFile() as tfile:
# Make sure that we use the CFG with absolute paths since we are now inside the snapshot directory
with open(tfile.name, "w") as fout:
yaml.dump(config, fout)
for date in dates:
print(f"Running CV for {date.date()}")
cv_params = {
k: v for k, v in ctx.params.items() if k in {p.name for p in cv.params}
}
cv_params["config_pth"] = tfile.name
with executor.nest(), executor.set_folder(
os.path.join(basedir, f"sweep_{date.date()}/%j")
):
_, jobs = ctx.invoke(
cv,
basedir=os.path.join(basedir, f"sweep_{date.date()}"),
basedate=date,
executor=executor,
**cv_params,
)
if remote:
executor.launch(basedir + "/workers", array_parallelism)
@cli.command()
@click.argument("paths", nargs=-1)
def ensemble_jobs(paths):
for path in paths:
ms = json.load(open(os.path.join(path, "model_selection.json")))
ms = {x["name"]: x["pth"] for x in ms}
jobs = [
x for x in glob(os.path.join(ms["best_mae"], "job_*")) if os.path.isdir(x)
]
cfg = load_config(os.path.join(path, "cfg.yml"))
cfg["prediction_interval"]["intervals"] = [0.95, 0.8, 0.5]
ensemble(jobs, cfg, cfg["this_module"], "final_model_", ms["best_mae"])
@cli.command()
@click.argument("sweep_dirs", nargs=-1)
def progress(sweep_dirs):
for sweep_dir in sweep_dirs:
sweep_dir = os.path.realpath(sweep_dir)
db_file = next(iglob(os.path.join(sweep_dir, "**/.job.db"), recursive=True))
db_file = os.path.realpath(db_file)
conn = sqlite3.connect(db_file)
df = pd.read_sql(
f"SELECT status, worker_id FROM jobs WHERE id='{db_file}'", conn
)
msg = {
"sweep_dir": sweep_dir,
"success": int((df["status"] == JobStatus.success.value).sum()),
"failed": int((df["status"] == JobStatus.failure.value).sum()),
"pending": int((df["status"] == JobStatus.pending.value).sum()),
"running": int((df["status"] > len(JobStatus)).sum()),
}
print(json.dumps(msg, indent=4))
@cli.command()
@click.argument("sweep_dir")
@click.argument("workers", type=click.INT)
def add_workers(sweep_dir, workers):
DB = os.path.abspath(glob(f"{sweep_dir}/**/.job.db", recursive=True)[0])
cfg = load_config(glob(f"{sweep_dir}/**/cfg.yml", recursive=True)[0])
extra_params = cfg[cfg["this_module"]].get("resources", {})
executor = mk_executor(
"add_workers", os.path.dirname(DB), extra_params, db_pth=os.path.realpath(DB)
)
executor.launch(f"{sweep_dir}/workers", workers)
@cli.command()
@click.argument("sweep_dir")
@click.option("-workers", type=click.INT)
@click.option("-reset-running", is_flag=True, default=False)
def repair(sweep_dir, workers=None, reset_running=False):
db_file = next(iglob(os.path.join(sweep_dir, "**/.job.db"), recursive=True))
txn_manager = TransactionManager(os.path.realpath(db_file))
cond = ""
if reset_running:
cond = f" OR status >= {len(JobStatus)}"
txn_manager.run(
lambda conn: conn.execute(
f"""
UPDATE jobs SET status={JobStatus.pending}
WHERE id='{os.path.realpath(db_file)}' AND (status={JobStatus.failure} {cond})
"""
)
)
if workers is not None:
cfg = load_config(next(iglob(f"{sweep_dir}/**/cfg.yml", recursive=True)))
extra_params = cfg[cfg["this_module"]].get("resources", {})
executor = mk_executor(
"repair", sweep_dir, extra_params, db_pth=os.path.realpath(db_file)
)
executor.launch(os.path.join(sweep_dir, "workers"), workers or -1)
@cli.command()
@click.argument("sweep_dir")
@click.option(
"--type",
"-t",
type=click.Choice(["failure", "running", "pending", "success"]),
required=True,
)
def list_jobs(sweep_dir, type):
db_file = next(iglob(os.path.join(sweep_dir, "**/.job.db"), recursive=True))
db_file = os.path.realpath(db_file)
txn_manager = TransactionManager(db_file)
if type == "running":
cond = f"status >= {len(JobStatus)}"
else:
cond = f"status = {getattr(JobStatus, type)}"
with txn_manager as cur:
cur.execute(
f"""
SELECT pickle, worker_id FROM jobs WHERE id='{db_file}' AND {cond}
"""
)
for row in cur:
print(row)
@cli.command()
@click.argument("config_pth")
@click.argument("module")
@click.argument("basedate", type=click.DateTime())
@click.option("--iters", type=click.INT, default=300)
@click.option("--array-parallelism", type=click.INT, default=20)
@click.option("--resume")
def optimize(config_pth, module, basedate, iters, array_parallelism, resume):
cfg = load_config(config_pth)
ax_client = AxClient(enforce_sequential_optimization=False)
ax_client.create_experiment(
name="covid optimize",
parameters=cfg[module]["optimize"],
objective_name="mae",
choose_generation_strategy_kwargs={
"max_parallelism_override": int(array_parallelism / 5)
},
minimize=True,
)
region = cfg["region"]
cfg["this_module"] = module
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
user = cluster.USER
if resume is not None:
params_used = list(
json.load(open(os.path.join(resume, "best_params.json"))).keys()
)
for metrics_pth in glob(os.path.join(resume, "*", "metrics.csv")):
mets = pd.read_csv(metrics_pth, index_col="Measure")
mae = mets.loc["MAE"].mean()
cfg_ = load_config(os.path.join(os.path.dirname(metrics_pth), "bar.yml"))
params = {k: cfg_["train"][k] for k in params_used}
try:
_, idx = ax_client.attach_trial(params)
ax_client.complete_trial(idx, {"mae": mae})
except ValueError as e:
if "valid value for parameter" in str(e):
continue # this trial isn't valid for this grid, skip it...
raise e
basedir = f"{cluster.FS}/{user}/covid19/forecasts/{region}/{now}"
extra = cfg[module].get("resources", {})
executor = mk_executor(
f"cv_{region}",
basedir + "/%j",
{**extra, "array_parallelism": array_parallelism},
)
db_pth = executor.db_pth
def optimize_run(q, id, current_cfg):
executor = SlurmPoolExecutor(folder=basedir + "/%j", db_pth=db_pth)
executor.update_parameters(
job_name=f"cv_{region}",
partition=cluster.PARTITION,
gpus_per_node=extra.get("gpus", 0),
cpus_per_task=extra.get("cpus", 3),
mem=f'{cluster.MEM_GB(extra.get("memgb", 20))}GB',
array_parallelism=extra.get("array_parallelism", 100),
time=extra.get("timeout", 12 * 60),
)
job = executor.submit(
run_cv,
module=module,
basedir=basedir + "/%j",
cfg=current_cfg,
basedate=basedate,
executor=executor,
test_run=True,
)
result_pth = os.path.join(
os.path.dirname(str(job.paths.result_pickle)), "metrics.csv"
)
        while not os.path.exists(result_pth):
time.sleep(5)
metrics = pd.read_csv(result_pth, index_col="Measure")
q.put({"id": id, "parameters": parameters, "mae": metrics.loc["MAE"].mean()})
return {"mae": metrics.loc["MAE"].mean()}
q = queue.Queue()
waiting_for = 0
launched = False
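    # Ask/tell loop: keep asking Ax for new trials until it reports that maximum
    # parallelism has been reached, then launch the workers (once) and block on the
    # queue until a worker thread reports a finished trial's MAE, which is fed back to Ax.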
for _ in range(iters):
while True:
try:
parameters, trial_idx = ax_client.get_next_trial()
break
except MaxParallelismReachedException:
if not launched:
executor.launch(
os.path.join(basedir, "workers"), workers=array_parallelism
)
launched = True
if waiting_for == 0 and q.qsize() == 0:
break
item = q.get()
ax_client.complete_trial(
trial_index=item["id"], raw_data={"mae": item["mae"]}
)
best_parameters, values = ax_client.get_best_parameters()
trials_df = ax_client.generation_strategy.trials_as_df
with open(os.path.join(basedir, "best_params.json"), "w") as fout:
print(json.dumps(best_parameters), file=fout)
with open(os.path.join(basedir, "ax_state.json"), "w") as fout:
print(json.dumps(ax_client.to_json_snapshot()), file=fout)
trials_df.to_csv(os.path.join(basedir, "trials.csv"), index=False)
current_cfg = copy.deepcopy(cfg)
current_cfg[module]["train"] = {**cfg[module]["train"], **parameters}
current_cfg[module]["train"] = {
k: v[0] if isinstance(v, list) else v
for k, v in current_cfg[module]["train"].items()
}
threading.Thread(target=optimize_run, args=(q, trial_idx, current_cfg)).start()
waiting_for += 1
if __name__ == "__main__":
cli()
| covid19_spread-main | cv.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from covid19_spread.bar import BARCV
import yaml
from argparse import Namespace
import torch as th
class TestBatchedInference:
def test_batched_inference(self):
with th.no_grad():
th.set_default_tensor_type(th.DoubleTensor)
th.manual_seed(0)
mod = BARCV()
cfg = yaml.safe_load(open("cv/us.yml"))
opt = Namespace(
**{
k: v[0] if isinstance(v, list) else v
for k, v in cfg["bar"]["train"].items()
}
)
opt.fdat = cfg["bar"]["data"]
cases, regions, basedate, device = mod.initialize(opt)
cases = cases.type(th.get_default_dtype())
tmax = cases.size(-1)
# torch.bmm can give small precision differences on the CPU when comparing
# batched vs. non-batched inputs. If we do too many simulation iterations,
            # this error can compound to highly noticeable values. Limit the number of
# iterations to a small value. Interestingly, on the GPU it isn't a problem...
sim = mod.func.simulate(tmax, cases, 5, deterministic=True)
sim_batched = mod.func.simulate(
tmax, cases.repeat(2, 1, 1).contiguous(), 5, deterministic=True
)
assert (sim - sim_batched[0]).abs().max().item() < 1e-7
| covid19_spread-main | tests/test_batched_bar_inference.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv
import pandas as pd
from click.testing import CliRunner
import pytest
class TestCV:
def test_load_config(self):
"""Checks configs are loaded correctly"""
job_config = cv.load_config("cv/us.yml")
assert "naive" in job_config
assert job_config["region"] == "us"
def test_run_cv(self, tmpdir):
"""Runs cv pipeline using a single set of paramters from cv/us.yml.
Run is stored in temporary directory using PyTest Fixture `tmpdir`
"""
job_config = cv.load_config("cv/us.yml")
cv.run_cv("naive", tmpdir, job_config)
def test_filter_validation_days(self, tmp_path):
"""Tests split of validation days using tmp_path fixtures"""
data_path = "covid19_spread/data/usa/data_cases.csv"
output_path = tmp_path / "val.csv"
cv.filter_validation_days(data_path, output_path, 7)
original_df = pd.read_csv(data_path, index_col="region")
filtered_df = pd.read_csv(output_path, index_col="region")
assert (original_df.shape[1] - filtered_df.shape[1]) == 7
@pytest.mark.integration
class TestCVIntegration:
def test_cv_naive_us(self, tmpdir):
"""Runs integration test with tmpdir fixture that's cleaned up after tests"""
runner = CliRunner()
result = runner.invoke(cv.cv, f"cv/us.yml naive -basedir {tmpdir}")
assert result.exit_code == 0
def test_cv_naive_basedate(self, tmpdir):
"""Runs integration test with tmpdir fixture that's cleaned up after tests"""
runner = CliRunner()
result = runner.invoke(
cv.cv, f"cv/us.yml naive -basedir {tmpdir} -basedate 2020-04-01"
)
assert result.exit_code == 0
| covid19_spread-main | tests/test_cv.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from covid19_spread import load
import pandas as pd
import pytest
DATA_PATH_US_CSV = "covid19_spread/data/usa/data_cases.csv"
DATA_PATH_NY_CSV = "covid19_spread/data/usa/data_cases_ny.csv"
class TestLoad:
@pytest.mark.parametrize("path", [DATA_PATH_US_CSV, DATA_PATH_NY_CSV])
def test_load_cases_by_region(self, path):
"""Confirms cases loaded are per region"""
cases_df = load.load_confirmed_by_region(path)
assert cases_df.index.name == "date"
assert type(cases_df.index) == pd.core.indexes.datetimes.DatetimeIndex
assert (cases_df >= 0).all().all()
regions = cases_df.columns
suffolk_present = (
"Suffolk County" in regions or "Suffolk County, New York" in regions
)
assert suffolk_present
@pytest.mark.parametrize("path", [DATA_PATH_US_CSV, DATA_PATH_NY_CSV])
def test_load_confirmed(self, path):
df = load.load_confirmed(path, None)
assert df.index.name == "date"
assert (df >= 0).all()
# should only have one column for total cases
assert len(df.shape) == 1
| covid19_spread-main | tests/test_load.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pandas as pd
from datetime import timedelta
def load_ground_truth(path):
df = pd.read_csv(path)
df = df.rename(columns={"region": "date"})
df.set_index("date", inplace=True)
df = df.transpose()
df.index = pd.to_datetime(df.index)
return df
def rmse(pred, gt):
return (pred - gt).pow(2).mean(axis=1).pow(1.0 / 2)
def mae(pred, gt):
return (pred - gt).abs().mean(axis=1)
def mape(pred, gt):
return ((pred - gt).abs() / gt.clip(1)).mean(axis=1)
def max_mae(pred, gt):
return (pred - gt).abs().max(axis=1)
def compute_metrics(df_true, df_pred, mincount=0, nanfill=False):
if isinstance(df_true, str):
df_true = load_ground_truth(df_true)
if isinstance(df_pred, str):
df_pred = pd.read_csv(df_pred, parse_dates=["date"], index_col="date")
return _compute_metrics(df_true, df_pred, mincount, nanfill=nanfill)
def _compute_metrics(df_true, df_pred, mincount=0, nanfill=False):
if nanfill:
cols = sorted(set(df_true.columns).difference(set(df_pred.columns)))
zeros = pd.DataFrame(np.zeros((len(df_pred), len(cols))), columns=cols)
zeros.index = df_pred.index
df_pred = pd.concat([df_pred, zeros], axis=1)
common_cols = list(set(df_true.columns).intersection(set(df_pred.columns)))
df_pred = df_pred[common_cols]
df_true = df_true[common_cols]
z = len(df_pred)
# print(df_pred.round(2))
basedate = df_pred.index.min()
pdate = basedate - timedelta(1)
diff = df_true.loc[pdate] - df_true.loc[basedate - timedelta(2)]
naive = [df_true.loc[pdate] + d * diff for d in range(1, z + 1)]
naive = pd.DataFrame(naive)
naive.index = df_pred.index
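    # Naive baseline: carry the last observed day-over-day change forward linearly,
    # i.e. naive_d = y_{t-1} + d * (y_{t-1} - y_{t-2}); it is used below as the
    # denominator of the MASE-style ratios (MAE_MASE, RMSE_MASE).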
ix = df_pred.index.intersection(df_true.index)
df_pred = df_pred.loc[ix]
naive = naive.loc[ix]
gt = df_true.loc[ix]
# compute state level MAE
state_gt = gt.transpose().groupby(lambda x: x.split(", ")[-1]).sum()
state_pred = df_pred.transpose().groupby(lambda x: x.split(", ")[-1]).sum()
state_mae = (state_gt.sort_index() - state_pred.sort_index()).abs().mean(axis=0)
metrics = pd.DataFrame(
[
rmse(df_pred, gt),
mae(df_pred, gt),
mape(df_pred, gt),
rmse(naive, gt),
mae(naive, gt),
state_mae,
max_mae(df_pred, gt),
max_mae(naive, gt),
],
columns=df_pred.index.to_numpy(),
)
metrics["Measure"] = [
"RMSE",
"MAE",
"MAPE",
"RMSE_NAIVE",
"MAE_NAIVE",
"STATE_MAE",
"MAX_MAE",
"MAX_NAIVE_MAE",
]
metrics.set_index("Measure", inplace=True)
if metrics.shape[1] > 0:
metrics.loc["MAE_MASE"] = metrics.loc["MAE"] / metrics.loc["MAE_NAIVE"]
metrics.loc["RMSE_MASE"] = metrics.loc["RMSE"] / metrics.loc["RMSE_NAIVE"]
# Stack predictions onto last ground truth date.
# We'll take the diff and compute MAE on the new daily counts
stack = pd.concat(
[df_true.loc[[df_pred.index.min() - timedelta(days=1)]], df_pred]
)
stack_diff = stack.diff().loc[ix]
true_diff = df_true.diff().loc[ix]
metrics.loc["MAE_DELTAS"] = mae(stack_diff, true_diff)
metrics.loc["RMSE_DELTAS"] = rmse(stack_diff, true_diff)
return metrics
| covid19_spread-main | covid19_spread/metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
from .cross_val import CV
from . import load
from datetime import timedelta
def simulate(latest_count, latest_delta, latest_date, days):
"""Forecasts 7 days ahead using naive model for a single region:
day_n+1 prediction = day_n + day_n * (day_n - day_n-1 confirmed)
Args:
latest_delta (int): day_n - day_n-1 confirmed
latest_count (int): day_n confirmed
latest_date (datetime): last date with confirmed cases
days (int): number of days to forecast
Returns: dataframe of predictions
"""
forecast = {
-1: latest_count,
0: latest_count + latest_delta,
}
for day in range(1, days):
delta = forecast[day - 1] - forecast[day - 2]
forecast[day] = forecast[day - 1] + delta
# remove latest confirmed from prediction
forecast.pop(-1)
return forecast_to_dataframe(forecast, latest_date, days)
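# Illustrative note (not part of the original module): with latest_count=100 and
# latest_delta=5, the recurrence above yields 105, 110, 115, ... -- the last observed
# daily increase is simply carried forward for every forecasted day.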
def forecast_to_dataframe(forecast, latest_date, days):
"""Converts dictionary of forecasts into dataframe with dates.
    forecast (dict): {0: predicted case count, 1: ...}
"""
prediction_end_date = latest_date + timedelta(days)
dates = pd.date_range(start=latest_date, end=prediction_end_date, closed="right")
forecast_list = [forecast[day] for day in range(days)]
df = pd.DataFrame.from_dict(zip(dates, forecast_list))
df.columns = ["date", "total cases"]
df = df.set_index("date")
return df
def train(region_cases_df):
"""Returns latest count, delta, date needed for forecasting"""
latest_count = region_cases_df[-1]
latest_delta = region_cases_df[-1] - region_cases_df[-2]
latest_date = pd.to_datetime(region_cases_df.index.max())
return latest_count, latest_delta, latest_date
def naive(data_path="data/usa/data.csv", days=7):
"""Performs region level naive forecasts"""
cases_df = load.load_confirmed_by_region(data_path)
regions = cases_df.columns
forecasts = []
for region in regions:
latest_count, latest_delta, latest_date = train(cases_df[region])
forecast_df = simulate(latest_count, latest_delta, latest_date, days)
forecast_df = forecast_df.rename(columns={"total cases": region})
forecasts.append(forecast_df)
df = pd.concat(forecasts, axis=1)
return df
class NaiveCV(CV):
def run_train(self, dset, train_params, model_out):
"""Returns delta between last two days and last confirmed total.
Args:
dset (str): path for confirmed cases
train_params (dict): training parameters
model_out (str): path for saving training checkpoints
Returns: list of (doubling_times (np.float64), regions (list of str))
"""
def run_simulate(self, dset, train_params, model, days, sim_params):
"""Returns new cases count predictions"""
forecast_df = naive(data_path=dset, days=days)
cases_df = load.load_confirmed_by_region(dset)
new_cases_forecast_df = (
pd.concat([cases_df, forecast_df])
.sort_index()
.diff()
.loc[forecast_df.index]
)
return new_cases_forecast_df
CV_CLS = NaiveCV
if __name__ == "__main__":
print(naive())
| covid19_spread-main | covid19_spread/naive.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pandas as pd
from numpy.linalg import norm
import os
import re
from covid19_spread.lib import cluster
from subprocess import check_call
from covid19_spread import metrics
from datetime import timedelta
def mk_absolute_paths(cfg):
if isinstance(cfg, dict):
return {k: mk_absolute_paths(v) for k, v in cfg.items()}
elif isinstance(cfg, list):
return list(map(mk_absolute_paths, cfg))
else:
return (
os.path.realpath(cfg)
if isinstance(cfg, str) and os.path.exists(cfg)
else cfg
)
def rebase_forecast_deltas(val_in, df_forecast_deltas):
gt = metrics.load_ground_truth(val_in)
# Ground truth for the day before our first forecast
prev_day = gt.loc[[df_forecast_deltas.index.min() - timedelta(days=1)]]
# Stack the first day ground truth on top of the forecasts
common_cols = set(df_forecast_deltas.columns).intersection(set(gt.columns))
stacked = pd.concat([prev_day[common_cols], df_forecast_deltas[common_cols]])
# Cumulative sum to compute total cases for the forecasts
df_forecast = stacked.sort_index().cumsum().iloc[1:]
return df_forecast
def update_repo(repo, no_pull=False):
user = cluster.USER
match = re.search(r"([^(\/|:)]+)/([^(\/|:)]+)\.git", repo)
name = f"{match.group(1)}_{match.group(2)}"
data_pth = f"{cluster.FS}/{user}/covid19/data/{name}"
if not os.path.exists(data_pth):
check_call(["git", "clone", repo, data_pth])
if not no_pull:
check_call(["git", "checkout", "master"], cwd=data_pth)
check_call(["git", "pull"], cwd=data_pth)
return data_pth
def drop_k_days_csv(dset, outfile, days):
df = pd.read_csv(dset, index_col="region")
if days > 0:
df = df[sorted(df.columns)[:-days]]
df = drop_all_zero_csv(df)
df.to_csv(outfile)
def drop_all_zero_csv(df):
counts = df.sum(axis=1)
df = df[counts > 0]
return df
def smooth_csv(indset: str, outdset: str, days: int):
df = pd.read_csv(indset, index_col="region").transpose()
incident_cases = df.diff()
smooth = np.round(incident_cases.rolling(window=days, min_periods=1).mean())
smooth.iloc[0] = df.iloc[0]
smooth.cumsum(0).transpose().to_csv(outdset)
smooth = smooth_csv
def print_model_stats(mus, beta, S, U, V, A):
C = A - np.diag(np.diag(A))
print("beta =", beta)
print(f"\nNorms : U = {norm(U).item():.3f}, V = {norm(V):.3f}")
print(f"Max Element: U = {np.max(U).item():.3f}, V = {np.max(V):.3f}")
print(f"Avg Element: U = {np.mean(U).item():.3f}, V = {np.mean(V):.3f}")
print(f"\nSelf: max = {np.max(S):.3f}, avg = {np.mean(S):.3f}")
print(f"Cross: max = {np.max(C):.3f}, avg = {np.mean(C):.3f}")
def standardize_county_name(county):
return (
county.replace(" County", "")
.replace(" Parish", "")
.replace(" Municipality", "")
.replace(" Municipality", "")
.replace(" Borough", "")
)
| covid19_spread-main | covid19_spread/common.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import pandas as pd
import warnings
from datetime import timedelta
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import NegativeBinomial, Normal, Poisson
from . import load
from .cross_val import CV
from .common import rebase_forecast_deltas
import yaml
from . import metrics
import click
import sys
from scipy.stats import nbinom, norm
from bisect import bisect_left, bisect_right
from tqdm import tqdm
import timeit
from typing import List
import os
warnings.filterwarnings("ignore", category=UserWarning)
class BetaRNN(nn.Module):
def __init__(self, M, layers, dim, input_dim, dropout=0.0):
# initialize parameters
super(BetaRNN, self).__init__()
self.h0 = nn.Parameter(th.zeros(layers, M, dim))
self.rnn = nn.RNN(input_dim, dim, layers, dropout=dropout)
self.v = nn.Linear(dim, 1, bias=False)
self.fpos = th.sigmoid
# initialize weights
nn.init.xavier_normal_(self.v.weight)
for p in self.rnn.parameters():
if p.dim() == 2:
nn.init.xavier_normal_(p)
def forward(self, x):
ht, hn = self.rnn(x, self.h0)
beta = self.fpos(self.v(ht))
return beta
def __repr__(self):
return str(self.rnn)
class BetaGRU(BetaRNN):
def __init__(self, M, layers, dim, input_dim, dropout=0.0):
super().__init__(M, layers, dim, input_dim, dropout)
self.rnn = nn.GRU(input_dim, dim, layers, dropout=dropout)
self.rnn.reset_parameters()
self.h0 = nn.Parameter(th.randn(layers, M, dim))
class BetaLSTM(BetaRNN):
def __init__(self, M, layers, dim, input_dim, dropout=0.0):
super().__init__(M, layers, dim, input_dim, dropout)
self.rnn = nn.LSTM(input_dim, dim, layers, dropout=dropout)
self.rnn.reset_parameters()
self.h0 = nn.Parameter(th.zeros(layers, M, dim))
self.c0 = nn.Parameter(th.randn(layers, M, dim))
def forward(self, x):
ht, (hn, cn) = self.rnn(x, (self.h0, self.c0))
beta = self.fpos(self.v(ht))
return beta
class BetaLatent(nn.Module):
def __init__(self, fbeta, regions, tmax, time_features):
"""
Params
======
        - fbeta: factory (M, input_dim) -> beta network (e.g. BetaRNN / BetaGRU / BetaLSTM)
        - regions: names of regions (list)
- tmax: maximum observation time (float)
- time_features: tensor of temporal features (time x region x features)
"""
super(BetaLatent, self).__init__()
self.M = len(regions)
self.tmax = tmax
self.time_features = time_features
input_dim = 0
if time_features is not None:
input_dim += time_features.size(2)
self.fbeta = fbeta(self.M, input_dim)
def forward(self, t, ys):
x = []
if self.time_features is not None:
if self.time_features.size(0) > t.size(0):
f = self.time_features.narrow(0, 0, t.size(0))
else:
f = th.zeros(
t.size(0), self.M, self.time_features.size(2), device=t.device
)
f.copy_(self.time_features.narrow(0, -1, 1))
f.narrow(0, 0, self.time_features.size(0)).copy_(self.time_features)
x.append(f)
x = th.cat(x, dim=2)
beta = self.fbeta(x)
return beta.squeeze().t()
    def apply(self, x):
        # Note: BetaLatent has no rnn/h0/v of its own; delegate to the wrapped beta
        # network, which already applies fpos(v(ht)) in its forward pass.
        return self.fbeta(x)
def __repr__(self):
return str(self.fbeta)
class BAR(nn.Module):
def __init__(
self,
regions,
beta,
window,
dist,
graph,
features,
self_correlation=True,
cross_correlation=True,
offset=None,
):
super(BAR, self).__init__()
self.regions = regions
self.M = len(regions)
self.beta = beta
self.features = features
self.self_correlation = self_correlation
self.cross_correlation = cross_correlation
self.window = window
self.z = nn.Parameter(th.ones((self.M, 7)).fill_(1))
self._alphas = nn.Parameter(th.zeros((self.M, self.M)).fill_(-3))
self.nu = nn.Parameter(th.ones((self.M, 1)).fill_(8))
self.scale = nn.Parameter(th.ones((self.M, 1)))
self._dist = dist
self.graph = graph
self.offset = offset
self.neighbors = self.M
self.adjdrop = nn.Dropout2d(0.1)
if graph is not None:
assert graph.size(0) == self.M, graph.size()
assert graph.size(1) == self.M, graph.size()
self.neighbors = graph.sum(axis=1)
if features is not None:
self.w_feat = nn.Linear(features.size(1), 1)
nn.init.xavier_normal_(self.w_feat.weight)
def dist(self, scores):
if self._dist == "poisson":
return Poisson(scores)
elif self._dist == "nb":
return NegativeBinomial(scores, logits=self.nu)
elif self._dist == "normal":
return Normal(scores, th.exp(self.nu))
else:
raise RuntimeError("Unknown loss")
def alphas(self):
alphas = self._alphas
if self.self_correlation:
with th.no_grad():
alphas.fill_diagonal_(-1e10)
return alphas
def metapopulation_weights(self):
alphas = self.alphas()
W = th.sigmoid(alphas)
W = W.squeeze(0).squeeze(-1).t()
if self.graph is not None:
W = W * self.graph
return W
def score(self, t, ys):
assert t.size(-1) == ys.size(-1), (t.size(), ys.size())
length = ys.size(-1) - self.window + 1
# beta evolution
beta = self.beta(t, ys)
Z = th.zeros(0).sum()
if self.self_correlation:
ws = F.softplus(self.z)
ws = ws.expand(self.M, self.z.size(1))
# self-correlation
Z = F.conv1d(
F.pad(ys.unsqueeze(0) if ys.ndim == 2 else ys, (self.z.size(1) - 1, 0)),
ws.unsqueeze(1),
groups=self.M,
)
Z = Z.squeeze(0)
Z = Z.div(float(self.z.size(1)))
# cross-correlation
Ys = th.zeros(0).sum(0)
W = th.zeros(1, 1)
if self.cross_correlation:
W = self.metapopulation_weights()
Ys = th.stack(
[
F.pad(ys.narrow(-1, i, length), (self.window - 1, 0))
for i in range(self.window)
]
)
orig_shape = Ys.shape
Ys = Ys.view(-1, Ys.size(-2), Ys.size(-1)) if Ys.ndim == 4 else Ys
Ys = (
th.bmm(W.unsqueeze(0).expand(Ys.size(0), self.M, self.M), Ys)
.view(orig_shape)
.mean(dim=0)
)
with th.no_grad():
self.train_stats = (Z.mean().item(), Ys.mean().item())
if self.features is not None:
Ys = Ys + F.softplus(self.w_feat(self.features))
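        # Final score: the learned rate beta scales the sum of the self-correlation
        # term (Z) and the neighbor-averaged cross-correlation term, normalized by
        # each region's neighbor count (M when no graph is given).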
Ys = beta * (Z + Ys) / self.neighbors
return Ys, beta, W
def simulate(self, tobs, ys, days, deterministic=True, return_stds=False):
preds = ys.clone()
self.eval()
assert tobs == preds.size(-1), (tobs, preds.size())
stds = []
for d in range(days):
t = th.arange(tobs + d, device=ys.device) + 1
s, _, _ = self.score(t, preds)
assert (s >= 0).all(), s.squeeze()
if deterministic:
y = self.dist(s).mean
else:
y = self.dist(s).sample()
assert (y >= 0).all(), y.squeeze()
y = y.narrow(-1, -1, 1).clamp(min=1e-8)
preds = th.cat([preds, y], dim=-1)
stds.append(self.dist(s).stddev)
preds = preds.narrow(-1, -days, days)
self.train()
if return_stds:
return preds, stds
return preds
def __repr__(self):
return f"bAR({self.window}) | {self.beta} | EX ({self.train_stats[0]:.1e}, {self.train_stats[1]:.1e})"
def train(model, new_cases, regions, optimizer, checkpoint, args):
print(args)
days_ahead = getattr(args, "days_ahead", 1)
M = len(regions)
device = new_cases.device
tmax = new_cases.size(1)
t = th.arange(tmax, device=device) + 1
size_pred = tmax - days_ahead
reg = th.tensor([0.0], device=device)
target = new_cases.narrow(1, days_ahead, size_pred)
start_time = timeit.default_timer()
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
scores, beta, W = model.score(t, new_cases)
scores = scores.clamp(min=1e-8)
assert scores.dim() == 2, scores.size()
assert scores.size(1) == size_pred + 1
assert beta.size(0) == M
# compute loss
dist = model.dist(scores.narrow(1, days_ahead - 1, size_pred))
_loss = dist.log_prob(target)
loss = -_loss.sum(axis=1).mean()
stddev = model.dist(scores).stddev.mean()
# loss += stddev * args.weight_decay
# temporal smoothness
if args.temporal > 0:
reg = (
args.temporal * th.pow(beta[:, 1:] - beta[:, :-1], 2).sum(axis=1).mean()
)
# back prop
(loss + reg).backward()
# do AdamW-like update for Granger regularization
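        # The penalty applied here is (mean(sigmoid(_alphas)) - granger)^2 with the
        # diagonal pinned to logit(granger); `de / nu` below is its closed-form
        # gradient, 2 * (mean(sigmoid(a)) - y) * sigmoid'(a) / n, applied directly to
        # the weights outside the optimizer step (decoupled, hence "AdamW-like").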
if args.granger > 0:
with th.no_grad():
mu = np.log(args.granger / (1 - args.granger))
y = args.granger
n = th.numel(model._alphas)
ex = th.exp(-model._alphas)
model._alphas.fill_diagonal_(mu)
de = 2 * (model._alphas.sigmoid().mean() - y) * ex
nu = n * (ex + 1) ** 2
_grad = de / nu
_grad.fill_diagonal_(0)
r = args.lr * args.eta * n
model._alphas.copy_(model._alphas - r * _grad)
# make sure we have no NaNs
assert loss == loss, (loss, scores, _loss)
nn.utils.clip_grad_norm_(model.parameters(), 5)
# take gradient step
optimizer.step()
# control
if itr % 500 == 0:
time = timeit.default_timer() - start_time
with th.no_grad(), np.printoptions(precision=3, suppress=True):
length = scores.size(1) - 1
maes = th.abs(dist.mean - new_cases.narrow(1, 1, length))
z = model.z
nu = th.sigmoid(model.nu)
means = model.dist(scores).mean
W_spread = (W * (1 - W)).mean()
_err = W.mean() - args.granger
print(
f"[{itr:04d}] Loss {loss.item():.2f} | "
f"Temporal {reg.item():.5f} | "
f"MAE {maes.mean():.2f} | "
f"{model} | "
f"{args.loss} ({means[:, -1].min().item():.2f}, {means[:, -1].max().item():.2f}) | "
f"z ({z.min().item():.2f}, {z.mean().item():.2f}, {z.max().item():.2f}) | "
f"W ({W.min().item():.2f}, {W.mean().item():.2f}, {W.max().item():.2f}) | "
f"W_spread {W_spread:.2f} | mu_err {_err:.3f} | "
f"nu ({nu.min().item():.2f}, {nu.mean().item():.2f}, {nu.max().item():.2f}) | "
f"nb_stddev ({stddev.data.mean().item():.2f}) | "
f"scale ({th.exp(model.scale).mean():.2f}) | "
f"time = {time:.2f}s"
)
th.save(model.state_dict(), checkpoint)
start_time = timeit.default_timer()
print(f"Train MAE,{maes.mean():.2f}")
return model
def _get_arg(args, v, device, regions):
if hasattr(args, v):
print(getattr(args, v))
fs = []
for _file in getattr(args, v):
d = th.load(_file)
_fs = th.cat([d[r].unsqueeze(0) for r in regions], dim=0)
fs.append(_fs)
return th.cat(fs, dim=1).float().to(device)
else:
return None
def _get_dict(args, v, device, regions):
if hasattr(args, v):
_feats = []
for _file in getattr(args, v):
print(f"Loading {_file}")
d = th.load(_file)
feats = None
for i, r in enumerate(regions):
if r not in d:
continue
_f = d[r]
if feats is None:
feats = th.zeros(len(regions), d[r].size(0), _f.size(1))
feats[i, :, : _f.size(1)] = _f
_feats.append(feats.to(device).float())
return th.cat(_feats, dim=2)
else:
return None
class BARCV(CV):
def initialize(self, args):
device = th.device(
"cuda" if th.cuda.is_available() and getattr(args, "cuda", True) else "cpu"
)
cases, regions, basedate = load.load_confirmed_csv(args.fdat)
assert (cases == cases).all(), th.where(cases != cases)
# Cumulative max across time
cases = np.maximum.accumulate(cases, axis=1)
new_cases = th.zeros_like(cases)
new_cases.narrow(1, 1, cases.size(1) - 1).copy_(cases[:, 1:] - cases[:, :-1])
assert (new_cases >= 0).all(), new_cases[th.where(new_cases < 0)]
new_cases = new_cases.float().to(device)[:, args.t0 :]
print("Number of Regions =", new_cases.size(0))
print("Timeseries length =", new_cases.size(1))
print(
"Increase: max all = {}, max last = {}, min last = {}".format(
new_cases.max().item(),
new_cases[:, -1].max().item(),
new_cases[:, -1].min().item(),
)
)
tmax = new_cases.size(1) + 1
# adjust max window size to available data
args.window = min(args.window, new_cases.size(1) - 4)
# setup optional features
graph = (
th.load(args.graph).to(device).float() if hasattr(args, "graph") else None
)
features = _get_arg(args, "features", device, regions)
time_features = _get_dict(args, "time_features", device, regions)
if time_features is not None:
time_features = time_features.transpose(0, 1)
time_features = time_features.narrow(0, args.t0, new_cases.size(1))
print("Feature size = {} x {} x {}".format(*time_features.size()))
print(time_features.min(), time_features.max())
self.weight_decay = 0
# setup beta function
if args.decay.startswith("latent"):
dim, layers = args.decay[6:].split("_")
fbeta = lambda M, input_dim: BetaRNN(
M,
int(layers),
int(dim),
input_dim,
dropout=getattr(args, "dropout", 0.0),
)
beta_net = BetaLatent(fbeta, regions, tmax, time_features)
self.weight_decay = args.weight_decay
elif args.decay.startswith("lstm"):
dim, layers = args.decay[len("lstm") :].split("_")
fbeta = lambda M, input_dim: BetaLSTM(
M,
int(layers),
int(dim),
input_dim,
dropout=getattr(args, "dropout", 0.0),
)
beta_net = BetaLatent(fbeta, regions, tmax, time_features)
self.weight_decay = args.weight_decay
elif args.decay.startswith("gru"):
dim, layers = args.decay[len("gru") :].split("_")
fbeta = lambda M, input_dim: BetaGRU(
M,
int(layers),
int(dim),
input_dim,
dropout=getattr(args, "dropout", 0.0),
)
beta_net = BetaLatent(fbeta, regions, tmax, time_features)
self.weight_decay = args.weight_decay
else:
raise ValueError("Unknown beta function")
self.func = BAR(
regions,
beta_net,
args.window,
args.loss,
graph,
features,
self_correlation=getattr(args, "self_correlation", True),
cross_correlation=not getattr(args, "no_cross_correlation", False),
offset=cases[:, 0].unsqueeze(1).to(device).float(),
).to(device)
return new_cases, regions, basedate, device
def run_train(self, dset, args, checkpoint):
args.fdat = dset
new_cases, regions, _, device = self.initialize(args)
params = []
exclude = {
"z",
"nu",
"_alphas",
"_alpha_weights",
"beta.fbeta.h0",
"beta.fbeta.c0",
"beta.fbeta.conv.weight",
"beta.fbeta.conv.bias",
"scale",
}
for name, p in dict(self.func.named_parameters()).items():
wd = 0 if name in exclude else args.weight_decay
if wd != 0:
print(f"Regularizing {name} = {wd}")
params.append({"params": p, "weight_decay": wd})
optimizer = optim.AdamW(params, lr=args.lr, betas=[args.momentum, 0.999])
model = train(self.func, new_cases, regions, optimizer, checkpoint, args)
return model
def run_prediction_interval(
self, means_pth: str, stds_pth: str, intervals: List[float],
):
means = pd.read_csv(means_pth, index_col="date", parse_dates=["date"])
stds = pd.read_csv(stds_pth, index_col="date", parse_dates=["date"])
means_t = means.values
stds_t = stds.values
multipliers = np.array([norm.ppf(1 - (1 - x) / 2) for x in intervals])
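        # Two-sided Gaussian quantiles: e.g. a 0.95 interval gives norm.ppf(0.975) ~= 1.96,
        # so the bounds below are mean -/+ 1.96 * std, with the lower bound clipped at 0.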
result = np.empty((means_t.shape[0], means_t.shape[1], len(intervals), 3))
lower = means_t[:, :, None] - multipliers.reshape(1, 1, -1) * stds_t[:, :, None]
upper = means_t[:, :, None] + multipliers.reshape(1, 1, -1) * stds_t[:, :, None]
result = np.stack(
[np.clip(lower, a_min=0, a_max=None), upper, np.ones(lower.shape)], axis=-1,
)
cols = pd.MultiIndex.from_product(
[means.columns, intervals, ["lower", "upper", "fallback"]]
)
result_df = pd.DataFrame(result.reshape(result.shape[0], -1), columns=cols)
result_df["date"] = means.index
melted = result_df.melt(
id_vars=["date"], var_name=["location", "interval", "lower/upper"]
)
pivot = melted.pivot(
index=["date", "location", "interval"],
columns="lower/upper",
values="value",
).reset_index()
return pivot.merge(
means.reset_index().melt(
id_vars=["date"], var_name="location", value_name="mean"
),
on=["date", "location"],
).merge(
stds.reset_index().melt(
id_vars=["date"], var_name="location", value_name="std"
),
on=["date", "location"],
)
CV_CLS = BARCV
@click.group()
def cli():
pass
@cli.command()
@click.argument("pth")
def simulate(pth):
chkpnt = th.load(pth)
mod = BARCV()
prefix = ""
if "final_model" in pth:
prefix = "final_model_"
cfg = yaml.safe_load(open(f"{os.path.dirname(pth)}/{prefix}bar.yml"))
args = argparse.Namespace(**cfg["train"])
new_cases, regions, basedate, device = mod.initialize(args)
mod.func.load_state_dict(chkpnt)
res = mod.func.simulate(new_cases.size(1), new_cases, args.test_on)
df = pd.DataFrame(res.cpu().data.numpy().transpose(), columns=regions)
df.index = pd.date_range(
start=pd.to_datetime(basedate) + timedelta(days=1), periods=len(df)
)
df = rebase_forecast_deltas(cfg["data"], df)
gt = pd.read_csv(cfg["data"], index_col="region").transpose()
gt.index = pd.to_datetime(gt.index)
print(metrics._compute_metrics(gt, df, nanfill=True))
def main(args):
parser = argparse.ArgumentParser("beta-AR")
parser.add_argument("-fdat", help="Path to confirmed cases", required=True)
parser.add_argument("-lr", type=float, default=5e-2)
parser.add_argument("-weight-decay", type=float, default=0)
parser.add_argument("-niters", type=int, default=2000)
parser.add_argument("-amsgrad", default=False, action="store_true")
parser.add_argument("-loss", default="lsq", choices=["nb", "poisson"])
parser.add_argument("-decay", default="exp")
parser.add_argument("-t0", default=10, type=int)
parser.add_argument("-fit-on", default=5, type=int)
parser.add_argument("-test-on", default=5, type=int)
parser.add_argument("-checkpoint", type=str, default="/tmp/bar_model.bin")
parser.add_argument("-window", type=int, default=25)
parser.add_argument("-momentum", type=float, default=0.99)
args = parser.parse_args()
mod = BARCV()
model = mod.run_train(args.fdat, args, args.checkpoint)
with th.no_grad():
forecast = mod.run_simulate(args, model)
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] in cli.commands:
cli()
else:
main(sys.argv[1:])
| covid19_spread-main | covid19_spread/bar.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
import torch as th
import yaml
from pathlib import Path
import json
import os
def load_confirmed_csv(path):
df = pd.read_csv(path)
df.set_index("region", inplace=True)
basedate = df.columns[-1]
nodes = df.index.to_numpy()
cases = df.to_numpy()
return th.from_numpy(cases), nodes, basedate
def load_confirmed(path, regions):
"""Returns dataframe of total confirmed cases"""
df = load_confirmed_by_region(path, regions=regions)
return df.sum(axis=1)
def load_confirmed_by_region(path, regions=None, filter_unknown=True):
"""Loads csv file for confirmed cases by region"""
df = pd.read_csv(path, index_col=0, header=None)
# transpose so dates are along rows to match h5
df = df.T
# set date as index
df = df.rename(columns={"region": "date"})
df = df.set_index("date")
df.index = pd.to_datetime(df.index)
df = df.astype(float)
if regions is not None:
df = df[regions]
if filter_unknown:
df = df.loc[:, df.columns != "Unknown"]
return df
def load_backfill(
jobdir, model=None, indicator="model_selection.json", forecast="best_mae",
):
"""collect all forcasts from job dir"""
forecasts = {}
configs = []
for path in Path(jobdir).rglob(indicator):
date = str(path).split("/")[-2]
assert date.startswith("sweep_"), str(path)
jobs = [m["pth"] for m in json.load(open(path)) if m["name"] == forecast]
assert len(jobs) == 1, jobs
job = jobs[0]
date = date[6:]
forecasts[date] = os.path.join(job, "final_model_validation.csv")
cfg = yaml.safe_load(open(os.path.join(job, "../cfg.yml")))
cfg = yaml.safe_load(
open(os.path.join(job, f"{model or cfg['this_module']}.yml"))
)
cfg = cfg["train"]
cfg["date"] = date
cfg["job"] = job
configs.append(cfg)
configs = pd.DataFrame(configs)
configs.set_index("date", inplace=True)
return forecasts, configs
| covid19_spread-main | covid19_spread/load.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Any, List, Tuple
import pandas as pd
from datetime import timedelta
import torch as th
from tqdm import tqdm
import numpy as np
from .common import mk_absolute_paths
import yaml
from tensorboardX import SummaryWriter
from collections import namedtuple, defaultdict
from itertools import count
from . import common, metrics
import os
from glob import glob
import shutil
import json
BestRun = namedtuple("BestRun", ("pth", "name"))
def load_config(cfg_pth: str) -> Dict[str, Any]:
return mk_absolute_paths(yaml.load(open(cfg_pth), Loader=yaml.FullLoader))
class CV:
def run_simulate(
self,
dset: str,
args: Dict[str, Any],
model: Any,
days: int,
sim_params: Dict[str, Any],
) -> pd.DataFrame:
"""
Run a simulation given a trained model. This should return a pandas DataFrame with each
column corresponding to a location and each row corresponding to a date. The value
of each cell is the forecasted cases per day (*not* cumulative cases)
"""
args.fdat = dset
if model is None:
raise NotImplementedError
cases, regions, basedate, device = self.initialize(args)
tmax = cases.size(1)
test_preds = model.simulate(tmax, cases, days, **sim_params)
test_preds = test_preds.cpu().numpy()
df = pd.DataFrame(test_preds.T, columns=regions)
if basedate is not None:
base = pd.to_datetime(basedate)
ds = [base + timedelta(i) for i in range(1, days + 1)]
df["date"] = ds
df.set_index("date", inplace=True)
return df
def run_standard_deviation(
self,
dset,
args,
nsamples,
intervals,
orig_cases,
model=None,
batch_size=1,
closed_form=False,
):
with th.no_grad():
args.fdat = dset
if model is None:
raise NotImplementedError
cases, regions, basedate, device = self.initialize(args)
tmax = cases.size(1)
base = pd.to_datetime(basedate)
def mk_df(arr):
df = pd.DataFrame(arr, columns=regions)
df.index = pd.date_range(base + timedelta(days=1), periods=args.test_on)
return df
if closed_form:
preds, stds = model.simulate(
tmax, cases, args.test_on, deterministic=True, return_stds=True
)
stds = th.cat([x.narrow(-1, -1, 1) for x in stds], dim=-1)
return mk_df(stds.cpu().numpy().T), mk_df(preds.cpu().numpy().T)
samples = []
if batch_size > 1:
cases = cases.repeat(batch_size, 1, 1)
nsamples = nsamples // batch_size
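            # With batch_size > 1, the cases tensor is tiled along a batch dimension so
            # each simulate() call draws batch_size Monte Carlo trajectories at once;
            # nsamples is reduced accordingly so the total sample count stays the same.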
for i in tqdm(range(nsamples)):
test_preds = model.simulate(tmax, cases, args.test_on, False)
test_preds = test_preds.cpu().numpy()
samples.append(test_preds)
samples = (
np.stack(samples, axis=0)
if batch_size <= 1
else np.concatenate(samples, axis=0)
)
return mk_df(np.std(samples, axis=0).T), mk_df(np.mean(samples, axis=0).T)
def run_train(self, dset, model_params, model_out):
"""
Train a model
"""
...
def preprocess(self, dset: str, preprocessed: str, preprocess_args: Dict[str, Any]):
"""
Perform any kind of model specific pre-processing.
"""
if "smooth" in preprocess_args:
common.smooth(dset, preprocessed, preprocess_args["smooth"])
else:
shutil.copy(dset, preprocessed)
def metric_df(self, basedir):
runs = []
for metrics_pth in glob(os.path.join(basedir, "*/metrics.csv")):
metrics = pd.read_csv(metrics_pth, index_col="Measure")
runs.append(
{
"pth": os.path.dirname(metrics_pth),
"mae": metrics.loc["MAE"][-1],
"rmse": metrics.loc["RMSE"][-1],
"mae_deltas": metrics.loc["MAE_DELTAS"].mean(),
"rmse_deltas": metrics.loc["RMSE_DELTAS"].mean(),
"state_mae": metrics.loc["STATE_MAE"][-1],
}
)
return pd.DataFrame(runs)
def model_selection(self, basedir: str, config, module) -> List[BestRun]:
"""
Evaluate a sweep returning a list of models to retrain on the full dataset.
"""
df = self.metric_df(basedir)
if "ablation" in config["train"]:
ablation_map = defaultdict(count().__next__)
ablations = []
for _, row in df.iterrows():
job_cfg = load_config(os.path.join(row.pth, f"{module}.yml"))
if (
job_cfg["train"]["ablation"] is not None
and len(job_cfg["train"]["ablation"]) > 0
):
ablation = ",".join(
os.path.basename(x) for x in job_cfg["train"]["ablation"]
)
else:
ablation = "null"
ablations.append(ablation)
ablation_map[ablation]
ablation_map = {k: f"ablation_{v}" for k, v in ablation_map.items()}
rev_map = {v: k for k, v in ablation_map.items()}
df["ablation"] = [ablation_map[x] for x in ablations]
with open(os.path.join(basedir, "ablation_map.json"), "w") as fout:
print(json.dumps(rev_map), file=fout)
best_runs = []
for key in ["mae", "rmse", "mae_deltas", "rmse_deltas"]:
best = df.loc[df.groupby("ablation")[key].idxmin()]
best_runs.extend(
[
BestRun(x.pth, f"best_{key}_{x.ablation}")
for _, x in best.iterrows()
]
)
return best_runs
return [
BestRun(df.sort_values(by="mae").iloc[0].pth, "best_mae"),
BestRun(df.sort_values(by="rmse").iloc[0].pth, "best_rmse"),
BestRun(df.sort_values(by="mae_deltas").iloc[0].pth, "best_mae_deltas"),
BestRun(df.sort_values(by="rmse_deltas").iloc[0].pth, "best_rmse_deltas"),
BestRun(df.sort_values(by="state_mae").iloc[0].pth, "best_state_mae"),
]
def compute_metrics(
self, gt: str, forecast: str, model: Any, metric_args: Dict[str, Any]
) -> Tuple[pd.DataFrame, Dict[str, Any]]:
return metrics.compute_metrics(gt, forecast).round(2), {}
def setup_tensorboard(self, basedir):
"""
Setup dir and writer for tensorboard logging
"""
self.tb_writer = SummaryWriter(logdir=basedir)
def run_prediction_interval(
self, means_pth: str, stds_pth: str, intervals: List[float]
):
...
| covid19_spread-main | covid19_spread/cross_val.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import click
class DefaultGroup(click.Group):
ignore_unknown_options = True
def __init__(self, *args, **kwargs):
default_command = kwargs.pop("default_command", None)
super(DefaultGroup, self).__init__(*args, **kwargs)
self.default_cmd_name = None
if default_command is not None:
self.set_default_command(default_command)
def set_default_command(self, command):
if isinstance(command, str):
cmd_name = command
else:
cmd_name = command.name
self.add_command(command)
self.default_cmd_name = cmd_name
def parse_args(self, ctx, args):
if not args and self.default_cmd_name is not None:
args.insert(0, self.default_cmd_name)
return super(DefaultGroup, self).parse_args(ctx, args)
def get_command(self, ctx, cmd_name):
if cmd_name not in self.commands and self.default_cmd_name is not None:
ctx.args0 = cmd_name
cmd_name = self.default_cmd_name
return super(DefaultGroup, self).get_command(ctx, cmd_name)
def resolve_command(self, ctx, args):
cmd_name, cmd, args = super(DefaultGroup, self).resolve_command(ctx, args)
args0 = getattr(ctx, "args0", None)
if args0 is not None:
args.insert(0, args0)
return cmd_name, cmd, args
class OptionNArgs(click.Option):
def __init__(self, *args, **kwargs):
self.save_other_options = kwargs.pop("save_other_options", True)
nargs = kwargs.pop("nargs", -1)
assert nargs == -1, "nargs, if set, must be -1 not {}".format(nargs)
super(OptionNArgs, self).__init__(*args, **kwargs)
self._previous_parser_process = None
self._eat_all_parser = None
def add_to_parser(self, parser, ctx):
def parser_process(value, state):
# method to hook to the parser.process
done = False
value = [value]
if self.save_other_options:
# grab everything up to the next option
while state.rargs and not done:
for prefix in self._eat_all_parser.prefixes:
if state.rargs[0].startswith(prefix):
done = True
if not done:
value.append(state.rargs.pop(0))
else:
# grab everything remaining
value += state.rargs
state.rargs[:] = []
value = tuple(value)
# call the actual process
self._previous_parser_process(value, state)
retval = super(OptionNArgs, self).add_to_parser(parser, ctx)
for name in self.opts:
our_parser = parser._long_opt.get(name) or parser._short_opt.get(name)
if our_parser:
self._eat_all_parser = our_parser
self._previous_parser_process = our_parser.process
our_parser.process = parser_process
break
return retval
| covid19_spread-main | covid19_spread/lib/click_lib.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import submitit
from submitit.slurm.slurm import SlurmExecutor, SlurmJob
from submitit.core import core, utils
import uuid
import typing as tp
import time
import sys
import os
import sqlite3
import enum
import random
from contextlib import (
contextmanager,
redirect_stderr,
redirect_stdout,
AbstractContextManager,
)
import traceback
import itertools
import timeit
from covid19_spread.lib.context_managers import env_var
class TransactionManager(AbstractContextManager):
"""
Class for managing exclusive database transactions. This locks the entire
database to ensure atomicity. This allows nesting transactions, where
the inner transaction is idempotent.
"""
def __init__(self, db_pth: str, nretries: int = 20):
self.retries = nretries
self.db_pth = db_pth
self.conn = None
self.cursor = None
self.nesting = 0
self.start_time = None
def __getstate__(self):
state = self.__dict__.copy()
state["nesting"] = 0
state["conn"] = None
state["cursor"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
def run(self, txn, ntries: int = 100):
exn = None
for _ in range(ntries):
try:
with self as conn:
conn.execute("BEGIN EXCLUSIVE")
return txn(conn)
except Exception as e:
traceback.print_exc(file=sys.stdout)
sleep_time = random.randint(0, 10)
print(f"Transaction failed! Sleeping for {sleep_time} seconds")
time.sleep(sleep_time)
exn = e
print("Failed too many times!!!!")
raise exn
def __enter__(self):
print(f"Entering transaction, nesting = {self.nesting}")
self.nesting += 1
if self.conn is None:
self.conn = sqlite3.connect(self.db_pth)
self.cursor = self.conn.cursor()
self.start_time = timeit.default_timer()
return self.cursor
def __exit__(self, exc_type, exc_val, tb):
self.nesting -= 1
print(f"Exiting transaction, nesting = {self.nesting}")
if exc_type is not None:
traceback.print_exc(file=sys.stdout)
if self.nesting == 0:
if exc_type is None:
print("committing transaction")
self.conn.commit()
else:
print("Rolling back transaction")
self.conn.rollback()
self.cursor.close()
self.conn.close()
self.cursor = None
self.conn = None
print(f"Finished transaction in {timeit.default_timer() - self.start_time}")
self.start_time = None
class JobStatus(enum.IntEnum):
pending = 0
success = 1
failure = 2
final = 3 # pending if all other jobs are finished
def __conform__(self, protocol):
if protocol is sqlite3.PrepareProtocol:
return self.value
class Worker:
def __init__(self, db_pth: str, worker_id: int):
self.db_pth = db_pth
self.worker_id = worker_id
self.sleep = 0
self.worker_finished = False
self.current_job = None
def fetch_ready_job(self, cur):
# Select a pending job that doesn't have any unfinished dependencies
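        # A job is "ready" when every dependency row joins to a job whose status is
        # success: the COALESCE defaults make jobs with no dependencies trivially
        # ready, and the HAVING clause rejects any pending or failed dependency.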
query = f"""
SELECT
jobs.pickle,
jobs.job_id,
jobs.retry_count,
MIN(COALESCE(j2.status, {JobStatus.success})) as min_status,
MAX(COALESCE(j2.status, {JobStatus.failure})) AS max_status
FROM jobs
LEFT JOIN dependencies USING(pickle)
LEFT JOIN jobs j2 ON dependencies.depends_on=j2.pickle
WHERE
jobs.status={JobStatus.pending} AND
jobs.id='{self.db_pth}' AND
(dependencies.id='{self.db_pth}' OR dependencies.id IS NULL) AND
(j2.id='{self.db_pth}' OR j2.id IS NULL)
GROUP BY jobs.pickle, jobs.job_id
HAVING MIN(COALESCE(j2.status, {JobStatus.success})) >= {JobStatus.success}
AND MAX(COALESCE(j2.status, {JobStatus.success})) <= {JobStatus.success}
LIMIT 1
"""
cur.execute(query)
return cur.fetchall()
def finished(self, cur):
cur.execute(
f"""
SELECT COUNT(1) FROM jobs
WHERE status NOT IN ({JobStatus.success}, {JobStatus.failure}) AND id='{self.db_pth}'
"""
)
return cur.fetchone()[0] == 0
def count_running(self, cur):
cur.execute(
f"SELECT COUNT(1) FROM jobs WHERE status > {len(JobStatus)} AND id='{self.db_pth}'"
)
return cur.fetchone()[0]
def get_final_jobs(self, cur):
cur.execute(
f"SELECT pickle, job_id, retry_count FROM jobs WHERE status={JobStatus.final} AND id='{self.db_pth}' LIMIT 1"
)
return cur.fetchall()
def checkpoint(self):
print(f"Worker {self.worker_id} checkpointing")
if self.current_job is not None:
pickle, job_id, retry_count = self.current_job
print(f"Worker {self.worker_id} setting {pickle} back to pending...")
transaction_manager = TransactionManager(self.db_pth)
# Set the job back to pending
transaction_manager.run(
lambda conn: conn.execute(
f"UPDATE jobs SET status={JobStatus.pending} WHERE pickle='{pickle}' AND id='{self.db_pth}'"
)
)
return submitit.helpers.DelayedSubmission(Worker(self.db_pth, self.worker_id))
def __call__(self):
self.worker_finished = False
worker_job_id = f"worker_{self.worker_id}"
running_status = (
len(JobStatus) + self.worker_id + 1
) # mark in progress with this code
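        # Status values above len(JobStatus) therefore encode "claimed by worker k";
        # the progress/repair/list_jobs commands detect running jobs by comparing the
        # stored status against len(JobStatus).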
transaction_manager = TransactionManager(self.db_pth)
while not self.worker_finished:
if self.sleep > 0:
print(f"Sleeping for {self.sleep} seconds...")
time.sleep(self.sleep)
print(f"Worker {self.worker_id} getting job to run")
def txn(conn):
ready = self.fetch_ready_job(conn)
status = JobStatus.pending
if len(ready) == 0: # no jobs ready
if self.finished(conn):
self.worker_finished = True
return None # all jobs are finished, exiting...
if self.count_running(conn) > 0:
self.sleep = min(max(self.sleep * 2, 1), 30)
return None
ready = self.get_final_jobs(conn)
status = JobStatus.final
if len(ready) == 0:
self.sleep = min(max(self.sleep * 2, 1), 30)
return None
print(
f"Worker {self.worker_id} is executing final_job: {ready[0][0]}"
)
pickle, job_id, retry_count = ready[0][0], ready[0][1], ready[0][2]
# Mark that we're working on this job.
conn.execute(
f"""
UPDATE jobs SET status={running_status}, worker_id='{worker_job_id}'
WHERE pickle='{pickle}' AND status={status} AND id='{self.db_pth}'
"""
)
return pickle, job_id, retry_count
res = transaction_manager.run(txn)
if res is None:
continue
self.current_job = res
self.sleep = 0
pickle, job_id, retry_count = res
print(f"Worker {self.worker_id} got job to run: {pickle}")
# Run the job
job_dir = os.path.dirname(pickle)
paths = utils.JobPaths(job_dir, job_id=job_id)
with paths.stderr.open("w", buffering=1) as stderr, paths.stdout.open(
"w", buffering=1
) as stdout:
with redirect_stderr(stderr), redirect_stdout(stdout):
try:
with env_var({"SLURM_PICKLE_PTH": str(pickle)}):
dl = utils.DelayedSubmission.load(pickle)
dl.result()
status = JobStatus.success
except Exception:
retry_count -= 1
print(f"Job failed, retry_count = {retry_count}")
status = (
JobStatus.failure if retry_count == 0 else JobStatus.pending
)
traceback.print_exc(file=sys.stderr)
print(f"Worker {self.worker_id} finished job with status {status}")
transaction_manager.run(
lambda conn: conn.execute(
f"UPDATE jobs SET status={status.value}, retry_count={retry_count} WHERE pickle='{pickle}' AND id='{self.db_pth}'"
)
)
self.current_job = None
print(f"Worker {self.worker_id} updated job status")
class SlurmPoolExecutor(SlurmExecutor):
def __init__(self, *args, **kwargs):
db_pth = kwargs.pop("db_pth", None)
super().__init__(*args, **kwargs)
self.launched = False
self.nested = False
os.makedirs(self.folder, exist_ok=True)
if db_pth is None:
# Place the actual database in ~/.slurm_pool/<unique_id>.db
unique_filename = str(uuid.uuid4())
self.db_pth = os.path.expanduser(f"~/.slurm_pool/{unique_filename}.db")
os.makedirs(os.path.dirname(self.db_pth), exist_ok=True)
if not os.path.exists(os.path.join(str(self.folder), ".job.db")):
os.symlink(self.db_pth, os.path.join(str(self.folder), ".job.db"))
else:
self.db_pth = db_pth
print(self.db_pth)
self.transaction_manager = TransactionManager(self.db_pth)
with self.transaction_manager as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS jobs(status int, pickle text, job_id text, worker_id text, id TEXT, retry_count INT)"
)
conn.execute("CREATE INDEX IF NOT EXISTS jobs_p_idx ON jobs(pickle)")
conn.execute("CREATE INDEX IF NOT EXISTS jobs_id_idx ON jobs(id)")
conn.execute(
"CREATE TABLE IF NOT EXISTS dependencies(pickle text, depends_on text, id TEXT)"
)
conn.execute("CREATE INDEX IF NOT EXISTS dep_p_idx ON dependencies(pickle)")
conn.execute(
"CREATE INDEX IF NOT EXISTS dep_d_idx ON dependencies(depends_on)"
)
conn.execute("CREATE INDEX IF NOT EXISTS dep_id_idx ON dependencies(id)")
def _submit_command(self, command):
tmp_uuid = uuid.uuid4().hex
tasks_ids = list(range(self._num_tasks()))
job = self.job_class(folder=self.folder, job_id=tmp_uuid, tasks=tasks_ids)
return job
def _internal_process_submissions(
self, delayed_submissions: tp.List[utils.DelayedSubmission]
) -> tp.List[core.Job[tp.Any]]:
if len(delayed_submissions) == 1:
jobs = super()._internal_process_submissions(delayed_submissions)
vals = (
JobStatus.pending,
str(jobs[0].paths.submitted_pickle),
jobs[0].job_id,
self.db_pth,
3,
)
with self.transaction_manager as conn:
conn.execute(
"INSERT INTO jobs(status, pickle, job_id, id, retry_count) VALUES(?, ?, ?, ?, ?)",
vals,
)
return jobs
# array
folder = utils.JobPaths.get_first_id_independent_folder(self.folder)
folder.mkdir(parents=True, exist_ok=True)
pickle_paths = []
for d in delayed_submissions:
pickle_path = folder / f"{uuid.uuid4().hex}.pkl"
d.timeout_countdown = self.max_num_timeout
d.dump(pickle_path)
pickle_paths.append(pickle_path)
n = len(delayed_submissions)
self._throttle()
tasks_ids = list(range(len(pickle_paths)))
jobs: tp.List[core.Job[tp.Any]] = [
SlurmJob(folder=self.folder, job_id=f"job_{a}", tasks=tasks_ids)
for a in range(n)
]
with self.transaction_manager as conn:
for job, pickle_path in zip(jobs, pickle_paths):
job.paths.move_temporary_file(pickle_path, "submitted_pickle")
vals = (
JobStatus.pending,
str(job.paths.submitted_pickle),
job.job_id,
self.db_pth,
3,
)
conn.execute(
"INSERT INTO jobs(status, pickle, job_id, id, retry_count) VALUES(?, ?, ?, ?, ?)",
vals,
)
return jobs
def submit(
self, fn: tp.Callable[..., core.R], *args: tp.Any, **kwargs: tp.Any
) -> core.Job[core.R]:
return self.transaction_manager.run(
lambda conn: super(SlurmPoolExecutor, self).submit(fn, *args, **kwargs)
)
def map_array(
self, fn: tp.Callable[..., core.R], *iterable: tp.Iterable[tp.Any]
) -> tp.List[core.Job[core.R]]:
return self.transaction_manager.run(
lambda conn: super(SlurmPoolExecutor, self).map_array(fn, *iterable)
)
def submit_dependent(
self,
depends_on: tp.List[core.Job],
fn: tp.Callable[..., core.R],
*args: tp.Any,
**kwargs: tp.Any,
) -> core.Job[core.R]:
ds = utils.DelayedSubmission(fn, *args, **kwargs)
def txn(conn):
job = self._internal_process_submissions([ds])[0]
for dep in depends_on:
vals = (
str(job.paths.submitted_pickle),
str(dep.paths.submitted_pickle),
self.db_pth,
)
conn.execute(
"INSERT INTO dependencies(pickle, depends_on, id) VALUES (?,?,?)",
vals,
)
return job
return self.transaction_manager.run(txn)
def launch(self, folder=None, workers: int = 2):
if not self.nested:
with self.transaction_manager as conn:
vals = (self.db_pth,)
conn.execute("SELECT COUNT(1) FROM jobs WHERE id=?", vals)
(njobs,) = conn.fetchone()
workers = njobs if workers == -1 else workers
ex = SlurmExecutor(folder or self.folder)
ex.update_parameters(**self.parameters)
self.launched = True
jobs = []
with ex.batch():
for i in range(workers):
jobs.append(ex.submit(Worker(self.db_pth, i)))
return jobs
def extend_dependencies(self, jobs: tp.List[core.Job]):
def txn(conn):
conn.execute(
"""
SELECT pickle
FROM dependencies
WHERE depends_on=? AND id=?
""",
(os.environ["SLURM_PICKLE_PTH"], self.db_pth),
)
my_deps = conn.fetchall()
for (pickle,), depends_on in itertools.product(my_deps, jobs):
vals = (
str(pickle),
str(depends_on.paths.submitted_pickle),
self.db_pth,
)
conn.execute(
"INSERT INTO dependencies (pickle, depends_on, id) VALUES(?,?,?)",
vals,
)
self.transaction_manager.run(txn)
@contextmanager
def nest(self):
self.nested = True
yield
self.nested = False
@contextmanager
def set_folder(self, folder):
old_folder = self.folder
self.folder = folder
yield
self.folder = old_folder
| covid19_spread-main | covid19_spread/lib/slurm_pool_executor.py |
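The `SlurmPoolExecutor` above stores jobs and their dependencies in SQLite and drains them with `Worker` processes submitted through submitit. The following is a minimal usage sketch, not repository code; the log folder, SLURM parameters, and `train` function are illustrative assumptions.

```python
# Hypothetical usage sketch of SlurmPoolExecutor; folder and parameters are assumptions.
from covid19_spread.lib.slurm_pool_executor import SlurmPoolExecutor

def train(cfg_idx: int) -> int:
    return cfg_idx * 2  # placeholder for real work

executor = SlurmPoolExecutor(folder="/tmp/sweep_logs")      # assumed log folder
executor.update_parameters(partition="learnfair", time=60)  # assumed SLURM settings

jobs = [executor.submit(train, i) for i in range(4)]        # rows in the jobs table
# Runs only after every job in `jobs` has finished successfully:
summary = executor.submit_dependent(jobs, print, "all done")

# Submit 2 Worker jobs to SLURM; they poll the database until the queue is empty.
executor.launch(workers=2)
```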
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import os
import copy
import sys
import typing as tp
@contextlib.contextmanager
def env_var(key_vals: tp.Dict[str, tp.Union[str, None]]):
"""
Context manager for manipulating environment variables. The environment is
restored upon exiting the context manager.
Params:
key_vals - mapping of environment variables to their values. If a value is
`None`, then it is deleted from the environment.
"""
old_dict = {k: os.environ.get(k, None) for k in key_vals.keys()}
for k, v in key_vals.items():
if v is None:
if k in os.environ:
del os.environ[k]
else:
os.environ[k] = v
yield
for k, v in old_dict.items():
if v:
os.environ[k] = v
elif k in os.environ:
del os.environ[k]
@contextlib.contextmanager
def chdir(d):
old_dir = os.getcwd()
os.chdir(d)
yield
os.chdir(old_dir)
@contextlib.contextmanager
def sys_path(x):
old_path = copy.deepcopy(sys.path)
sys.path.insert(0, x)
yield
sys.path = old_path
| covid19_spread-main | covid19_spread/lib/context_managers.py |
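A short usage sketch for the three context managers above; the paths and variable values are illustrative only.

```python
# Illustrative usage of env_var, chdir and sys_path; paths and values are made up.
import os
from covid19_spread.lib.context_managers import env_var, chdir, sys_path

with env_var({"MAX_PARALLELISM": "4", "DEBUG": None}):
    # MAX_PARALLELISM is set and DEBUG is removed only inside this block.
    print(os.environ.get("MAX_PARALLELISM"))

with chdir("/tmp"):
    print(os.getcwd())  # /tmp here; the previous directory is restored afterwards

with sys_path("/tmp/my_modules"):
    pass  # imports resolve against /tmp/my_modules first inside this block
```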
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import getpass
USER = getpass.getuser()
if os.path.exists(f"/checkpoint"):
FS = "/checkpoint"
PARTITION = "learnfair"
MEM_GB = lambda x: x
elif os.path.exists("/fsx"):
FS = "/fsx"
PARTITION = "compute"
MEM_GB = lambda x: 0
else:
FS = os.getcwd() # for CI
| covid19_spread-main | covid19_spread/lib/cluster.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from slack import WebClient
import os
import json
import warnings
def post_slack_message(channel, text):
cred_path = os.path.expanduser("~/.credentials.json")
if not os.path.exists(cred_path):
msg = "Could not find ~/.credentials.json with Slack credentials, not posting message..."
warnings.warn(msg, UserWarning)
return
credentials = json.load(open(cred_path))
if "slack" not in credentials or "bot_token" not in credentials["slack"]:
warnings.warn(
"Could not find Slack credentials in ~/.credentials.json", UserWarning
)
return
client = WebClient(token=credentials["slack"]["bot_token"])
client.chat_postMessage(channel=channel, text=text)
| covid19_spread-main | covid19_spread/lib/slack.py |
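`post_slack_message` reads its bot token from `~/.credentials.json`. The sketch below shows the expected file shape and a call; the token and channel name are placeholders.

```python
# post_slack_message expects ~/.credentials.json to contain (token is a placeholder):
#   {"slack": {"bot_token": "xoxb-placeholder-token"}}
from covid19_spread.lib.slack import post_slack_message

post_slack_message(channel="#new-data", text="hello from the sweep bot")
```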
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "../"))
import cv
import tempfile
from subprocess import check_call, check_output
import sqlite3
import click
import datetime
from covid19_spread.lib.context_managers import chdir
script_dir = os.path.dirname(os.path.realpath(__file__))
DB = os.path.join(script_dir, ".sweep.db")
def mk_db():
if not os.path.exists(DB):
conn = sqlite3.connect(DB)
conn.execute(
"""
CREATE TABLE sweeps(
path text primary key,
basedate text NOT NULL,
launch_time real NOT NULL,
module text NOT NULL,
slurm_job text,
id text
);
"""
)
conn.execute(
"""
CREATE TABLE submitted(
sweep_path text UNIQUE,
submitted_at real NOT NULL,
FOREIGN KEY(sweep_path) REFERENCES sweeps(path)
);
"""
)
class Recurring:
script_dir = script_dir
def __init__(self, force=False):
self.force = force
mk_db()
def get_id(self) -> str:
"""Return a unique ID to be used in the database"""
raise NotImplementedError
def update_data(self) -> None:
"""Fetch new data (should be idempotent)"""
raise NotImplementedError
def command(self) -> str:
"""The command to run in cron"""
raise NotImplementedError
def latest_date(self) -> datetime.date:
""""Return the latest date that we have data for"""
raise NotImplementedError
def module(self):
"""CV module to run"""
return "mhp"
def schedule(self) -> str:
"""Cron schedule"""
return "*/5 * * * *" # run every 5 minutes
def install(self) -> None:
"""Method to install cron job"""
crontab = check_output(["crontab", "-l"]).decode("utf-8")
marker = f"__JOB_{self.get_id()}__"
if marker in crontab:
raise ValueError(
"Cron job already installed, cleanup crontab"
" with `crontab -e` before installing again"
)
envs = (
check_output(["conda", "env", "list"]).decode("utf-8").strip().split("\n")
)
active = [e for e in envs if "*" in e]
conda_env = None
if len(active) == 1:
conda_env = f"source activate {active[0].split()[0]}"
with tempfile.NamedTemporaryFile() as tfile:
with open(tfile.name, "w") as fout:
print(crontab, file=fout)
print(f"# {marker}", file=fout)
user = os.environ["USER"]
script = os.path.realpath(__file__)
schedule = self.schedule()
stdoutfile = os.path.join(self.script_dir, f".{self.get_id()}.log")
stderrfile = os.path.join(self.script_dir, f".{self.get_id()}.err")
home = os.path.expanduser("~")
cmd = [
"source /etc/profile.d/modules.sh",
f"source {home}/.profile",
f"source {home}/.bash_profile",
f"source {home}/.bashrc",
conda_env,
"slack-on-fail " + self.command(),
]
cmd = [c for c in cmd if c is not None]
subject = f"ERROR in recurring sweep: {self.get_id()}"
envs = [
f'PATH="/usr/local/bin:/private/home/{user}/bin:/usr/sbin:$PATH"',
"__PROD__=1",
f"USER={user}",
]
print(
f'{schedule} {" ".join(envs)} bash -c "{" && ".join(cmd)} >> {stdoutfile} 2>> {stderrfile}"',
file=fout,
)
check_call(["crontab", tfile.name])
def refresh(self) -> None:
"""Check for new data, schedule a job if new data is found"""
self.update_data()
latest_date = self.latest_date()
conn = sqlite3.connect(DB)
res = conn.execute(
"SELECT path, launch_time FROM sweeps WHERE basedate=? AND id=?",
(str(latest_date), self.get_id()),
)
if not self.force:
for pth, launch_time in res:
launch_time = datetime.datetime.fromtimestamp(launch_time)
if os.path.exists(pth):
print(f"Already launched {pth} at {launch_time}, exiting...")
return
# This directory got deleted, remove it from the database...
conn.execute(
"DELETE FROM sweeps WHERE path=? AND id=?", (pth, self.get_id())
)
conn.commit()
sweep_path = self.launch_job()
vals = (
sweep_path,
str(latest_date),
datetime.datetime.now().timestamp(),
self.module(),
self.get_id(),
)
conn.execute(
"INSERT INTO sweeps(path, basedate, launch_time, module, id) VALUES (?,?,?,?,?)",
vals,
)
conn.commit()
def launch_job(self, **kwargs):
"""Launch the sweep job"""
# Launch the sweep
config = os.path.join(script_dir, f"../../cv/{kwargs.get('cv_config')}.yml")
with chdir(f"{script_dir}/../../"):
sweep_path, jobs = click.Context(cv.cv).invoke(
cv.cv,
config_pth=config,
module=kwargs.get("module", "bar"),
remote=True,
array_parallelism=kwargs.get("array_parallelism", 20),
)
return sweep_path
| covid19_spread-main | covid19_spread/data/recurring.py |
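`Recurring` is an abstract base: subclasses supply an id, a cron command, an idempotent data refresh, and the latest available date. The sketch below is a hypothetical subclass, not repository code; the id, command string, and CSV path are assumptions.

```python
# Hypothetical Recurring subclass; id, command and CSV path are assumptions.
import datetime
import pandas
from covid19_spread.data.recurring import Recurring

class ToyRecurring(Recurring):
    def get_id(self) -> str:
        return "toy-sweep"

    def command(self) -> str:
        return "recurring run toy"  # assumed CLI entry point

    def update_data(self) -> None:
        pass  # fetch/refresh raw data here (must be idempotent)

    def latest_date(self) -> datetime.date:
        df = pandas.read_csv("data_cases.csv", index_col="region")  # assumed file
        return pandas.to_datetime(df.columns).max().date()

# ToyRecurring().install() registers the cron entry;
# ToyRecurring().refresh() checks for new data and launches a sweep if needed.
```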
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from covid19_spread.common import update_repo
import pandas
import re
import datetime
def get_index():
index = pandas.read_csv(
"https://storage.googleapis.com/covid19-open-data/v2/index.csv"
)
index = index[index["key"].str.match(r"^US_[A-Z]+_\d{5}$").fillna(False)]
index["fips"] = index["subregion2_code"].astype(str).str.zfill(5)
index["name"] = index["subregion2_name"]
return index
def get_nyt(metric="cases"):
print("NYT")
data_repo = update_repo("https://github.com/nytimes/covid-19-data.git")
df = pandas.read_csv(
os.path.join(data_repo, "us-counties.csv"), dtype={"fips": str}
)
index = get_index()
df = df.merge(index[["fips", "subregion1_name", "name"]], on="fips")
df["loc"] = df["subregion1_name"] + "_" + df["name"]
pivot = df.pivot_table(values=metric, columns=["loc"], index="date")
pivot = pivot.fillna(0)
pivot.index = pandas.to_datetime(pivot.index)
if metric == "deaths":
return pivot
# Swap out NYTimes NY state data with the NY DOH data.
NYSTATE_URL = (
"https://health.data.ny.gov/api/views/xdss-u53e/rows.csv?accessType=DOWNLOAD"
)
df = pandas.read_csv(NYSTATE_URL).rename(
columns={"Test Date": "date", "Cumulative Number of Positives": "cases"}
)
df["loc"] = "New York_" + df["County"]
df = df.pivot_table(values=metric, columns=["loc"], index="date")
df.columns = [x + " County" for x in df.columns]
# The NYT labels each date as the date the report comes out, not the date the data corresponds to.
# Add 1 day to the NYS DOH data to get it to align
df.index = pandas.to_datetime(df.index) + datetime.timedelta(days=1)
without_nystate = pivot[[c for c in pivot.columns if not c.startswith("New York")]]
last_date = min(without_nystate.index.max(), df.index.max())
df = df[df.index <= last_date]
without_nystate = without_nystate[without_nystate.index <= last_date]
assert (
df.index.max() == without_nystate.index.max()
), "NYT and DOH data don't matchup yet!"
# Only take NYT data up to the date for which we have NY state data
without_nystate = without_nystate[without_nystate.index <= df.index.max()]
return without_nystate.merge(
df, left_index=True, right_index=True, how="outer"
).fillna(0)
def get_google(metric="cases"):
index = get_index()
df = pandas.read_csv(
"https://storage.googleapis.com/covid19-open-data/v2/epidemiology.csv",
parse_dates=["date"],
)
merged = df.merge(index, on="key")
merged = merged[~merged["subregion2_name"].isnull()]
merged["loc"] = merged["subregion1_name"] + "_" + merged["name"]
value_col = "total_confirmed" if metric == "cases" else "total_deceased"
pivot = merged.pivot(values=value_col, index="date", columns="loc")
if pivot.iloc[-1].isnull().any():
pivot = pivot.iloc[:-1]
pivot.iloc[0] = pivot.iloc[0].fillna(0)
pivot = pivot.fillna(method="ffill")
return pivot
def get_jhu(metric="cases"):
urls = {
"cases": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv",
"deaths": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv",
}
df = pandas.read_csv(urls[metric])
df = df[~df["FIPS"].isnull()]
df["FIPS"] = df["FIPS"].apply(lambda x: str(int(x)).zfill(5))
index = get_index()
index["loc"] = index["subregion1_name"] + "_" + index["name"]
merged = df.merge(index[["fips", "loc"]], left_on="FIPS", right_on="fips")
date_cols = [c for c in merged.columns if re.match(r"\d+/\d+/\d+", c)]
transposed = merged[date_cols + ["loc"]].set_index("loc").transpose()
transposed.index = pandas.to_datetime(transposed.index)
return transposed.sort_index()
SOURCES = {
"nyt": get_nyt,
"google": get_google,
"jhu": get_jhu,
}
| covid19_spread-main | covid19_spread/data/usa/process_cases.py |
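The `SOURCES` dict above maps a source name to a loader that returns a date-indexed frame with one cumulative-count column per county, named `State_County`. A minimal call sketch:

```python
# Sketch: pulling cumulative case counts through the SOURCES registry.
from covid19_spread.data.usa.process_cases import SOURCES

cases = SOURCES["nyt"]("cases")    # columns look like "New York_Suffolk County"
deaths = SOURCES["jhu"]("deaths")  # same layout from the JHU time series
print(cases.index.max(), cases.shape)
```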
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import pandas as pd
import torch as th
from os import listdir
from os.path import isfile, join
from covid19_spread.data.usa.process_cases import SOURCES
import warnings
from covid19_spread.common import standardize_county_name
import os
import multiprocessing as mp
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
nyc_boroughs = [
"Bronx, New York",
"Kings, New York",
"Queens, New York",
"New York, New York",
"Richmond, New York",
]
def county_id(county, state):
return f"{county}, {state}"
def rename_nyc_boroughs(county_name):
if county_name in nyc_boroughs:
return "New York City, New York"
else:
return county_name
def merge_nyc_boroughs(df, ntypes):
df["region"] = df["region"].transform(rename_nyc_boroughs)
prev_len = len(df)
df = df.groupby(["region", "type"]).mean()
assert len(df) == prev_len - ntypes * 4, (prev_len, len(df))
df = df.reset_index()
print(df[df["region"] == "New York City, New York"])
return df
def process_time_features(df, pth, shift=0, merge_nyc=False, input_resolution="county"):
print(f"Processing {pth} at resolution: {input_resolution}")
time_features = pd.read_csv(pth)
if input_resolution == "county_state":
# Expand state level time features to each county in `df`
idx = df.rename_axis("county").reset_index()[["county"]]
idx["region"] = idx["county"].apply(lambda x: x.split(", ")[-1])
time_features = time_features.merge(idx, on="region").drop(columns="region")
time_features = time_features.rename(columns={"county": "region"})
time_feature_regions = time_features["region"].unique()
ncommon = len(df.index.intersection(time_feature_regions))
if ncommon != len(df):
missing = set(df.index).difference(set(time_feature_regions))
warnings.warn(
f"{pth}: Missing time features for the following regions: {list(missing)}"
)
if ncommon != len(time_feature_regions):
ignoring = set(time_feature_regions).difference(set(df.index))
warnings.warn(
f"{pth}: Ignoring time features for the following regions: {list(ignoring)}"
)
time_features = time_features[time_features["region"].isin(set(df.index))]
if merge_nyc:
time_features = merge_nyc_boroughs(
time_features, len(time_features["type"].unique())
)
# Transpose to have two level columns (region, type) and dates as index
time_features = time_features.set_index(["region", "type"]).transpose().sort_index()
time_features.index = pd.to_datetime(time_features.index)
# Trim prefix if it starts before the dates in `df`
time_features = time_features.loc[time_features.index >= df.columns.min()]
# Fill in dates that are missing in `time_features` that exist in `df`
time_features = time_features.reindex(df.columns)
# Shift time features UP by `shift` days
time_features = time_features.shift(shift)
# forward fill the missing values
time_features = time_features.fillna(method="ffill")
# Fill the beginning end with zeros if null
time_features = time_features.fillna(0)
time_features = time_features[time_features.columns.sort_values()]
feature_tensors = {
region: th.from_numpy(time_features[region].values)
for region in time_features.columns.get_level_values(0).unique()
}
if input_resolution == "county_state":
pth = pth.replace("state", "county_state")
th.save(feature_tensors, pth.replace(".csv", ".pt"))
def run_par(fs, args, kwargs, max_par=None):
if not isinstance(fs, list):
fs = [fs] * len(args)
if "MAX_PARALLELISM" in os.environ:
max_par = int(os.environ["MAX_PARALLELISM"])
print(f"Max parallelism = {max_par}")
if max_par is not None and max_par <= 1:
for _f, _args, _kwargs in zip(fs, args, kwargs):
_f(*_args, **_kwargs)
return
with mp.Pool(max_par) as pool:
results = [
pool.apply_async(f, args=a, kwds=k) for f, a, k in zip(fs, args, kwargs)
]
[r.get() for r in results]
def create_time_features():
from .symptom_survey import prepare as ss_prepare
from .fb import prepare as fb_prepare
from .google import prepare as google_prepare
from .testing import prepare as testing_prepare
fs = [ss_prepare, fb_prepare, google_prepare, testing_prepare]
run_par(fs, [()] * len(fs), [{}] * len(fs))
def main(metric, with_features, source, resolution):
df = SOURCES[source](metric)
df.index = pd.to_datetime(df.index)
dates = df.index
df.columns = [c.split("_")[1] + ", " + c.split("_")[0] for c in df.columns]
# drop all zero columns
df = df[df.columns[(df.sum(axis=0) != 0).values]]
df = df.transpose() # row for each county, columns correspond to dates...
# make sure counts are strictly increasing
df = df.cummax(axis=1)
# throw away all-zero columns, i.e., days with no cases
counts = df.sum(axis=0)
df = df.iloc[:, np.where(counts > 0)[0]]
if resolution == "state":
df = df.groupby(lambda x: x.split(", ")[-1]).sum()
df = df.drop(
index=["Virgin Islands", "Northern Mariana Islands", "Puerto Rico", "Guam"],
errors="ignore",
)
county_id = {c: i for i, c in enumerate(df.index)}
df.to_csv(f"{SCRIPT_DIR}/data_{metric}.csv", index_label="region")
df[df.index.str.endswith("New York")].to_csv(
f"{SCRIPT_DIR}/data_{metric}_ny.csv", index_label="region"
)
df[df.index.str.endswith("Florida")].to_csv(
f"{SCRIPT_DIR}/data_{metric}_fl.csv", index_label="region"
)
if resolution == "county":
# Build state graph...
adj = np.zeros((len(df), len(df)))
for _, g in df.groupby(lambda x: x.split(", ")[-1]):
idxs = np.array([county_id[c] for c in g.index])
adj[np.ix_(idxs, idxs)] = 1
print(adj)
th.save(th.from_numpy(adj), f"{SCRIPT_DIR}/state_graph.pt")
if with_features:
create_time_features()
res = resolution
merge_nyc = metric == "deaths" and res == "county"
features = [
(f"{SCRIPT_DIR}/testing/ratio_features_{res}.csv", 0, res),
(f"{SCRIPT_DIR}/testing/total_features_{res}.csv", 0, res),
(f"{SCRIPT_DIR}/fb/mobility_features_{res}_fb.csv", 5, res),
(f"{SCRIPT_DIR}/google/mobility_features_{res}_google.csv", 5, res),
(f"{SCRIPT_DIR}/google/weather_features_{res}.csv", 5, res),
(f"{SCRIPT_DIR}/google/epi_features_{res}.csv", 7, res),
(f"{SCRIPT_DIR}/google/epi_features_{res}.csv", 7, res),
]
if res == "state":
features.append((f"{SCRIPT_DIR}/google/hosp_features_{res}.csv", 0, res))
features.append((f"{SCRIPT_DIR}/shifted_features_{res}.csv", 0, res))
features.append((f"{SCRIPT_DIR}/google/vaccination_state.csv", 0, "state"))
else:
features.append(
(f"{SCRIPT_DIR}/google/vaccination_state.csv", 0, "county_state")
)
for signal, lag in [
(f"{SCRIPT_DIR}/symptom_survey/doctor-visits_smoothed_adj_cli-{{}}.csv", 2),
(f"{SCRIPT_DIR}/symptom_survey/fb-survey_smoothed_wcli-{{}}.csv", 0),
(
f"{SCRIPT_DIR}/symptom_survey/fb-survey_smoothed_hh_cmnty_cli-{{}}.csv",
0,
),
(
f"{SCRIPT_DIR}/symptom_survey/fb-survey_smoothed_wearing_mask_all-{{}}.csv",
5,
),
(
f"{SCRIPT_DIR}/symptom_survey/fb-survey_smoothed_wothers_masked-{{}}.csv",
5,
),
(
f"{SCRIPT_DIR}/symptom_survey/fb-survey_smoothed_wcovid_vaccinated_or_accept-{{}}.csv",
5,
),
(f"{SCRIPT_DIR}/fb/mobility_features_{{}}_fb.csv", 5),
(f"{SCRIPT_DIR}/google/mobility_features_{{}}_google.csv", 5),
]:
if res == "county":
features.append((signal.format("county"), lag, "county"))
features.append((signal.format("state"), lag, "county_state"))
else:
features.append((signal.format("state"), lag, "state"))
features = [(df, pth, lag, merge_nyc, r) for pth, lag, r in features]
run_par([process_time_features] * len(features), features, [{}] * len(features))
if __name__ == "__main__":
parser = argparse.ArgumentParser("US data")
parser.add_argument("-metric", default="cases", choices=["cases", "deaths"])
parser.add_argument("-with-features", default=False, action="store_true")
parser.add_argument("-source", choices=SOURCES.keys(), default="nyt")
parser.add_argument("-resolution", choices=["county", "state"], default="county")
opt = parser.parse_args()
main(opt.metric, opt.with_features, opt.source, opt.resolution)
| covid19_spread-main | covid19_spread/data/usa/convert.py |
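The same pipeline can be driven without the CLI by calling `main` directly; this mirrors the call made by the recurring US job.

```python
# Programmatic equivalent of: python convert.py -metric cases -source nyt -resolution county
from covid19_spread.data.usa.convert import main

# Writes data_cases.csv (plus the NY/FL subsets and the state graph); set
# with_features=True to also regenerate the per-signal feature tensors.
main("cases", with_features=False, source="nyt", resolution="county")
```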
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from .. import recurring
import pandas
from ...lib.slack import post_slack_message
from datetime import date, datetime, timedelta
from .convert import main as convert
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
class USARRecurring(recurring.Recurring):
script_dir = SCRIPT_DIR
def get_id(self):
return "us-bar"
def command(self):
return f"recurring run us"
def module(self):
return "bar_time_features"
def schedule(self):
return "*/10 * * * *"
def update_data(self):
convert("cases", with_features=False, source="nyt", resolution="county")
def latest_date(self):
df = pandas.read_csv(f"{SCRIPT_DIR}/data_cases.csv", index_col="region")
max_date = pandas.to_datetime(df.columns).max().date()
if max_date < (date.today() - timedelta(days=1)) and datetime.now().hour > 17:
expected_date = date.today() - timedelta(days=1)
msg = f"*WARNING: new data for {expected_date} is still not available!*"
post_slack_message(channel="#cron_errors", text=msg)
return pandas.to_datetime(df.columns).max().date()
def launch_job(self, **kwargs):
# Make clean with features
convert("cases", with_features=True, source="nyt", resolution="county")
msg = f"*New Data Available for US: {self.latest_date()}*"
post_slack_message(channel="#new-data", text=msg)
return super().launch_job(
module="bar", cv_config="us", array_parallelism=90, **kwargs
)
| covid19_spread-main | covid19_spread/data/usa/us_recurring.py |
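A sketch of how this recurring job is driven; `install` and `refresh` come from the `Recurring` base class.

```python
# Sketch: wiring up the recurring US sweep (install is a one-time cron registration).
from covid19_spread.data.usa.us_recurring import USARRecurring

job = USARRecurring()
# job.install()  # one-time: add the crontab entry that runs `recurring run us`
job.refresh()    # re-download NYT cases and launch a sweep if a new date appeared
```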
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
from datetime import datetime
from covid19_spread.data.usa.process_cases import get_index
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main():
print("Getting Google mobility data...")
cols = [
"date",
"region",
"retail_and_recreation_percent_change_from_baseline",
"grocery_and_pharmacy_percent_change_from_baseline",
"parks_percent_change_from_baseline",
"transit_stations_percent_change_from_baseline",
"workplaces_percent_change_from_baseline",
"residential_percent_change_from_baseline",
]
def get_county_mobility_google(fin=None):
# Google LLC "Google COVID-19 Community Mobility Reports."
# https://www.google.com/covid19/mobility/ Accessed: 2020-05-04.
# unfortunately, this is only relative to mobility on a baseline date
if fin is None:
fin = "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv"
df_Gmobility_global = pd.read_csv(
fin, parse_dates=["date"], dtype={"census_fips_code": str}
)
df_Gmobility_usa = df_Gmobility_global.query("country_region_code == 'US'")
return df_Gmobility_usa
df = get_county_mobility_google()
df = df[~df["census_fips_code"].isnull()]
index = get_index()
index["region"] = index["subregion2_name"] + ", " + index["subregion1_name"]
df = df.merge(
index, left_on="census_fips_code", right_on="fips", suffixes=("", "_x")
)[list(df.columns) + ["region"]]
df = df[cols]
val_cols = [c for c in df.columns if c not in {"region", "date"}]
ratio = (1 + df.set_index(["region", "date"]) / 100).reset_index()
piv = ratio.pivot(index="date", columns="region", values=val_cols)
piv = piv.rolling(7, min_periods=1).mean().transpose()
piv.iloc[0] = piv.iloc[0].fillna(0)
piv = piv.fillna(0)
dfs = []
for k in piv.index.get_level_values(0).unique():
df = piv.loc[k].copy()
df["type"] = k
dfs.append(df)
df = pd.concat(dfs)
df = df[["type"] + sorted([c for c in df.columns if isinstance(c, datetime)])]
df.columns = [str(c.date()) if isinstance(c, datetime) else c for c in df.columns]
df.to_csv(f"{SCRIPT_DIR}/mobility_features_county_google.csv")
state = get_county_mobility_google()
state = state[(~state["sub_region_1"].isnull()) & (state["sub_region_2"].isnull())]
state["region"] = state["sub_region_1"]
state = state[cols]
ratio = (1 + state[cols].set_index(["region", "date"]) / 100).reset_index()
piv = ratio.pivot(index="date", columns="region", values=val_cols)
piv = piv.rolling(7, min_periods=1).mean().transpose()
piv.columns = [str(x.date()) for x in sorted(piv.columns)]
piv = piv.fillna(0).reset_index(level=0).rename(columns={"level_0": "type"})
piv.to_csv(f"{SCRIPT_DIR}/mobility_features_state_google.csv")
if __name__ == "__main__":
main()
| covid19_spread-main | covid19_spread/data/usa/google/process_mobility.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pandas
from datetime import datetime
import os
from covid19_spread.data.usa.process_cases import get_index
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main():
index = pandas.read_csv(
"https://storage.googleapis.com/covid19-open-data/v2/index.csv"
)
state_index = index[(index["key"].str.match("^US_[A-Z]+$")).fillna(False)]
index = get_index()
def zscore(piv):
# z-score
piv = (piv - piv.mean(skipna=True)) / piv.std(skipna=True)
piv = piv.fillna(method="ffill").fillna(method="bfill")
# piv = piv.fillna(0)
return piv
def zero_one(df):
df = df.fillna(0)
# df = df.div(df.max(axis=0), axis=1)
df = df / df.max(axis=0)
df = df.fillna(0)
return df
def process_df(df, columns, resolution, func_normalize):
idx = state_index if resolution == "state" else index
merged = df.merge(idx, on="key")
if resolution == "state":
exclude = {"US_MP", "US_AS", "US_GU", "US_VI", "US_PR"}
merged = merged[~merged["key"].isin(exclude)]
merged["region"] = merged["subregion1_name"]
else:
merged["region"] = merged["name"] + ", " + merged["subregion1_name"]
piv = merged.pivot(index="date", columns="region", values=columns)
if func_normalize is not None:
piv = func_normalize(piv)
dfs = []
for k in piv.columns.get_level_values(0).unique():
dfs.append(piv[k].transpose())
dfs[-1]["type"] = k
df = pandas.concat(dfs)
df = df[["type"] + [c for c in df.columns if isinstance(c, datetime)]]
df.columns = [
str(c.date()) if isinstance(c, datetime) else c for c in df.columns
]
return df.fillna(0) # in case all values are NaN
def get_df(url):
if "weather" in url:
# This dataset is quite large. Iterate in chunks, and filter out non-US rows
chunks = []
for chunk in pandas.read_csv(url, parse_dates=["date"], chunksize=200000):
chunks.append(
chunk[~chunk["key"].isnull() & chunk["key"].str.startswith("US")]
)
df = pandas.concat(chunks)
else:
df = pandas.read_csv(url, parse_dates=["date"])
return df[~df["key"].isnull() & df["key"].str.startswith("US")]
def do_feature(url, columns, resolution, func_normalize, outfile):
print(f"Fetching {url}")
df = get_df(url)
vaccination = process_df(
df, columns=columns, resolution=resolution, func_normalize=func_normalize
)
vaccination = vaccination.reset_index().set_index(["region", "type"])
vaccination.to_csv(outfile, index_label=["region", "type"])
# --- Vaccination data ---
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/vaccinations.csv",
columns=["new_persons_vaccinated", "total_persons_vaccinated"],
resolution="state",
func_normalize=zero_one,
outfile=os.path.join(SCRIPT_DIR, "vaccination_state.csv"),
)
# --- Hospitalizations ---
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/hospitalizations.csv",
columns=[
"current_hospitalized",
"current_intensive_care",
"current_ventilator",
],
resolution="state",
func_normalize=lambda x: zero_one(x.clip(0, None)),
outfile=os.path.join(SCRIPT_DIR, "hosp_features_state.csv"),
)
# --- Weather features ---
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/weather.csv",
columns=[
"average_temperature",
"minimum_temperature",
"maximum_temperature",
"rainfall",
"relative_humidity",
"dew_point",
],
resolution="state",
func_normalize=zscore,
outfile=os.path.join(SCRIPT_DIR, "weather_features_state.csv"),
)
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/weather.csv",
columns=[
"average_temperature",
"minimum_temperature",
"maximum_temperature",
"rainfall",
"relative_humidity",
"dew_point",
],
resolution="county",
func_normalize=zscore,
outfile=os.path.join(SCRIPT_DIR, "weather_features_county.csv"),
)
# --- Epi features ---
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/epidemiology.csv",
columns=["new_confirmed"],
resolution="state",
func_normalize=lambda x: zero_one(x.clip(0, None)),
outfile=os.path.join(SCRIPT_DIR, "epi_features_state.csv"),
)
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/epidemiology.csv",
columns=["new_confirmed"],
resolution="county",
func_normalize=lambda x: zero_one(x.clip(0, None)),
outfile=os.path.join(SCRIPT_DIR, "epi_features_county.csv"),
)
# ---- Testing -----
print("Getting Google testing data...")
df = get_df("https://storage.googleapis.com/covid19-open-data/v2/epidemiology.csv")
testing = process_df(
df,
columns=["new_tested"],
resolution="state",
func_normalize=lambda x: zero_one(x.clip(0, None)),
)
testing.round(3).to_csv(f"{SCRIPT_DIR}/tested_total_state.csv")
df["ratio"] = df["new_confirmed"] / df["new_tested"]
testing = process_df(
df, columns=["ratio"], resolution="state", func_normalize=None,
)
testing.round(3).to_csv(f"{SCRIPT_DIR}/tested_ratio_state.csv")
if __name__ == "__main__":
main()
| covid19_spread-main | covid19_spread/data/usa/google/process_open_data.py |
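The `zero_one` and `zscore` helpers above are plain column-wise transforms; a standalone illustration on a toy frame (not repository code):

```python
# Toy illustration of the column-wise normalizers used above (data is made up).
import pandas as pd

piv = pd.DataFrame({"A": [0, 5, 10], "B": [1, 2, 4]})
zero_one = piv.fillna(0) / piv.fillna(0).max(axis=0)           # each column scaled to [0, 1]
zscore = (piv - piv.mean(skipna=True)) / piv.std(skipna=True)  # each column standardized
print(zero_one)
print(zscore)
```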
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .process_mobility import main as mobility_main
from .process_open_data import main as open_data_main
def prepare():
mobility_main()
open_data_main()
| covid19_spread-main | covid19_spread/data/usa/google/__init__.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pandas as pd
from hdx.api.configuration import Configuration
from hdx.data.dataset import Dataset
import shutil
from glob import glob
import os
from covid19_spread.data.usa.process_cases import get_index
import re
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main():
Configuration.create(
hdx_site="prod", user_agent="A_Quick_Example", hdx_read_only=True
)
dataset = Dataset.read_from_hdx("movement-range-maps")
resources = dataset.get_resources()
resource = [
x
for x in resources
if re.match(".*/movement-range-data-\d{4}-\d{2}-\d{2}\.zip", x["url"])
]
assert len(resource) == 1
resource = resource[0]
url, path = resource.download()
if os.path.exists(f"{SCRIPT_DIR}/fb_mobility"):
shutil.rmtree(f"{SCRIPT_DIR}/fb_mobility")
shutil.unpack_archive(path, f"{SCRIPT_DIR}/fb_mobility", "zip")
fips_map = get_index()
fips_map["location"] = fips_map["name"] + ", " + fips_map["subregion1_name"]
cols = [
"date",
"region",
"all_day_bing_tiles_visited_relative_change",
"all_day_ratio_single_tile_users",
]
def get_county_mobility_fb(fin):
df_mobility_global = pd.read_csv(
fin, parse_dates=["ds"], delimiter="\t", dtype={"polygon_id": str}
)
df_mobility_usa = df_mobility_global.query("country == 'USA'")
return df_mobility_usa
# fin = sys.argv[1] if len(sys.argv) == 2 else None
txt_files = glob(f"{SCRIPT_DIR}/fb_mobility/movement-range*.txt")
assert len(txt_files) == 1
fin = txt_files[0]
df = get_county_mobility_fb(fin)
df = df.rename(columns={"ds": "date", "polygon_id": "region"})
df = df.merge(fips_map, left_on="region", right_on="fips")[
list(df.columns) + ["location"]
]
df = df.drop(columns="region").rename(columns={"location": "region"})
def zscore(df):
# z-scores
df = (df.values - df.mean(skipna=True)) / df.std(skipna=True)
return df
def process_df(df, cols):
df = df[cols].copy()
regions = []
for (name, _df) in df.groupby("region"):
_df = _df.sort_values(by="date")
_df = _df.drop_duplicates(subset="date")
dates = _df["date"].to_list()
assert len(dates) == len(np.unique(dates)), _df
_df = _df.loc[:, ~_df.columns.duplicated()]
_df = _df.drop(columns=["region", "date"]).transpose()
# take 7 day average
_df = _df.rolling(7, min_periods=1, axis=1).mean()
# convert relative change into absolute numbers
_df.loc["all_day_bing_tiles_visited_relative_change"] += 1
_df["region"] = [name] * len(_df)
_df.columns = list(map(lambda x: x.strftime("%Y-%m-%d"), dates)) + [
"region"
]
regions.append(_df.reset_index())
df = pd.concat(regions, axis=0, ignore_index=True)
cols = ["region"] + [x for x in df.columns if x != "region"]
df = df[cols]
df = df.rename(columns={"index": "type"})
return df
county = process_df(df, cols)
state = df.copy()
state["region"] = state["region"].apply(lambda x: x.split(", ")[-1])
state = state.groupby(["region", "date"]).mean().reset_index()
state = process_df(state, cols)
county = county.fillna(0)
state = state.fillna(0)
county.round(4).to_csv(f"{SCRIPT_DIR}/mobility_features_county_fb.csv", index=False)
state.round(4).to_csv(f"{SCRIPT_DIR}/mobility_features_state_fb.csv", index=False)
if __name__ == "__main__":
main()
| covid19_spread-main | covid19_spread/data/usa/fb/process_mobility.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .process_mobility import main
def prepare():
main()
| covid19_spread-main | covid19_spread/data/usa/fb/__init__.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .process_testing import main
def prepare():
main()
| covid19_spread-main | covid19_spread/data/usa/testing/__init__.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
from datetime import datetime
import os
from covid19_spread.data.usa.process_cases import get_index
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main():
df = pd.read_csv(
"https://beta.healthdata.gov/api/views/j8mb-icvb/rows.csv?accessType=DOWNLOAD",
parse_dates=["date"],
)
df_piv = df.pivot(
columns=["overall_outcome"],
values="total_results_reported",
index=["state", "date"],
)
df_piv = df_piv.fillna(0).groupby(level=0).cummax()
index = get_index()
states = index.drop_duplicates("subregion1_name")
with_index = df_piv.reset_index().merge(
states, left_on="state", right_on="subregion1_code"
)
df = with_index[
["subregion1_name", "Negative", "Positive", "Inconclusive", "date"]
].set_index("date")
df = df.rename(columns={"subregion1_name": "state_name"})
df["Total"] = df["Positive"] + df["Negative"] + df["Inconclusive"]
def zscore(df):
df.iloc[:, 0:] = (
df.iloc[:, 0:].values
- df.iloc[:, 0:].mean(axis=1, skipna=True).values[:, None]
) / df.iloc[:, 0:].std(axis=1, skipna=True).values[:, None]
df = df.fillna(0)
return df
def zero_one(df):
df = df.fillna(0)
df = df.div(df.max(axis=1), axis=0)
# df = df / df.max()
df = df.fillna(0)
return df
def fmt_features(pivot, key, func_smooth, func_normalize):
df = pivot.transpose()
df = func_smooth(df)
if func_normalize is not None:
df = func_normalize(df)
df = df.fillna(0)
df.index.set_names("region", inplace=True)
df["type"] = f"testing_{key}"
merge = df.merge(index, left_index=True, right_on="subregion1_name")
merge.index = merge["name"] + ", " + merge["subregion1_name"]
return df, merge[df.columns]
def _diff(df):
return df.diff(axis=1).rolling(7, axis=1, min_periods=1).mean()
state_r, county_r = fmt_features(
df.pivot(columns="state_name", values=["Positive", "Total"]),
"ratio",
lambda _df: (_diff(_df.loc["Positive"]) / _diff(_df.loc["Total"])),
None,
)
state_t, county_t = fmt_features(
df.pivot(columns="state_name", values="Total"), "Total", _diff, zero_one,
)
def write_features(df, res, fout):
df = df[["type"] + [c for c in df.columns if isinstance(c, datetime)]]
df.columns = [
str(x.date()) if isinstance(x, datetime) else x for x in df.columns
]
df.round(3).to_csv(
f"{SCRIPT_DIR}/{fout}_features_{res}.csv", index_label="region"
)
write_features(state_t, "state", "total")
write_features(state_r, "state", "ratio")
write_features(county_t, "county", "total")
write_features(county_r, "county", "ratio")
if __name__ == "__main__":
main()
| covid19_spread-main | covid19_spread/data/usa/testing/process_testing.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import pandas as pd
import sys
from datetime import timedelta
from delphi_epidata import Epidata
import covidcast
# Fetch data
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(geo_value, source, signal):
# grab start and end date from metadata
df = covidcast.metadata().drop(columns=["time_type", "min_lag", "max_lag"])
df.min_time = pd.to_datetime(df.min_time)
df.max_time = pd.to_datetime(df.max_time)
df = df.query(
f"data_source == '{source}' and signal == '{signal}' and geo_type == '{geo_value}'"
)
assert len(df) == 1
base_date = df.iloc[0].min_time - timedelta(1)
end_date = df.iloc[0].max_time
dfs = []
current_date = base_date
while current_date < end_date:
current_date = current_date + timedelta(1)
date_str = current_date.strftime("%Y%m%d")
os.makedirs(os.path.join(SCRIPT_DIR, geo_value, source), exist_ok=True)
fout = f"{SCRIPT_DIR}/{geo_value}/{source}/{signal}-{date_str}.csv"
# d/l only if we don't have the file already
if os.path.exists(fout):
dfs.append(pd.read_csv(fout))
continue
for _ in range(3):
res = Epidata.covidcast(source, signal, "day", geo_value, [date_str], "*")
print(date_str, res["result"], res["message"])
if res["result"] == 1:
break
if res["result"] != 1:
# the API may return an unsuccessful result code if there aren't enough respondents
# See: https://github.com/cmu-delphi/delphi-epidata/issues/613#event-4962274038
print(f"Skipping {source}/{signal} for {date_str}")
continue
df = pd.DataFrame(res["epidata"])
df.rename(
columns={
"geo_value": geo_value,
"time_value": "date",
"value": signal,
"direction": f"{signal}_direction",
"stderr": f"{signal}_stderr",
"sample_size": f"{signal}_sample_size",
},
inplace=True,
)
df.to_csv(fout, index=False)
dfs.append(df)
pd.concat(dfs).to_csv(f"{SCRIPT_DIR}/{geo_value}/{source}/{signal}.csv")
SIGNALS = [
("fb-survey", "smoothed_hh_cmnty_cli"),
("fb-survey", "smoothed_wcli"),
("doctor-visits", "smoothed_adj_cli"),
("fb-survey", "smoothed_wcovid_vaccinated_or_accept"),
("fb-survey", "smoothed_wearing_mask"),
("fb-survey", "smoothed_wearing_mask_7d"),
("fb-survey", "smoothed_wothers_masked"),
("fb-survey", "smoothed_wcovid_vaccinated_or_accept"),
]
if __name__ == "__main__":
main(sys.argv[1], *sys.argv[2].split("/"))
| covid19_spread-main | covid19_spread/data/usa/symptom_survey/fetch.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
import argparse
from datetime import datetime
import os
from covid19_spread.data.usa.process_cases import get_index
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def get_df(source, signal, resolution):
df = pd.read_csv(
f"{SCRIPT_DIR}/{resolution}/{source}/{signal}.csv", parse_dates=["date"]
)
df.dropna(axis=0, subset=["date"], inplace=True)
index = get_index()
state_index = index.drop_duplicates("subregion1_code")
if "state" in df.columns:
df["state"] = df["state"].str.upper()
merged = df.merge(state_index, left_on="state", right_on="subregion1_code")
df = merged[["subregion1_name", "date", signal]].rename(
columns={"subregion1_name": "loc"}
)
else:
df["county"] = df["county"].astype(str).str.zfill(5)
merged = df.merge(index, left_on="county", right_on="fips")
merged["loc"] = merged["name"] + ", " + merged["subregion1_name"]
df = merged[["loc", "date", signal]]
df = df.pivot(index="date", columns="loc", values=signal).copy()
# Fill in NaNs
df.iloc[0] = 0
df = df.fillna(0)
# Normalize
df = df.transpose() / 100
df["type"] = f"{source}_{signal}_{resolution}"
return df
def main(signal, resolution):
source, signal = signal.split("/")
df = get_df(source, signal, resolution)
if resolution == "county":
# Fill in missing counties with zeros
cases = pd.read_csv(
f"{SCRIPT_DIR}/../data_cases.csv", index_col="region"
).index.to_frame()
cases["state"] = [x.split(", ")[-1] for x in cases.index]
cases = cases.drop(columns="region")
idx = pd.MultiIndex.from_product([cases.index, df["type"].unique()])
type_ = df["type"].iloc[0]
df = df.reset_index().set_index(["loc", "type"]).reindex(idx).fillna(0)
df2 = get_df(source, signal, "state")
df2 = df2.merge(cases[["state"]], left_index=True, right_on="state")[
df2.columns
]
df = pd.concat([df, df2.set_index("type", append=True)])
df = df[[c for c in df.columns if isinstance(c, datetime)]]
df.columns = [str(x.date()) if isinstance(x, datetime) else x for x in df.columns]
df.round(3).to_csv(
f"{SCRIPT_DIR}/{source}_{signal}-{resolution}.csv",
index_label=["region", "type"],
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-signal", default="smoothed_hh_cmnty_cli")
parser.add_argument("-resolution", choices=["state", "county"], default="county")
opt = parser.parse_args()
main(opt.signal, opt.resolution)
| covid19_spread-main | covid19_spread/data/usa/symptom_survey/process_symptom_survey.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from covid19_spread.data.usa.symptom_survey.fetch import main as fetch, SIGNALS
from covid19_spread.data.usa.symptom_survey.process_symptom_survey import (
main as process,
)
import os
import pandas
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def concat_mask_data(resolution):
df1 = pandas.read_csv(
os.path.join(SCRIPT_DIR, resolution, "fb-survey", "smoothed_wearing_mask.csv")
)
df2 = pandas.read_csv(
os.path.join(
SCRIPT_DIR, resolution, "fb-survey", "smoothed_wearing_mask_7d.csv"
)
)
df2.columns = [c.replace("_7d", "") for c in df2.columns]
df = pandas.concat([df1, df2])
df.columns = [
c.replace("smoothed_wearing_mask", "smoothed_wearing_mask_all")
for c in df.columns
]
df = df.sort_values(by="signal", ascending=False)
df["signal"] = "smoothed_wearing_mask_all"
df = df.drop_duplicates([resolution, "date"])
df.to_csv(
os.path.join(
SCRIPT_DIR, resolution, "fb-survey", "smoothed_wearing_mask_all.csv"
),
index=False,
)
def prepare():
for source, signal in SIGNALS:
fetch("state", source, signal)
fetch("county", source, signal)
concat_mask_data("county")
concat_mask_data("state")
for source, signal in SIGNALS:
if "wearing_mask" in signal:
# Skip these since we end up concatenating the wearing_mask and wearing_mask_7d features
continue
process(f"{source}/{signal}", "state")
process(f"{source}/{signal}", "county")
process(f"fb-survey/smoothed_wearing_mask_all", "county")
process(f"fb-survey/smoothed_wearing_mask_all", "state")
if __name__ == "__main__":
prepare()
| covid19_spread-main | covid19_spread/data/usa/symptom_survey/__init__.py |