python_code | repo_name | file_path
---|---|---|
VERSION = "0.27.9"
| APACAI-API-main | apacai/version.py |
import apacai
class ApacAIError(Exception):
def __init__(
self,
message=None,
http_body=None,
http_status=None,
json_body=None,
headers=None,
code=None,
):
super(ApacAIError, self).__init__(message)
if http_body and hasattr(http_body, "decode"):
try:
http_body = http_body.decode("utf-8")
except BaseException:
http_body = (
"<Could not decode body as utf-8. "
"Please contact us through our help center at help.apacai.com.>"
)
self._message = message
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
self.headers = headers or {}
self.code = code
self.request_id = self.headers.get("request-id", None)
self.error = self.construct_error_object()
self.organization = self.headers.get("apacai-organization", None)
def __str__(self):
msg = self._message or "<empty message>"
if self.request_id is not None:
return "Request {0}: {1}".format(self.request_id, msg)
else:
return msg
# Returns the underlying `Exception` (base class) message, which is usually
# the raw message returned by APACAI's API. This was previously available
# in python2 via `error.message`. Unlike `str(error)`, it omits "Request
# req_..." from the beginning of the string.
@property
def user_message(self):
return self._message
def __repr__(self):
return "%s(message=%r, http_status=%r, request_id=%r)" % (
self.__class__.__name__,
self._message,
self.http_status,
self.request_id,
)
def construct_error_object(self):
if (
self.json_body is None
or not isinstance(self.json_body, dict)
or "error" not in self.json_body
or not isinstance(self.json_body["error"], dict)
):
return None
return apacai.api_resources.error_object.ErrorObject.construct_from(
self.json_body["error"]
)
class APIError(ApacAIError):
pass
class TryAgain(ApacAIError):
pass
class Timeout(ApacAIError):
pass
class APIConnectionError(ApacAIError):
def __init__(
self,
message,
http_body=None,
http_status=None,
json_body=None,
headers=None,
code=None,
should_retry=False,
):
super(APIConnectionError, self).__init__(
message, http_body, http_status, json_body, headers, code
)
self.should_retry = should_retry
class InvalidRequestError(ApacAIError):
def __init__(
self,
message,
param,
code=None,
http_body=None,
http_status=None,
json_body=None,
headers=None,
):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body, headers, code
)
self.param = param
def __repr__(self):
return "%s(message=%r, param=%r, code=%r, http_status=%r, " "request_id=%r)" % (
self.__class__.__name__,
self._message,
self.param,
self.code,
self.http_status,
self.request_id,
)
def __reduce__(self):
return type(self), (
self._message,
self.param,
self.code,
self.http_body,
self.http_status,
self.json_body,
self.headers,
)
class AuthenticationError(ApacAIError):
pass
class PermissionError(ApacAIError):
pass
class RateLimitError(ApacAIError):
pass
class ServiceUnavailableError(ApacAIError):
pass
class InvalidAPIType(ApacAIError):
pass
class SignatureVerificationError(ApacAIError):
def __init__(self, message, sig_header, http_body=None):
super(SignatureVerificationError, self).__init__(message, http_body)
self.sig_header = sig_header
def __reduce__(self):
return type(self), (
self._message,
self.sig_header,
self.http_body,
)
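# Illustrative sketch, not part of the original module: how calling code might
# branch on the exception hierarchy defined above. `call` stands in for any
# zero-argument callable that may raise one of these errors.
def _example_handle_error(call):
    try:
        return call()
    except RateLimitError as e:
        return "rate limited (request %s), retry later" % e.request_id
    except InvalidRequestError as e:
        return "bad request: param=%r code=%r" % (e.param, e.code)
    except ApacAIError as e:
        return "API error %s: %s" % (e.http_status, e.user_message)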
| APACAI-API-main | apacai/error.py |
import logging
import os
import re
import sys
from enum import Enum
from typing import Optional
import apacai
APACAI_LOG = os.environ.get("APACAI_LOG")
logger = logging.getLogger("apacai")
__all__ = [
"log_info",
"log_debug",
"log_warn",
"logfmt",
]
api_key_to_header = (
lambda api, key: {"Authorization": f"Bearer {key}"}
if api in (ApiType.OPEN_AI, ApiType.AZURE_AD)
else {"api-key": f"{key}"}
)
class ApiType(Enum):
AZURE = 1
OPEN_AI = 2
AZURE_AD = 3
@staticmethod
def from_str(label):
if label.lower() == "azure":
return ApiType.AZURE
elif label.lower() in ("azure_ad", "azuread"):
return ApiType.AZURE_AD
elif label.lower() in ("open_ai", "apacai"):
return ApiType.OPEN_AI
else:
raise apacai.error.InvalidAPIType(
"The API type provided in invalid. Please select one of the supported API types: 'azure', 'azure_ad', 'open_ai'"
)
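# Illustrative sketch, not part of the original module: the header shape produced
# by api_key_to_header for each API type ("sk-example" is a placeholder key).
def _example_auth_headers():
    assert api_key_to_header(ApiType.OPEN_AI, "sk-example") == {
        "Authorization": "Bearer sk-example"
    }
    assert api_key_to_header(ApiType.AZURE, "sk-example") == {"api-key": "sk-example"}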
def _console_log_level():
if apacai.log in ["debug", "info"]:
return apacai.log
elif APACAI_LOG in ["debug", "info"]:
return APACAI_LOG
else:
return None
def log_debug(message, **params):
msg = logfmt(dict(message=message, **params))
if _console_log_level() == "debug":
print(msg, file=sys.stderr)
logger.debug(msg)
def log_info(message, **params):
msg = logfmt(dict(message=message, **params))
if _console_log_level() in ["debug", "info"]:
print(msg, file=sys.stderr)
logger.info(msg)
def log_warn(message, **params):
msg = logfmt(dict(message=message, **params))
print(msg, file=sys.stderr)
logger.warning(msg)
def logfmt(props):
def fmt(key, val):
# Handle case where val is a bytes or bytearray
if hasattr(val, "decode"):
val = val.decode("utf-8")
# Check if val is already a string to avoid re-encoding into ascii.
if not isinstance(val, str):
val = str(val)
if re.search(r"\s", val):
val = repr(val)
# key should already be a string
if re.search(r"\s", key):
key = repr(key)
return "{key}={val}".format(key=key, val=val)
return " ".join([fmt(key, val) for key, val in sorted(props.items())])
def get_object_classes():
# This is here to avoid a circular dependency
from apacai.object_classes import OBJECT_CLASSES
return OBJECT_CLASSES
def convert_to_apacai_object(
resp,
api_key=None,
api_version=None,
organization=None,
engine=None,
plain_old_data=False,
):
# If we get an ApacAIResponse, we'll want to return an ApacAIObject.
response_ms: Optional[int] = None
if isinstance(resp, apacai.apacai_response.ApacAIResponse):
organization = resp.organization
response_ms = resp.response_ms
resp = resp.data
if plain_old_data:
return resp
elif isinstance(resp, list):
return [
convert_to_apacai_object(
i, api_key, api_version, organization, engine=engine
)
for i in resp
]
elif isinstance(resp, dict) and not isinstance(
resp, apacai.apacai_object.ApacAIObject
):
resp = resp.copy()
klass_name = resp.get("object")
if isinstance(klass_name, str):
klass = get_object_classes().get(
klass_name, apacai.apacai_object.ApacAIObject
)
else:
klass = apacai.apacai_object.ApacAIObject
return klass.construct_from(
resp,
api_key=api_key,
api_version=api_version,
organization=organization,
response_ms=response_ms,
engine=engine,
)
else:
return resp
def convert_to_dict(obj):
"""Converts a ApacAIObject back to a regular dict.
Nested ApacAIObjects are also converted back to regular dicts.
:param obj: The ApacAIObject to convert.
:returns: The ApacAIObject as a dict.
"""
if isinstance(obj, list):
return [convert_to_dict(i) for i in obj]
# This works by virtue of the fact that ApacAIObjects _are_ dicts. The dict
# comprehension returns a regular dict and recursively applies the
# conversion to each value.
elif isinstance(obj, dict):
return {k: convert_to_dict(v) for k, v in obj.items()}
else:
return obj
def merge_dicts(x, y):
z = x.copy()
z.update(y)
return z
def default_api_key() -> str:
if apacai.api_key_path:
with open(apacai.api_key_path, "rt") as k:
api_key = k.read().strip()
if not api_key.startswith("sk-"):
raise ValueError(f"Malformed API key in {apacai.api_key_path}.")
return api_key
elif apacai.api_key is not None:
return apacai.api_key
else:
raise apacai.error.AuthenticationError(
"No API key provided. You can set your API key in code using 'apacai.api_key = <API-KEY>', or you can set the environment variable APACAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the apacai module at it with 'apacai.api_key_path = <PATH>'. You can generate API keys in the APACAI web interface. See https://platform.apacai.com/account/api-keys for details."
)
| APACAI-API-main | apacai/util.py |
try:
import wandb
WANDB_AVAILABLE = True
except Exception:
WANDB_AVAILABLE = False
if WANDB_AVAILABLE:
import datetime
import io
import json
import re
from pathlib import Path
from apacai import File, FineTune
from apacai.datalib.numpy_helper import numpy as np
from apacai.datalib.pandas_helper import pandas as pd
class WandbLogger:
"""
Log fine-tunes to [Weights & Biases](https://wandb.me/apacai-docs)
"""
if not WANDB_AVAILABLE:
print("Logging requires wandb to be installed. Run `pip install wandb`.")
else:
_wandb_api = None
_logged_in = False
@classmethod
def sync(
cls,
id=None,
n_fine_tunes=None,
project="GPT-3",
entity=None,
force=False,
**kwargs_wandb_init,
):
"""
Sync fine-tunes to Weights & Biases.
:param id: The id of the fine-tune (optional)
:param n_fine_tunes: Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced.
:param project: Name of the project where you're sending runs. By default, it is "GPT-3".
:param entity: Username or team name where you're sending runs. By default, your default entity is used, which is usually your username.
:param force: Forces logging and overwrites an existing wandb run of the same fine-tune.
"""
if not WANDB_AVAILABLE:
return
if id:
fine_tune = FineTune.retrieve(id=id)
fine_tune.pop("events", None)
fine_tunes = [fine_tune]
else:
# get list of fine_tune to log
fine_tunes = FineTune.list()
if not fine_tunes or fine_tunes.get("data") is None:
print("No fine-tune has been retrieved")
return
fine_tunes = fine_tunes["data"][
-n_fine_tunes if n_fine_tunes is not None else None :
]
# log starting from oldest fine_tune
show_individual_warnings = (
False if id is None and n_fine_tunes is None else True
)
fine_tune_logged = [
cls._log_fine_tune(
fine_tune,
project,
entity,
force,
show_individual_warnings,
**kwargs_wandb_init,
)
for fine_tune in fine_tunes
]
if not show_individual_warnings and not any(fine_tune_logged):
print("No new successful fine-tunes were found")
return "🎉 wandb sync completed successfully"
@classmethod
def _log_fine_tune(
cls,
fine_tune,
project,
entity,
force,
show_individual_warnings,
**kwargs_wandb_init,
):
fine_tune_id = fine_tune.get("id")
status = fine_tune.get("status")
# check run completed successfully
if status != "succeeded":
if show_individual_warnings:
print(
f'Fine-tune {fine_tune_id} has the status "{status}" and will not be logged'
)
return
# check results are present
try:
results_id = fine_tune["result_files"][0]["id"]
results = File.download(id=results_id).decode("utf-8")
except Exception:
if show_individual_warnings:
print(f"Fine-tune {fine_tune_id} has no results and will not be logged")
return
# check run has not been logged already
run_path = f"{project}/{fine_tune_id}"
if entity is not None:
run_path = f"{entity}/{run_path}"
wandb_run = cls._get_wandb_run(run_path)
if wandb_run:
wandb_status = wandb_run.summary.get("status")
if show_individual_warnings:
if wandb_status == "succeeded":
print(
f"Fine-tune {fine_tune_id} has already been logged successfully at {wandb_run.url}"
)
if not force:
print(
'Use "--force" in the CLI or "force=True" in python if you want to overwrite previous run'
)
else:
print(
f"A run for fine-tune {fine_tune_id} was previously created but didn't end successfully"
)
if wandb_status != "succeeded" or force:
print(
f"A new wandb run will be created for fine-tune {fine_tune_id} and previous run will be overwritten"
)
if wandb_status == "succeeded" and not force:
return
# start a wandb run
wandb.init(
job_type="fine-tune",
config=cls._get_config(fine_tune),
project=project,
entity=entity,
name=fine_tune_id,
id=fine_tune_id,
**kwargs_wandb_init,
)
# log results
df_results = pd.read_csv(io.StringIO(results))
for _, row in df_results.iterrows():
metrics = {k: v for k, v in row.items() if not np.isnan(v)}
step = metrics.pop("step")
if step is not None:
step = int(step)
wandb.log(metrics, step=step)
fine_tuned_model = fine_tune.get("fine_tuned_model")
if fine_tuned_model is not None:
wandb.summary["fine_tuned_model"] = fine_tuned_model
# training/validation files and fine-tune details
cls._log_artifacts(fine_tune, project, entity)
# mark run as complete
wandb.summary["status"] = "succeeded"
wandb.finish()
return True
@classmethod
def _ensure_logged_in(cls):
if not cls._logged_in:
if wandb.login():
cls._logged_in = True
else:
raise Exception("You need to log in to wandb")
@classmethod
def _get_wandb_run(cls, run_path):
cls._ensure_logged_in()
try:
if cls._wandb_api is None:
cls._wandb_api = wandb.Api()
return cls._wandb_api.run(run_path)
except Exception:
return None
@classmethod
def _get_wandb_artifact(cls, artifact_path):
cls._ensure_logged_in()
try:
if cls._wandb_api is None:
cls._wandb_api = wandb.Api()
return cls._wandb_api.artifact(artifact_path)
except Exception:
return None
@classmethod
def _get_config(cls, fine_tune):
config = dict(fine_tune)
for key in ("training_files", "validation_files", "result_files"):
if config.get(key) and len(config[key]):
config[key] = config[key][0]
if config.get("created_at"):
config["created_at"] = datetime.datetime.fromtimestamp(config["created_at"])
return config
@classmethod
def _log_artifacts(cls, fine_tune, project, entity):
# training/validation files
training_file = (
fine_tune["training_files"][0]
if fine_tune.get("training_files") and len(fine_tune["training_files"])
else None
)
validation_file = (
fine_tune["validation_files"][0]
if fine_tune.get("validation_files") and len(fine_tune["validation_files"])
else None
)
for file, prefix, artifact_type in (
(training_file, "train", "training_files"),
(validation_file, "valid", "validation_files"),
):
if file is not None:
cls._log_artifact_inputs(file, prefix, artifact_type, project, entity)
# fine-tune details
fine_tune_id = fine_tune.get("id")
artifact = wandb.Artifact(
"fine_tune_details",
type="fine_tune_details",
metadata=fine_tune,
)
with artifact.new_file(
"fine_tune_details.json", mode="w", encoding="utf-8"
) as f:
json.dump(fine_tune, f, indent=2)
wandb.run.log_artifact(
artifact,
aliases=["latest", fine_tune_id],
)
@classmethod
def _log_artifact_inputs(cls, file, prefix, artifact_type, project, entity):
file_id = file["id"]
filename = Path(file["filename"]).name
stem = Path(file["filename"]).stem
# get input artifact
artifact_name = f"{prefix}-{filename}"
# sanitize name to valid wandb artifact name
artifact_name = re.sub(r"[^a-zA-Z0-9_\-.]", "_", artifact_name)
artifact_alias = file_id
artifact_path = f"{project}/{artifact_name}:{artifact_alias}"
if entity is not None:
artifact_path = f"{entity}/{artifact_path}"
artifact = cls._get_wandb_artifact(artifact_path)
# create artifact if file not already logged previously
if artifact is None:
# get file content
try:
file_content = File.download(id=file_id).decode("utf-8")
except Exception:
print(
f"File {file_id} could not be retrieved. Make sure you are allowed to download training/validation files"
)
return
artifact = wandb.Artifact(artifact_name, type=artifact_type, metadata=file)
with artifact.new_file(filename, mode="w", encoding="utf-8") as f:
f.write(file_content)
# create a Table
try:
table, n_items = cls._make_table(file_content)
artifact.add(table, stem)
wandb.config.update({f"n_{prefix}": n_items})
artifact.metadata["items"] = n_items
except Exception:
print(f"File {file_id} could not be read as a valid JSON file")
else:
# log number of items
wandb.config.update({f"n_{prefix}": artifact.metadata.get("items")})
wandb.run.use_artifact(artifact, aliases=["latest", artifact_alias])
@classmethod
def _make_table(cls, file_content):
df = pd.read_json(io.StringIO(file_content), orient="records", lines=True)
return wandb.Table(dataframe=df), len(df)
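# Illustrative usage sketch, not part of the original module (requires wandb to be
# installed and an APACAI API key; the fine-tune id below is a placeholder):
#
#     from apacai.wandb_logger import WandbLogger
#     WandbLogger.sync(id="ft-abc123", project="GPT-3", force=False)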
| APACAI-API-main | apacai/wandb_logger.py |
import os
import sys
from typing import Any, Callable, NamedTuple, Optional
from apacai.datalib.pandas_helper import assert_has_pandas
from apacai.datalib.pandas_helper import pandas as pd
class Remediation(NamedTuple):
name: str
immediate_msg: Optional[str] = None
necessary_msg: Optional[str] = None
necessary_fn: Optional[Callable[[Any], Any]] = None
optional_msg: Optional[str] = None
optional_fn: Optional[Callable[[Any], Any]] = None
error_msg: Optional[str] = None
def num_examples_validator(df):
"""
This validator will only print out the number of examples and recommend to the user to increase the number of examples if less than 100.
"""
MIN_EXAMPLES = 100
optional_suggestion = (
""
if len(df) >= MIN_EXAMPLES
else ". In general, we recommend having at least a few hundred examples. We've found that performance tends to linearly increase for every doubling of the number of examples"
)
immediate_msg = (
f"\n- Your file contains {len(df)} prompt-completion pairs{optional_suggestion}"
)
return Remediation(name="num_examples", immediate_msg=immediate_msg)
def necessary_column_validator(df, necessary_column):
"""
This validator will ensure that the necessary column is present in the dataframe.
"""
def lower_case_column(df, column):
cols = [c for c in df.columns if str(c).lower() == column]
df.rename(columns={cols[0]: column.lower()}, inplace=True)
return df
immediate_msg = None
necessary_fn = None
necessary_msg = None
error_msg = None
if necessary_column not in df.columns:
if necessary_column in [str(c).lower() for c in df.columns]:
def lower_case_column_creator(df):
return lower_case_column(df, necessary_column)
necessary_fn = lower_case_column_creator
immediate_msg = (
f"\n- The `{necessary_column}` column/key should be lowercase"
)
necessary_msg = f"Lower case column name to `{necessary_column}`"
else:
error_msg = f"`{necessary_column}` column/key is missing. Please make sure you name your columns/keys appropriately, then retry"
return Remediation(
name="necessary_column",
immediate_msg=immediate_msg,
necessary_msg=necessary_msg,
necessary_fn=necessary_fn,
error_msg=error_msg,
)
def additional_column_validator(df, fields=["prompt", "completion"]):
"""
This validator will remove additional columns from the dataframe.
"""
additional_columns = []
necessary_msg = None
immediate_msg = None
necessary_fn = None
if len(df.columns) > 2:
additional_columns = [c for c in df.columns if c not in fields]
warn_message = ""
for ac in additional_columns:
dups = [c for c in additional_columns if ac in c]
if len(dups) > 0:
warn_message += f"\n WARNING: Some of the additional columns/keys contain `{ac}` in their name. These will be ignored, and the column/key `{ac}` will be used instead. This could also result from a duplicate column/key in the provided file."
immediate_msg = f"\n- The input file should contain exactly two columns/keys per row. Additional columns/keys present are: {additional_columns}{warn_message}"
necessary_msg = f"Remove additional columns/keys: {additional_columns}"
def necessary_fn(x):
return x[fields]
return Remediation(
name="additional_column",
immediate_msg=immediate_msg,
necessary_msg=necessary_msg,
necessary_fn=necessary_fn,
)
def non_empty_field_validator(df, field="completion"):
"""
This validator will ensure that no completion is empty.
"""
necessary_msg = None
necessary_fn = None
immediate_msg = None
if df[field].apply(lambda x: x == "").any() or df[field].isnull().any():
empty_rows = (df[field] == "") | (df[field].isnull())
empty_indexes = df.reset_index().index[empty_rows].tolist()
immediate_msg = f"\n- `{field}` column/key should not contain empty strings. These are rows: {empty_indexes}"
def necessary_fn(x):
return x[x[field] != ""].dropna(subset=[field])
necessary_msg = f"Remove {len(empty_indexes)} rows with empty {field}s"
return Remediation(
name=f"empty_{field}",
immediate_msg=immediate_msg,
necessary_msg=necessary_msg,
necessary_fn=necessary_fn,
)
def duplicated_rows_validator(df, fields=["prompt", "completion"]):
"""
This validator will suggest to the user to remove duplicate rows if they exist.
"""
duplicated_rows = df.duplicated(subset=fields)
duplicated_indexes = df.reset_index().index[duplicated_rows].tolist()
immediate_msg = None
optional_msg = None
optional_fn = None
if len(duplicated_indexes) > 0:
immediate_msg = f"\n- There are {len(duplicated_indexes)} duplicated {'-'.join(fields)} sets. These are rows: {duplicated_indexes}"
optional_msg = f"Remove {len(duplicated_indexes)} duplicate rows"
def optional_fn(x):
return x.drop_duplicates(subset=fields)
return Remediation(
name="duplicated_rows",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
)
def long_examples_validator(df):
"""
This validator will suggest to the user to remove examples that are too long.
"""
immediate_msg = None
optional_msg = None
optional_fn = None
ft_type = infer_task_type(df)
if ft_type != "open-ended generation":
def get_long_indexes(d):
long_examples = d.apply(
lambda x: len(x.prompt) + len(x.completion) > 10000, axis=1
)
return d.reset_index().index[long_examples].tolist()
long_indexes = get_long_indexes(df)
if len(long_indexes) > 0:
immediate_msg = f"\n- There are {len(long_indexes)} examples that are very long. These are rows: {long_indexes}\nFor conditional generation, and for classification the examples shouldn't be longer than 2048 tokens."
optional_msg = f"Remove {len(long_indexes)} long examples"
def optional_fn(x):
long_indexes_to_drop = get_long_indexes(x)
if long_indexes != long_indexes_to_drop:
sys.stdout.write(
f"The indices of the long examples has changed as a result of a previously applied recommendation.\nThe {len(long_indexes_to_drop)} long examples to be dropped are now at the following indices: {long_indexes_to_drop}\n"
)
return x.drop(long_indexes_to_drop)
return Remediation(
name="long_examples",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
)
def common_prompt_suffix_validator(df):
"""
This validator will suggest to add a common suffix to the prompt if one doesn't already exist in case of classification or conditional generation.
"""
error_msg = None
immediate_msg = None
optional_msg = None
optional_fn = None
# Find a suffix which is not contained within the prompt otherwise
suggested_suffix = "\n\n### =>\n\n"
suffix_options = [
" ->",
"\n\n###\n\n",
"\n\n===\n\n",
"\n\n---\n\n",
"\n\n===>\n\n",
"\n\n--->\n\n",
]
for suffix_option in suffix_options:
if suffix_option == " ->":
if df.prompt.str.contains("\n").any():
continue
if df.prompt.str.contains(suffix_option, regex=False).any():
continue
suggested_suffix = suffix_option
break
display_suggested_suffix = suggested_suffix.replace("\n", "\\n")
ft_type = infer_task_type(df)
if ft_type == "open-ended generation":
return Remediation(name="common_suffix")
def add_suffix(x, suffix):
x["prompt"] += suffix
return x
common_suffix = get_common_xfix(df.prompt, xfix="suffix")
if (df.prompt == common_suffix).all():
error_msg = f"All prompts are identical: `{common_suffix}`\nConsider leaving the prompts blank if you want to do open-ended generation, otherwise ensure prompts are different"
return Remediation(name="common_suffix", error_msg=error_msg)
if common_suffix != "":
common_suffix_new_line_handled = common_suffix.replace("\n", "\\n")
immediate_msg = (
f"\n- All prompts end with suffix `{common_suffix_new_line_handled}`"
)
if len(common_suffix) > 10:
immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`"
if (
df.prompt.str[: -len(common_suffix)]
.str.contains(common_suffix, regex=False)
.any()
):
immediate_msg += f"\n WARNING: Some of your prompts contain the suffix `{common_suffix}` more than once. We strongly suggest that you review your prompts and add a unique suffix"
else:
immediate_msg = "\n- Your data does not contain a common separator at the end of your prompts. Having a separator string appended to the end of the prompt makes it clearer to the fine-tuned model where the completion should begin. See https://platform.apacai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples. If you intend to do open-ended generation, then you should leave the prompts empty"
if common_suffix == "":
optional_msg = (
f"Add a suffix separator `{display_suggested_suffix}` to all prompts"
)
def optional_fn(x):
return add_suffix(x, suggested_suffix)
return Remediation(
name="common_completion_suffix",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
error_msg=error_msg,
)
def common_prompt_prefix_validator(df):
"""
This validator will suggest to remove a common prefix from the prompt if a long one exists.
"""
MAX_PREFIX_LEN = 12
immediate_msg = None
optional_msg = None
optional_fn = None
common_prefix = get_common_xfix(df.prompt, xfix="prefix")
if common_prefix == "":
return Remediation(name="common_prefix")
def remove_common_prefix(x, prefix):
x["prompt"] = x["prompt"].str[len(prefix) :]
return x
if (df.prompt == common_prefix).all():
# already handled by common_suffix_validator
return Remediation(name="common_prefix")
if common_prefix != "":
immediate_msg = f"\n- All prompts start with prefix `{common_prefix}`"
if MAX_PREFIX_LEN < len(common_prefix):
immediate_msg += ". Fine-tuning doesn't require the instruction specifying the task, or a few-shot example scenario. Most of the time you should only add the input data into the prompt, and the desired output into the completion"
optional_msg = f"Remove prefix `{common_prefix}` from all prompts"
def optional_fn(x):
return remove_common_prefix(x, common_prefix)
return Remediation(
name="common_prompt_prefix",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
)
def common_completion_prefix_validator(df):
"""
This validator will suggest to remove a common prefix from the completion if a long one exists.
"""
MAX_PREFIX_LEN = 5
common_prefix = get_common_xfix(df.completion, xfix="prefix")
ws_prefix = len(common_prefix) > 0 and common_prefix[0] == " "
if len(common_prefix) < MAX_PREFIX_LEN:
return Remediation(name="common_prefix")
def remove_common_prefix(x, prefix, ws_prefix):
x["completion"] = x["completion"].str[len(prefix) :]
if ws_prefix:
# keep the single whitespace as prefix
x["completion"] = " " + x["completion"]
return x
if (df.completion == common_prefix).all():
# already handled by common_suffix_validator
return Remediation(name="common_prefix")
immediate_msg = f"\n- All completions start with prefix `{common_prefix}`. Most of the time you should only add the output data into the completion, without any prefix"
optional_msg = f"Remove prefix `{common_prefix}` from all completions"
def optional_fn(x):
return remove_common_prefix(x, common_prefix, ws_prefix)
return Remediation(
name="common_completion_prefix",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
)
def common_completion_suffix_validator(df):
"""
This validator will suggest to add a common suffix to the completion if one doesn't already exist in case of classification or conditional generation.
"""
error_msg = None
immediate_msg = None
optional_msg = None
optional_fn = None
ft_type = infer_task_type(df)
if ft_type == "open-ended generation" or ft_type == "classification":
return Remediation(name="common_suffix")
common_suffix = get_common_xfix(df.completion, xfix="suffix")
if (df.completion == common_suffix).all():
error_msg = f"All completions are identical: `{common_suffix}`\nEnsure completions are different, otherwise the model will just repeat `{common_suffix}`"
return Remediation(name="common_suffix", error_msg=error_msg)
# Find a suffix which is not contained within the completion otherwise
suggested_suffix = " [END]"
suffix_options = [
"\n",
".",
" END",
"***",
"+++",
"&&&",
"$$$",
"@@@",
"%%%",
]
for suffix_option in suffix_options:
if df.completion.str.contains(suffix_option, regex=False).any():
continue
suggested_suffix = suffix_option
break
display_suggested_suffix = suggested_suffix.replace("\n", "\\n")
def add_suffix(x, suffix):
x["completion"] += suffix
return x
if common_suffix != "":
common_suffix_new_line_handled = common_suffix.replace("\n", "\\n")
immediate_msg = (
f"\n- All completions end with suffix `{common_suffix_new_line_handled}`"
)
if len(common_suffix) > 10:
immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`"
if (
df.completion.str[: -len(common_suffix)]
.str.contains(common_suffix, regex=False)
.any()
):
immediate_msg += f"\n WARNING: Some of your completions contain the suffix `{common_suffix}` more than once. We suggest that you review your completions and add a unique ending"
else:
immediate_msg = "\n- Your data does not contain a common ending at the end of your completions. Having a common ending string appended to the end of the completion makes it clearer to the fine-tuned model where the completion should end. See https://platform.apacai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples."
if common_suffix == "":
optional_msg = (
f"Add a suffix ending `{display_suggested_suffix}` to all completions"
)
def optional_fn(x):
return add_suffix(x, suggested_suffix)
return Remediation(
name="common_completion_suffix",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
error_msg=error_msg,
)
def completions_space_start_validator(df):
"""
This validator will suggest to add a space at the start of the completion if it doesn't already exist. This helps with tokenization.
"""
def add_space_start(x):
x["completion"] = x["completion"].apply(
lambda x: ("" if x[0] == " " else " ") + x
)
return x
optional_msg = None
optional_fn = None
immediate_msg = None
if df.completion.str[:1].nunique() != 1 or df.completion.values[0][0] != " ":
immediate_msg = "\n- The completion should start with a whitespace character (` `). This tends to produce better results due to the tokenization we use. See https://platform.apacai.com/docs/guides/fine-tuning/preparing-your-dataset for more details"
optional_msg = "Add a whitespace character to the beginning of the completion"
optional_fn = add_space_start
return Remediation(
name="completion_space_start",
immediate_msg=immediate_msg,
optional_msg=optional_msg,
optional_fn=optional_fn,
)
def lower_case_validator(df, column):
"""
This validator will suggest to lowercase the column values, if more than a third of letters are uppercase.
"""
def lower_case(x):
x[column] = x[column].str.lower()
return x
count_upper = (
df[column]
.apply(lambda x: sum(1 for c in x if c.isalpha() and c.isupper()))
.sum()
)
count_lower = (
df[column]
.apply(lambda x: sum(1 for c in x if c.isalpha() and c.islower()))
.sum()
)
if count_upper * 2 > count_lower:
return Remediation(
name="lower_case",
immediate_msg=f"\n- More than a third of your `{column}` column/key is uppercase. Uppercase {column}s tends to perform worse than a mixture of case encountered in normal language. We recommend to lower case the data if that makes sense in your domain. See https://platform.apacai.com/docs/guides/fine-tuning/preparing-your-dataset for more details",
optional_msg=f"Lowercase all your data in column/key `{column}`",
optional_fn=lower_case,
)
def read_any_format(fname, fields=["prompt", "completion"]):
"""
This function will read a file saved in .csv, .json, .txt, .xlsx or .tsv format using pandas.
- for .xlsx it will read the first sheet
- for .txt it will assume completions and split on newline
"""
assert_has_pandas()
remediation = None
necessary_msg = None
immediate_msg = None
error_msg = None
df = None
if os.path.isfile(fname):
try:
if fname.lower().endswith(".csv") or fname.lower().endswith(".tsv"):
file_extension_str, separator = (
("CSV", ",") if fname.lower().endswith(".csv") else ("TSV", "\t")
)
immediate_msg = f"\n- Based on your file extension, your file is formatted as a {file_extension_str} file"
necessary_msg = (
f"Your format `{file_extension_str}` will be converted to `JSONL`"
)
df = pd.read_csv(fname, sep=separator, dtype=str).fillna("")
elif fname.lower().endswith(".xlsx"):
immediate_msg = "\n- Based on your file extension, your file is formatted as an Excel file"
necessary_msg = "Your format `XLSX` will be converted to `JSONL`"
xls = pd.ExcelFile(fname)
sheets = xls.sheet_names
if len(sheets) > 1:
immediate_msg += "\n- Your Excel file contains more than one sheet. Please either save as csv or ensure all data is present in the first sheet. WARNING: Reading only the first sheet..."
df = pd.read_excel(fname, dtype=str).fillna("")
elif fname.lower().endswith(".txt"):
immediate_msg = (
"\n- Based on your file extension, you provided a text file"
)
necessary_msg = "Your format `TXT` will be converted to `JSONL`"
with open(fname, "r") as f:
content = f.read()
df = pd.DataFrame(
[["", line] for line in content.split("\n")],
columns=fields,
dtype=str,
).fillna("")
elif fname.lower().endswith(".jsonl"):
df = pd.read_json(fname, lines=True, dtype=str).fillna("")
if len(df) == 1:
# this is NOT what we expect for a .jsonl file
immediate_msg = "\n- Your JSONL file appears to be in a JSON format. Your file will be converted to JSONL format"
necessary_msg = "Your format `JSON` will be converted to `JSONL`"
df = pd.read_json(fname, dtype=str).fillna("")
else:
pass # this is what we expect for a .jsonl file
elif fname.lower().endswith(".json"):
try:
# to handle case where .json file is actually a .jsonl file
df = pd.read_json(fname, lines=True, dtype=str).fillna("")
if len(df) == 1:
# this code path corresponds to a .json file that has one line
df = pd.read_json(fname, dtype=str).fillna("")
else:
# this is NOT what we expect for a .json file
immediate_msg = "\n- Your JSON file appears to be in a JSONL format. Your file will be converted to JSONL format"
necessary_msg = (
"Your format `JSON` will be converted to `JSONL`"
)
except ValueError:
# this code path corresponds to a .json file that has multiple lines (i.e. it is indented)
df = pd.read_json(fname, dtype=str).fillna("")
else:
error_msg = "Your file must have one of the following extensions: .CSV, .TSV, .XLSX, .TXT, .JSON or .JSONL"
if "." in fname:
error_msg += f" Your file `{fname}` ends with the extension `.{fname.split('.')[-1]}` which is not supported."
else:
error_msg += f" Your file `{fname}` is missing a file extension."
except (ValueError, TypeError):
file_extension_str = fname.split(".")[-1].upper()
error_msg = f"Your file `{fname}` does not appear to be in valid {file_extension_str} format. Please ensure your file is formatted as a valid {file_extension_str} file."
else:
error_msg = f"File {fname} does not exist."
remediation = Remediation(
name="read_any_format",
necessary_msg=necessary_msg,
immediate_msg=immediate_msg,
error_msg=error_msg,
)
return df, remediation
def format_inferrer_validator(df):
"""
This validator will infer the likely fine-tuning format of the data, and display it to the user if it is classification.
It will also suggest to use ada and explain train/validation split benefits.
"""
ft_type = infer_task_type(df)
immediate_msg = None
if ft_type == "classification":
immediate_msg = f"\n- Based on your data it seems like you're trying to fine-tune a model for {ft_type}\n- For classification, we recommend you try one of the faster and cheaper models, such as `ada`\n- For classification, you can estimate the expected model performance by keeping a held out dataset, which is not used for training"
return Remediation(name="num_examples", immediate_msg=immediate_msg)
def apply_necessary_remediation(df, remediation):
"""
This function will apply a necessary remediation to a dataframe, or print an error message if one exists.
"""
if remediation.error_msg is not None:
sys.stderr.write(
f"\n\nERROR in {remediation.name} validator: {remediation.error_msg}\n\nAborting..."
)
sys.exit(1)
if remediation.immediate_msg is not None:
sys.stdout.write(remediation.immediate_msg)
if remediation.necessary_fn is not None:
df = remediation.necessary_fn(df)
return df
def accept_suggestion(input_text, auto_accept):
sys.stdout.write(input_text)
if auto_accept:
sys.stdout.write("Y\n")
return True
return input().lower() != "n"
def apply_optional_remediation(df, remediation, auto_accept):
"""
This function will apply an optional remediation to a dataframe, based on the user input.
"""
optional_applied = False
input_text = f"- [Recommended] {remediation.optional_msg} [Y/n]: "
if remediation.optional_msg is not None:
if accept_suggestion(input_text, auto_accept):
df = remediation.optional_fn(df)
optional_applied = True
if remediation.necessary_msg is not None:
sys.stdout.write(f"- [Necessary] {remediation.necessary_msg}\n")
return df, optional_applied
def estimate_fine_tuning_time(df):
"""
Estimate the time it'll take to fine-tune the dataset
"""
ft_format = infer_task_type(df)
expected_time = 1.0
if ft_format == "classification":
num_examples = len(df)
expected_time = num_examples * 1.44
else:
size = df.memory_usage(index=True).sum()
expected_time = size * 0.0515
def format_time(time):
if time < 60:
return f"{round(time, 2)} seconds"
elif time < 3600:
return f"{round(time / 60, 2)} minutes"
elif time < 86400:
return f"{round(time / 3600, 2)} hours"
else:
return f"{round(time / 86400, 2)} days"
time_string = format_time(expected_time + 140)
sys.stdout.write(
f"Once your model starts training, it'll approximately take {time_string} to train a `curie` model, and less for `ada` and `babbage`. Queue will approximately take half an hour per job ahead of you.\n"
)
def get_outfnames(fname, split):
suffixes = ["_train", "_valid"] if split else [""]
i = 0
while True:
index_suffix = f" ({i})" if i > 0 else ""
candidate_fnames = [
os.path.splitext(fname)[0] + "_prepared" + suffix + index_suffix + ".jsonl"
for suffix in suffixes
]
if not any(os.path.isfile(f) for f in candidate_fnames):
return candidate_fnames
i += 1
def get_classification_hyperparams(df):
n_classes = df.completion.nunique()
pos_class = None
if n_classes == 2:
pos_class = df.completion.value_counts().index[0]
return n_classes, pos_class
def write_out_file(df, fname, any_remediations, auto_accept):
"""
This function will write out a dataframe to a file, if the user would like to proceed, and also offer a fine-tuning command with the newly created file.
For classification it will optionally ask the user if they would like to split the data into train/valid files, and modify the suggested command to include the valid set.
"""
ft_format = infer_task_type(df)
common_prompt_suffix = get_common_xfix(df.prompt, xfix="suffix")
common_completion_suffix = get_common_xfix(df.completion, xfix="suffix")
split = False
input_text = "- [Recommended] Would you like to split into training and validation set? [Y/n]: "
if ft_format == "classification":
if accept_suggestion(input_text, auto_accept):
split = True
additional_params = ""
common_prompt_suffix_new_line_handled = common_prompt_suffix.replace("\n", "\\n")
common_completion_suffix_new_line_handled = common_completion_suffix.replace(
"\n", "\\n"
)
optional_ending_string = (
f' Make sure to include `stop=["{common_completion_suffix_new_line_handled}"]` so that the generated text ends at the expected place.'
if len(common_completion_suffix_new_line_handled) > 0
else ""
)
input_text = "\n\nYour data will be written to a new JSONL file. Proceed [Y/n]: "
if not any_remediations and not split:
sys.stdout.write(
f'\nYou can use your file for fine-tuning:\n> apacai api fine_tunes.create -t "{fname}"{additional_params}\n\nAfter you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt.{optional_ending_string}\n'
)
estimate_fine_tuning_time(df)
elif accept_suggestion(input_text, auto_accept):
fnames = get_outfnames(fname, split)
if split:
assert len(fnames) == 2 and "train" in fnames[0] and "valid" in fnames[1]
MAX_VALID_EXAMPLES = 1000
n_train = max(len(df) - MAX_VALID_EXAMPLES, int(len(df) * 0.8))
df_train = df.sample(n=n_train, random_state=42)
df_valid = df.drop(df_train.index)
df_train[["prompt", "completion"]].to_json(
fnames[0], lines=True, orient="records", force_ascii=False
)
df_valid[["prompt", "completion"]].to_json(
fnames[1], lines=True, orient="records", force_ascii=False
)
n_classes, pos_class = get_classification_hyperparams(df)
additional_params += " --compute_classification_metrics"
if n_classes == 2:
additional_params += f' --classification_positive_class "{pos_class}"'
else:
additional_params += f" --classification_n_classes {n_classes}"
else:
assert len(fnames) == 1
df[["prompt", "completion"]].to_json(
fnames[0], lines=True, orient="records", force_ascii=False
)
# Add -v VALID_FILE if we split the file into train / valid
files_string = ("s" if split else "") + " to `" + ("` and `".join(fnames))
valid_string = f' -v "{fnames[1]}"' if split else ""
separator_reminder = (
""
if len(common_prompt_suffix_new_line_handled) == 0
else f"After you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt."
)
sys.stdout.write(
f'\nWrote modified file{files_string}`\nFeel free to take a look!\n\nNow use that file when fine-tuning:\n> apacai api fine_tunes.create -t "{fnames[0]}"{valid_string}{additional_params}\n\n{separator_reminder}{optional_ending_string}\n'
)
estimate_fine_tuning_time(df)
else:
sys.stdout.write("Aborting... did not write the file\n")
def infer_task_type(df):
"""
Infer the likely fine-tuning task type from the data
"""
CLASSIFICATION_THRESHOLD = 3 # min_average instances of each class
if sum(df.prompt.str.len()) == 0:
return "open-ended generation"
if len(df.completion.unique()) < len(df) / CLASSIFICATION_THRESHOLD:
return "classification"
return "conditional generation"
def get_common_xfix(series, xfix="suffix"):
"""
Finds the longest common suffix or prefix of all the values in a series
"""
common_xfix = ""
while True:
common_xfixes = (
series.str[-(len(common_xfix) + 1) :]
if xfix == "suffix"
else series.str[: len(common_xfix) + 1]
) # first few or last few characters
if (
common_xfixes.nunique() != 1
): # we found the character at which we don't have a unique xfix anymore
break
elif (
common_xfix == common_xfixes.values[0]
): # the entire first row is a prefix of every other row
break
else: # the first or last few characters are still common across all rows - let's try to add one more
common_xfix = common_xfixes.values[0]
return common_xfix
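# Illustrative sketch, not part of the original module (requires pandas): the
# longest shared suffix or prefix is built up one character at a time.
def _example_get_common_xfix():
    s = pd.Series(["Paris is nice\n\n###\n\n", "Rome is old\n\n###\n\n"])
    assert get_common_xfix(s, xfix="suffix") == "\n\n###\n\n"
    assert get_common_xfix(s, xfix="prefix") == ""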
def get_validators():
return [
num_examples_validator,
lambda x: necessary_column_validator(x, "prompt"),
lambda x: necessary_column_validator(x, "completion"),
additional_column_validator,
non_empty_field_validator,
format_inferrer_validator,
duplicated_rows_validator,
long_examples_validator,
lambda x: lower_case_validator(x, "prompt"),
lambda x: lower_case_validator(x, "completion"),
common_prompt_suffix_validator,
common_prompt_prefix_validator,
common_completion_prefix_validator,
common_completion_suffix_validator,
completions_space_start_validator,
]
def apply_validators(
df,
fname,
remediation,
validators,
auto_accept,
write_out_file_func,
):
optional_remediations = []
if remediation is not None:
optional_remediations.append(remediation)
for validator in validators:
remediation = validator(df)
if remediation is not None:
optional_remediations.append(remediation)
df = apply_necessary_remediation(df, remediation)
any_optional_or_necessary_remediations = any(
[
remediation
for remediation in optional_remediations
if remediation.optional_msg is not None
or remediation.necessary_msg is not None
]
)
any_necessary_applied = any(
[
remediation
for remediation in optional_remediations
if remediation.necessary_msg is not None
]
)
any_optional_applied = False
if any_optional_or_necessary_remediations:
sys.stdout.write(
"\n\nBased on the analysis we will perform the following actions:\n"
)
for remediation in optional_remediations:
df, optional_applied = apply_optional_remediation(
df, remediation, auto_accept
)
any_optional_applied = any_optional_applied or optional_applied
else:
sys.stdout.write("\n\nNo remediations found.\n")
any_optional_or_necessary_applied = any_optional_applied or any_necessary_applied
write_out_file_func(df, fname, any_optional_or_necessary_applied, auto_accept)
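# Illustrative sketch, not part of the original module: how a data-preparation
# entry point might chain the helpers above (the filename is a placeholder).
#
#     df, read_remediation = read_any_format("data.jsonl")
#     apply_validators(
#         df,
#         "data.jsonl",
#         read_remediation,
#         get_validators(),
#         auto_accept=False,
#         write_out_file_func=write_out_file,
#     )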
| APACAI-API-main | apacai/validators.py |
import io
class CancelledError(Exception):
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg)
def __str__(self):
return self.msg
__repr__ = __str__
class BufferReader(io.BytesIO):
def __init__(self, buf=b"", desc=None):
self._len = len(buf)
io.BytesIO.__init__(self, buf)
self._progress = 0
self._callback = progress(len(buf), desc=desc)
def __len__(self):
return self._len
def read(self, n=-1):
chunk = io.BytesIO.read(self, n)
self._progress += len(chunk)
if self._callback:
try:
self._callback(self._progress)
except Exception as e: # catches exception from the callback
raise CancelledError("The upload was cancelled: {}".format(e))
return chunk
def progress(total, desc):
import tqdm # type: ignore
meter = tqdm.tqdm(total=total, unit_scale=True, desc=desc)
def incr(progress):
meter.n = progress
if progress == total:
meter.close()
else:
meter.refresh()
return incr
def MB(i):
return int(i // 1024**2)
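# Illustrative sketch, not part of the original module (requires tqdm): reading
# the buffer in chunks advances the progress meter created in __init__.
def _example_buffer_reader():
    buf = BufferReader(b"x" * (2 * 1024 * 1024), desc="Upload progress")
    while buf.read(512 * 1024):
        pass  # the tqdm meter closes itself once the whole buffer has been read
    assert MB(2 * 1024**2) == 2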
| APACAI-API-main | apacai/upload_progress.py |
#!/usr/bin/env python
import argparse
import logging
import sys
import apacai
from apacai import version
from apacai.cli import api_register, display_error, tools_register, wandb_register
logger = logging.getLogger()
formatter = logging.Formatter("[%(asctime)s] %(message)s")
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
def main():
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
"-V",
"--version",
action="version",
version="%(prog)s " + version.VERSION,
)
parser.add_argument(
"-v",
"--verbose",
action="count",
dest="verbosity",
default=0,
help="Set verbosity.",
)
parser.add_argument("-b", "--api-base", help="What API base url to use.")
parser.add_argument("-k", "--api-key", help="What API key to use.")
parser.add_argument("-p", "--proxy", nargs='+', help="What proxy to use.")
parser.add_argument(
"-o",
"--organization",
help="Which organization to run as (will use your default organization if not specified)",
)
def help(args):
parser.print_help()
parser.set_defaults(func=help)
subparsers = parser.add_subparsers()
sub_api = subparsers.add_parser("api", help="Direct API calls")
sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience")
sub_wandb = subparsers.add_parser("wandb", help="Logging with Weights & Biases")
api_register(sub_api)
tools_register(sub_tools)
wandb_register(sub_wandb)
args = parser.parse_args()
if args.verbosity == 1:
logger.setLevel(logging.INFO)
elif args.verbosity >= 2:
logger.setLevel(logging.DEBUG)
apacai.debug = True
if args.api_key is not None:
apacai.api_key = args.api_key
if args.api_base is not None:
apacai.api_base = args.api_base
if args.organization is not None:
apacai.organization = args.organization
if args.proxy is not None:
apacai.proxy = {}
for proxy in args.proxy:
if proxy.startswith('https'):
apacai.proxy['https'] = proxy
elif proxy.startswith('http'):
apacai.proxy['http'] = proxy
try:
args.func(args)
except apacai.error.ApacAIError as e:
display_error(e)
return 1
except KeyboardInterrupt:
sys.stderr.write("\n")
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
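# Illustrative CLI sketch, not part of the original script. The `api` subcommand
# and the fine_tunes.create invocation mirror the command suggested by
# validators.py; the key and file name are placeholders:
#
#     apacai -k sk-... api fine_tunes.create -t "data_prepared.jsonl"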
| APACAI-API-main | apacai/_apacai_scripts.py |
from apacai import api_resources
from apacai.api_resources.experimental.completion_config import CompletionConfig
OBJECT_CLASSES = {
"engine": api_resources.Engine,
"experimental.completion_config": CompletionConfig,
"file": api_resources.File,
"fine-tune": api_resources.FineTune,
"model": api_resources.Model,
"deployment": api_resources.Deployment,
}
| APACAI-API-main | apacai/object_classes.py |
# APACAI Python bindings.
#
# Originally forked from the MIT-licensed Stripe Python bindings.
import os
import sys
from typing import TYPE_CHECKING, Optional, Union, Callable
from contextvars import ContextVar
if "pkg_resources" not in sys.modules:
# workaround for the following:
# https://github.com/benoitc/gunicorn/pull/2539
sys.modules["pkg_resources"] = object() # type: ignore[assignment]
import aiohttp
del sys.modules["pkg_resources"]
from apacai.api_resources import (
Audio,
ChatCompletion,
Completion,
Customer,
Deployment,
Edit,
Embedding,
Engine,
ErrorObject,
File,
FineTune,
Image,
Model,
Moderation,
)
from apacai.error import APIError, InvalidRequestError, ApacAIError
from apacai.version import VERSION
if TYPE_CHECKING:
import requests
from aiohttp import ClientSession
api_key = os.environ.get("APACAI_API_KEY")
# Path of a file with an API key, whose contents can change. Supersedes
# `api_key` if set. The main use case is volume-mounted Kubernetes secrets,
# which are updated automatically.
api_key_path: Optional[str] = os.environ.get("APACAI_API_KEY_PATH")
organization = os.environ.get("APACAI_ORGANIZATION")
api_base = os.environ.get("APACAI_API_BASE", "https://api.apacai.com/v1")
api_type = os.environ.get("APACAI_API_TYPE", "open_ai")
api_version = os.environ.get(
"APACAI_API_VERSION",
("2023-05-15" if api_type in ("azure", "azure_ad", "azuread") else None),
)
verify_ssl_certs = True # No effect. Certificates are always verified.
proxy = None
app_info = None
enable_telemetry = False # Ignored; the telemetry feature was removed.
ca_bundle_path = None # No longer used, feature was removed
debug = False
log = None # Set to either 'debug' or 'info', controls console logging
requestssession: Optional[
Union["requests.Session", Callable[[], "requests.Session"]]
] = None # Provide a requests.Session or Session factory.
aiosession: ContextVar[Optional["ClientSession"]] = ContextVar(
"aiohttp-session", default=None
) # Acts as a global aiohttp ClientSession that reuses connections.
# This is user-supplied; otherwise, a session is remade for each request.
__version__ = VERSION
__all__ = [
"APIError",
"Audio",
"ChatCompletion",
"Completion",
"Customer",
"Edit",
"Image",
"Deployment",
"Embedding",
"Engine",
"ErrorObject",
"File",
"FineTune",
"InvalidRequestError",
"Model",
"Moderation",
"ApacAIError",
"api_base",
"api_key",
"api_type",
"api_key_path",
"api_version",
"app_info",
"ca_bundle_path",
"debug",
"enable_telemetry",
"log",
"organization",
"proxy",
"verify_ssl_certs",
]
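# Illustrative configuration sketch, not part of the original module. The key,
# organization, model and prompt below are placeholders, and Completion.create's
# keyword arguments are assumed from the resource classes imported above:
#
#     import apacai
#     apacai.api_key = "sk-..."
#     apacai.organization = "org-..."
#     response = apacai.Completion.create(model="ada", prompt="Say hello")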
| APACAI-API-main | apacai/__init__.py |
import asyncio
import json
import time
import platform
import sys
import threading
import warnings
from contextlib import asynccontextmanager
from json import JSONDecodeError
from typing import (
AsyncGenerator,
AsyncIterator,
Callable,
Dict,
Iterator,
Optional,
Tuple,
Union,
overload,
)
from urllib.parse import urlencode, urlsplit, urlunsplit
import aiohttp
import requests
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
import apacai
from apacai import error, util, version
from apacai.apacai_response import ApacAIResponse
from apacai.util import ApiType
TIMEOUT_SECS = 600
MAX_SESSION_LIFETIME_SECS = 180
MAX_CONNECTION_RETRIES = 2
# Has one attribute per thread, 'session'.
_thread_context = threading.local()
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlsplit(url)
if base_query:
query = "%s&%s" % (base_query, query)
return urlunsplit((scheme, netloc, path, query, fragment))
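# Illustrative sketch, not part of the original module: an extra query string is
# appended to whatever query the URL already carries.
def _example_build_api_url():
    assert (
        _build_api_url("https://api.apacai.com/v1/models?limit=5", "after=x")
        == "https://api.apacai.com/v1/models?limit=5&after=x"
    )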
def _requests_proxies_arg(proxy) -> Optional[Dict[str, str]]:
"""Returns a value suitable for the 'proxies' argument to 'requests.request."""
if proxy is None:
return None
elif isinstance(proxy, str):
return {"http": proxy, "https": proxy}
elif isinstance(proxy, dict):
return proxy.copy()
else:
raise ValueError(
"'apacai.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys."
)
def _aiohttp_proxies_arg(proxy) -> Optional[str]:
"""Returns a value suitable for the 'proxies' argument to 'aiohttp.ClientSession.request."""
if proxy is None:
return None
elif isinstance(proxy, str):
return proxy
elif isinstance(proxy, dict):
return proxy["https"] if "https" in proxy else proxy["http"]
else:
raise ValueError(
"'apacai.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys."
)
def _make_session() -> requests.Session:
if apacai.requestssession:
if isinstance(apacai.requestssession, requests.Session):
return apacai.requestssession
return apacai.requestssession()
if not apacai.verify_ssl_certs:
warnings.warn("verify_ssl_certs is ignored; apacai always verifies.")
s = requests.Session()
proxies = _requests_proxies_arg(apacai.proxy)
if proxies:
s.proxies = proxies
s.mount(
"https://",
requests.adapters.HTTPAdapter(max_retries=MAX_CONNECTION_RETRIES),
)
return s
def parse_stream_helper(line: bytes) -> Optional[str]:
if line:
if line.strip() == b"data: [DONE]":
# return here will cause GeneratorExit exception in urllib3
# and it will close http connection with TCP Reset
return None
if line.startswith(b"data: "):
line = line[len(b"data: "):]
return line.decode("utf-8")
else:
return None
return None
def parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:
for line in rbody:
_line = parse_stream_helper(line)
if _line is not None:
yield _line
async def parse_stream_async(rbody: aiohttp.StreamReader):
async for line in rbody:
_line = parse_stream_helper(line)
if _line is not None:
yield _line
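# Illustrative sketch, not part of the original module: parse_stream strips the
# SSE "data: " prefix and drops the terminal "data: [DONE]" sentinel.
def _example_parse_stream():
    lines = [b'data: {"id": 1}', b"data: [DONE]"]
    assert list(parse_stream(iter(lines))) == ['{"id": 1}']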
class APIRequestor:
def __init__(
self,
key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
):
self.api_base = api_base or apacai.api_base
self.api_key = key or util.default_api_key()
self.api_type = (
ApiType.from_str(api_type)
if api_type
else ApiType.from_str(apacai.api_type)
)
self.api_version = api_version or apacai.api_version
self.organization = organization or apacai.organization
@classmethod
def format_app_info(cls, info):
str = info["name"]
if info["version"]:
str += "/%s" % (info["version"],)
if info["url"]:
str += " (%s)" % (info["url"],)
return str
def _check_polling_response(self, response: ApacAIResponse, predicate: Callable[[ApacAIResponse], bool]):
if not predicate(response):
return
error_data = response.data['error']
message = error_data.get('message', 'Operation failed')
code = error_data.get('code')
raise error.ApacAIError(message=message, code=code)
def _poll(
self,
method,
url,
until,
failed,
params = None,
headers = None,
interval = None,
delay = None
) -> Tuple[Iterator[ApacAIResponse], bool, str]:
if delay:
time.sleep(delay)
response, b, api_key = self.request(method, url, params, headers)
self._check_polling_response(response, failed)
start_time = time.time()
while not until(response):
if time.time() - start_time > TIMEOUT_SECS:
raise error.Timeout("Operation polling timed out.")
time.sleep(interval or response.retry_after or 10)
response, b, api_key = self.request(method, url, params, headers)
self._check_polling_response(response, failed)
response.data = response.data['result']
return response, b, api_key
async def _apoll(
self,
method,
url,
until,
failed,
params = None,
headers = None,
interval = None,
delay = None
) -> Tuple[Iterator[ApacAIResponse], bool, str]:
if delay:
await asyncio.sleep(delay)
response, b, api_key = await self.arequest(method, url, params, headers)
self._check_polling_response(response, failed)
start_time = time.time()
while not until(response):
if time.time() - start_time > TIMEOUT_SECS:
raise error.Timeout("Operation polling timed out.")
await asyncio.sleep(interval or response.retry_after or 10)
response, b, api_key = await self.arequest(method, url, params, headers)
self._check_polling_response(response, failed)
response.data = response.data['result']
return response, b, api_key
@overload
def request(
self,
method,
url,
params,
headers,
files,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Iterator[ApacAIResponse], bool, str]:
pass
@overload
def request(
self,
method,
url,
params=...,
headers=...,
files=...,
*,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Iterator[ApacAIResponse], bool, str]:
pass
@overload
def request(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: Literal[False] = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[ApacAIResponse, bool, str]:
pass
@overload
def request(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: bool = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Union[ApacAIResponse, Iterator[ApacAIResponse]], bool, str]:
pass
def request(
self,
method,
url,
params=None,
headers=None,
files=None,
stream: bool = False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> Tuple[Union[ApacAIResponse, Iterator[ApacAIResponse]], bool, str]:
result = self.request_raw(
method.lower(),
url,
params=params,
supplied_headers=headers,
files=files,
stream=stream,
request_id=request_id,
request_timeout=request_timeout,
)
resp, got_stream = self._interpret_response(result, stream)
return resp, got_stream, self.api_key
@overload
async def arequest(
self,
method,
url,
params,
headers,
files,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[AsyncGenerator[ApacAIResponse, None], bool, str]:
pass
@overload
async def arequest(
self,
method,
url,
params=...,
headers=...,
files=...,
*,
stream: Literal[True],
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[AsyncGenerator[ApacAIResponse, None], bool, str]:
pass
@overload
async def arequest(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: Literal[False] = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[ApacAIResponse, bool, str]:
pass
@overload
async def arequest(
self,
method,
url,
params=...,
headers=...,
files=...,
stream: bool = ...,
request_id: Optional[str] = ...,
request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,
) -> Tuple[Union[ApacAIResponse, AsyncGenerator[ApacAIResponse, None]], bool, str]:
pass
async def arequest(
self,
method,
url,
params=None,
headers=None,
files=None,
stream: bool = False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> Tuple[Union[ApacAIResponse, AsyncGenerator[ApacAIResponse, None]], bool, str]:
ctx = aiohttp_session()
session = await ctx.__aenter__()
try:
result = await self.arequest_raw(
method.lower(),
url,
session,
params=params,
supplied_headers=headers,
files=files,
request_id=request_id,
request_timeout=request_timeout,
)
resp, got_stream = await self._interpret_async_response(result, stream)
except Exception:
await ctx.__aexit__(None, None, None)
raise
if got_stream:
async def wrap_resp():
assert isinstance(resp, AsyncGenerator)
try:
async for r in resp:
yield r
finally:
await ctx.__aexit__(None, None, None)
return wrap_resp(), got_stream, self.api_key
else:
await ctx.__aexit__(None, None, None)
return resp, got_stream, self.api_key
def handle_error_response(self, rbody, rcode, resp, rheaders, stream_error=False):
try:
error_data = resp["error"]
except (KeyError, TypeError):
raise error.APIError(
"Invalid response object from API: %r (HTTP response code "
"was %d)" % (rbody, rcode),
rbody,
rcode,
resp,
)
if "internal_message" in error_data:
error_data["message"] += "\n\n" + error_data["internal_message"]
util.log_info(
"APACAI API error received",
error_code=error_data.get("code"),
error_type=error_data.get("type"),
error_message=error_data.get("message"),
error_param=error_data.get("param"),
stream_error=stream_error,
)
# Rate limits were previously coded as 400's with code 'rate_limit'
if rcode == 429:
return error.RateLimitError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode in [400, 404, 415]:
return error.InvalidRequestError(
error_data.get("message"),
error_data.get("param"),
error_data.get("code"),
rbody,
rcode,
resp,
rheaders,
)
elif rcode == 401:
return error.AuthenticationError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode == 403:
return error.PermissionError(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif rcode == 409:
return error.TryAgain(
error_data.get("message"), rbody, rcode, resp, rheaders
)
elif stream_error:
# TODO: we will soon attach status codes to stream errors
parts = [error_data.get("message"), "(Error occurred while streaming.)"]
message = " ".join([p for p in parts if p is not None])
return error.APIError(message, rbody, rcode, resp, rheaders)
else:
return error.APIError(
f"{error_data.get('message')} {rbody} {rcode} {resp} {rheaders}",
rbody,
rcode,
resp,
rheaders,
)
def request_headers(
self, method: str, extra, request_id: Optional[str]
) -> Dict[str, str]:
user_agent = "APACAI/v1 PythonBindings/%s" % (version.VERSION,)
if apacai.app_info:
user_agent += " " + self.format_app_info(apacai.app_info)
uname_without_node = " ".join(
v for k, v in platform.uname()._asdict().items() if k != "node"
)
ua = {
"bindings_version": version.VERSION,
"httplib": "requests",
"lang": "python",
"lang_version": platform.python_version(),
"platform": platform.platform(),
"publisher": "apacai",
"uname": uname_without_node,
}
if apacai.app_info:
ua["application"] = apacai.app_info
headers = {
"X-APACAI-Client-User-Agent": json.dumps(ua),
"User-Agent": user_agent,
}
headers.update(util.api_key_to_header(self.api_type, self.api_key))
if self.organization:
headers["APACAI-Organization"] = self.organization
if self.api_version is not None and self.api_type == ApiType.OPEN_AI:
headers["APACAI-Version"] = self.api_version
if request_id is not None:
headers["X-Request-Id"] = request_id
if apacai.debug:
headers["APACAI-Debug"] = "true"
headers.update(extra)
return headers
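    # For reference, a minimal sketch of the headers the method above produces
    # (values are placeholders, not real; the Authorization form assumes
    # util.api_key_to_header emits a bearer token for the default api type):
    #
    #   {
    #       "User-Agent": "APACAI/v1 PythonBindings/<version>",
    #       "X-APACAI-Client-User-Agent": "{...json client metadata...}",
    #       "Authorization": "Bearer <api_key>",
    #       "APACAI-Organization": "<org>",   # only when an organization is set
    #       "X-Request-Id": "<request_id>",   # only when a request_id is given
    #   }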
def _validate_headers(
self, supplied_headers: Optional[Dict[str, str]]
) -> Dict[str, str]:
headers: Dict[str, str] = {}
if supplied_headers is None:
return headers
if not isinstance(supplied_headers, dict):
raise TypeError("Headers must be a dictionary")
for k, v in supplied_headers.items():
if not isinstance(k, str):
raise TypeError("Header keys must be strings")
if not isinstance(v, str):
raise TypeError("Header values must be strings")
headers[k] = v
# NOTE: It is possible to do more validation of the headers, but a request could always
# be made to the API manually with invalid headers, so we need to handle them server side.
return headers
def _prepare_request_raw(
self,
url,
supplied_headers,
method,
params,
files,
request_id: Optional[str],
) -> Tuple[str, Dict[str, str], Optional[bytes]]:
abs_url = "%s%s" % (self.api_base, url)
headers = self._validate_headers(supplied_headers)
data = None
if method == "get" or method == "delete":
if params:
encoded_params = urlencode(
[(k, v) for k, v in params.items() if v is not None]
)
abs_url = _build_api_url(abs_url, encoded_params)
elif method in {"post", "put"}:
if params and files:
data = params
if params and not files:
data = json.dumps(params).encode()
headers["Content-Type"] = "application/json"
else:
raise error.APIConnectionError(
"Unrecognized HTTP method %r. This may indicate a bug in the "
"APACAI bindings. Please contact us through our help center at help.apacai.com for "
"assistance." % (method,)
)
headers = self.request_headers(method, headers, request_id)
util.log_debug("Request to APACAI API", method=method, path=abs_url)
util.log_debug("Post details", data=data, api_version=self.api_version)
return abs_url, headers, data
def request_raw(
self,
method,
url,
*,
params=None,
supplied_headers: Optional[Dict[str, str]] = None,
files=None,
stream: bool = False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> requests.Response:
abs_url, headers, data = self._prepare_request_raw(
url, supplied_headers, method, params, files, request_id
)
if not hasattr(_thread_context, "session"):
_thread_context.session = _make_session()
_thread_context.session_create_time = time.time()
elif (
time.time() - getattr(_thread_context, "session_create_time", 0)
>= MAX_SESSION_LIFETIME_SECS
):
_thread_context.session.close()
_thread_context.session = _make_session()
_thread_context.session_create_time = time.time()
try:
result = _thread_context.session.request(
method,
abs_url,
headers=headers,
data=data,
files=files,
stream=stream,
timeout=request_timeout if request_timeout else TIMEOUT_SECS,
proxies=_thread_context.session.proxies,
)
except requests.exceptions.Timeout as e:
raise error.Timeout("Request timed out: {}".format(e)) from e
except requests.exceptions.RequestException as e:
raise error.APIConnectionError(
"Error communicating with APACAI: {}".format(e)
) from e
util.log_debug(
"APACAI API response",
path=abs_url,
response_code=result.status_code,
processing_ms=result.headers.get("APACAI-Processing-Ms"),
request_id=result.headers.get("X-Request-Id"),
)
# Don't read the whole stream for debug logging unless necessary.
if apacai.log == "debug":
util.log_debug(
"API response body", body=result.content, headers=result.headers
)
return result
async def arequest_raw(
self,
method,
url,
session,
*,
params=None,
supplied_headers: Optional[Dict[str, str]] = None,
files=None,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
) -> aiohttp.ClientResponse:
abs_url, headers, data = self._prepare_request_raw(
url, supplied_headers, method, params, files, request_id
)
if isinstance(request_timeout, tuple):
timeout = aiohttp.ClientTimeout(
connect=request_timeout[0],
total=request_timeout[1],
)
else:
timeout = aiohttp.ClientTimeout(
total=request_timeout if request_timeout else TIMEOUT_SECS
)
if files:
# TODO: Use `aiohttp.MultipartWriter` to create the multipart form data here.
# For now we use the private `requests` method that is known to have worked so far.
data, content_type = requests.models.RequestEncodingMixin._encode_files( # type: ignore
files, data
)
headers["Content-Type"] = content_type
request_kwargs = {
"method": method,
"url": abs_url,
"headers": headers,
"data": data,
"proxy": _aiohttp_proxies_arg(apacai.proxy),
"timeout": timeout,
}
try:
result = await session.request(**request_kwargs)
util.log_info(
"APACAI API response",
path=abs_url,
response_code=result.status,
processing_ms=result.headers.get("APACAI-Processing-Ms"),
request_id=result.headers.get("X-Request-Id"),
)
# Don't read the whole stream for debug logging unless necessary.
if apacai.log == "debug":
util.log_debug(
"API response body", body=result.content, headers=result.headers
)
return result
except (aiohttp.ServerTimeoutError, asyncio.TimeoutError) as e:
raise error.Timeout("Request timed out") from e
except aiohttp.ClientError as e:
raise error.APIConnectionError("Error communicating with APACAI") from e
def _interpret_response(
self, result: requests.Response, stream: bool
) -> Tuple[Union[ApacAIResponse, Iterator[ApacAIResponse]], bool]:
"""Returns the response(s) and a bool indicating whether it is a stream."""
if stream and "text/event-stream" in result.headers.get("Content-Type", ""):
return (
self._interpret_response_line(
line, result.status_code, result.headers, stream=True
)
for line in parse_stream(result.iter_lines())
), True
else:
return (
self._interpret_response_line(
result.content.decode("utf-8"),
result.status_code,
result.headers,
stream=False,
),
False,
)
async def _interpret_async_response(
self, result: aiohttp.ClientResponse, stream: bool
) -> Tuple[Union[ApacAIResponse, AsyncGenerator[ApacAIResponse, None]], bool]:
"""Returns the response(s) and a bool indicating whether it is a stream."""
if stream and "text/event-stream" in result.headers.get("Content-Type", ""):
return (
self._interpret_response_line(
line, result.status, result.headers, stream=True
)
async for line in parse_stream_async(result.content)
), True
else:
try:
await result.read()
except (aiohttp.ServerTimeoutError, asyncio.TimeoutError) as e:
raise error.Timeout("Request timed out") from e
except aiohttp.ClientError as e:
util.log_warn(e, body=result.content)
return (
self._interpret_response_line(
(await result.read()).decode("utf-8"),
result.status,
result.headers,
stream=False,
),
False,
)
def _interpret_response_line(
self, rbody: str, rcode: int, rheaders, stream: bool
) -> ApacAIResponse:
# HTTP 204 response code does not have any content in the body.
if rcode == 204:
return ApacAIResponse(None, rheaders)
if rcode == 503:
raise error.ServiceUnavailableError(
"The server is overloaded or not ready yet.",
rbody,
rcode,
headers=rheaders,
)
try:
if 'text/plain' in rheaders.get('Content-Type', ''):
data = rbody
else:
data = json.loads(rbody)
except (JSONDecodeError, UnicodeDecodeError) as e:
raise error.APIError(
f"HTTP code {rcode} from API ({rbody})", rbody, rcode, headers=rheaders
) from e
resp = ApacAIResponse(data, rheaders)
# In the future, we might add a "status" parameter to errors
# to better handle the "error while streaming" case.
stream_error = stream and "error" in resp.data
if stream_error or not 200 <= rcode < 300:
raise self.handle_error_response(
rbody, rcode, resp.data, rheaders, stream_error=stream_error
)
return resp
@asynccontextmanager
async def aiohttp_session() -> AsyncIterator[aiohttp.ClientSession]:
user_set_session = apacai.aiosession.get()
if user_set_session:
yield user_set_session
else:
async with aiohttp.ClientSession() as session:
yield session
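# Usage note (illustrative, assuming `apacai.aiosession` is a ContextVar-style
# holder with a matching .set()): callers can make async requests share one
# client session instead of opening a new one per arequest(), e.g.
#
#   async with aiohttp.ClientSession() as session:
#       apacai.aiosession.set(session)  # aiohttp_session() will then yield it
#       ...  # issue async API calls here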
| APACAI-API-main | apacai/api_requestor.py |
import datetime
import os
import signal
import sys
import warnings
from typing import Optional
import requests
import apacai
from apacai.upload_progress import BufferReader
from apacai.validators import (
apply_necessary_remediation,
apply_validators,
get_validators,
read_any_format,
write_out_file,
)
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def organization_info(obj):
organization = getattr(obj, "organization", None)
if organization is not None:
return "[organization={}] ".format(organization)
else:
return ""
def display(obj):
sys.stderr.write(organization_info(obj))
sys.stderr.flush()
print(obj)
def display_error(e):
extra = (
" (HTTP status code: {})".format(e.http_status)
if e.http_status is not None
else ""
)
sys.stderr.write(
"{}{}Error:{} {}{}\n".format(
organization_info(e), bcolors.FAIL, bcolors.ENDC, e, extra
)
)
class Engine:
@classmethod
def get(cls, args):
engine = apacai.Engine.retrieve(id=args.id)
display(engine)
@classmethod
def update(cls, args):
engine = apacai.Engine.modify(args.id, replicas=args.replicas)
display(engine)
@classmethod
def generate(cls, args):
warnings.warn(
"Engine.generate is deprecated, use Completion.create", DeprecationWarning
)
if args.completions and args.completions > 1 and args.stream:
raise ValueError("Can't stream multiple completions with apacai CLI")
kwargs = {}
if args.model is not None:
kwargs["model"] = args.model
resp = apacai.Engine(id=args.id).generate(
completions=args.completions,
context=args.context,
length=args.length,
stream=args.stream,
temperature=args.temperature,
top_p=args.top_p,
logprobs=args.logprobs,
stop=args.stop,
**kwargs,
)
if not args.stream:
resp = [resp]
for part in resp:
completions = len(part["data"])
for c_idx, c in enumerate(part["data"]):
if completions > 1:
sys.stdout.write("===== Completion {} =====\n".format(c_idx))
sys.stdout.write("".join(c["text"]))
if completions > 1:
sys.stdout.write("\n")
sys.stdout.flush()
@classmethod
def list(cls, args):
engines = apacai.Engine.list()
display(engines)
class ChatCompletion:
@classmethod
def create(cls, args):
if args.n is not None and args.n > 1 and args.stream:
raise ValueError(
"Can't stream chat completions with n>1 with the current CLI"
)
messages = [
{"role": role, "content": content} for role, content in args.message
]
resp = apacai.ChatCompletion.create(
# Required
model=args.model,
engine=args.engine,
messages=messages,
# Optional
n=args.n,
max_tokens=args.max_tokens,
temperature=args.temperature,
top_p=args.top_p,
stop=args.stop,
stream=args.stream,
)
if not args.stream:
resp = [resp]
for part in resp:
choices = part["choices"]
for c_idx, c in enumerate(sorted(choices, key=lambda s: s["index"])):
if len(choices) > 1:
sys.stdout.write("===== Chat Completion {} =====\n".format(c_idx))
if args.stream:
delta = c["delta"]
if "content" in delta:
sys.stdout.write(delta["content"])
else:
sys.stdout.write(c["message"]["content"])
if len(choices) > 1: # not in streams
sys.stdout.write("\n")
sys.stdout.flush()
class Completion:
@classmethod
def create(cls, args):
if args.n is not None and args.n > 1 and args.stream:
raise ValueError("Can't stream completions with n>1 with the current CLI")
if args.engine and args.model:
warnings.warn(
"In most cases, you should not be specifying both engine and model."
)
resp = apacai.Completion.create(
engine=args.engine,
model=args.model,
n=args.n,
max_tokens=args.max_tokens,
logprobs=args.logprobs,
prompt=args.prompt,
stream=args.stream,
temperature=args.temperature,
top_p=args.top_p,
stop=args.stop,
echo=True,
)
if not args.stream:
resp = [resp]
for part in resp:
choices = part["choices"]
for c_idx, c in enumerate(sorted(choices, key=lambda s: s["index"])):
if len(choices) > 1:
sys.stdout.write("===== Completion {} =====\n".format(c_idx))
sys.stdout.write(c["text"])
if len(choices) > 1:
sys.stdout.write("\n")
sys.stdout.flush()
class Deployment:
@classmethod
def get(cls, args):
resp = apacai.Deployment.retrieve(id=args.id)
print(resp)
@classmethod
def delete(cls, args):
model = apacai.Deployment.delete(args.id)
print(model)
@classmethod
def list(cls, args):
models = apacai.Deployment.list()
print(models)
@classmethod
def create(cls, args):
models = apacai.Deployment.create(
model=args.model, scale_settings={"scale_type": args.scale_type}
)
print(models)
class Model:
@classmethod
def get(cls, args):
resp = apacai.Model.retrieve(id=args.id)
print(resp)
@classmethod
def delete(cls, args):
model = apacai.Model.delete(args.id)
print(model)
@classmethod
def list(cls, args):
models = apacai.Model.list()
print(models)
class File:
@classmethod
def create(cls, args):
with open(args.file, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
resp = apacai.File.create(
file=buffer_reader,
purpose=args.purpose,
user_provided_filename=args.file,
)
print(resp)
@classmethod
def get(cls, args):
resp = apacai.File.retrieve(id=args.id)
print(resp)
@classmethod
def delete(cls, args):
file = apacai.File.delete(args.id)
print(file)
@classmethod
def list(cls, args):
file = apacai.File.list()
print(file)
class Image:
@classmethod
def create(cls, args):
resp = apacai.Image.create(
prompt=args.prompt,
size=args.size,
n=args.num_images,
response_format=args.response_format,
)
print(resp)
@classmethod
def create_variation(cls, args):
with open(args.image, "rb") as file_reader:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")
resp = apacai.Image.create_variation(
image=buffer_reader,
size=args.size,
n=args.num_images,
response_format=args.response_format,
)
print(resp)
@classmethod
def create_edit(cls, args):
with open(args.image, "rb") as file_reader:
image_reader = BufferReader(file_reader.read(), desc="Upload progress")
mask_reader = None
if args.mask is not None:
with open(args.mask, "rb") as file_reader:
mask_reader = BufferReader(file_reader.read(), desc="Upload progress")
resp = apacai.Image.create_edit(
image=image_reader,
mask=mask_reader,
prompt=args.prompt,
size=args.size,
n=args.num_images,
response_format=args.response_format,
)
print(resp)
class Audio:
@classmethod
def transcribe(cls, args):
with open(args.file, "rb") as r:
file_reader = BufferReader(r.read(), desc="Upload progress")
resp = apacai.Audio.transcribe_raw(
# Required
model=args.model,
file=file_reader,
filename=args.file,
# Optional
response_format=args.response_format,
language=args.language,
temperature=args.temperature,
prompt=args.prompt,
)
print(resp)
@classmethod
def translate(cls, args):
with open(args.file, "rb") as r:
file_reader = BufferReader(r.read(), desc="Upload progress")
resp = apacai.Audio.translate_raw(
# Required
model=args.model,
file=file_reader,
filename=args.file,
# Optional
response_format=args.response_format,
language=args.language,
temperature=args.temperature,
prompt=args.prompt,
)
print(resp)
class FineTune:
@classmethod
def list(cls, args):
resp = apacai.FineTune.list()
print(resp)
@classmethod
def _is_url(cls, file: str):
return file.lower().startswith("http")
@classmethod
def _download_file_from_public_url(cls, url: str) -> Optional[bytes]:
resp = requests.get(url)
if resp.status_code == 200:
return resp.content
else:
return None
@classmethod
def _maybe_upload_file(
cls,
file: Optional[str] = None,
content: Optional[bytes] = None,
user_provided_file: Optional[str] = None,
check_if_file_exists: bool = True,
):
# Exactly one of `file` or `content` must be provided
if (file is None) == (content is None):
raise ValueError("Exactly one of `file` or `content` must be provided")
if content is None:
assert file is not None
with open(file, "rb") as f:
content = f.read()
if check_if_file_exists:
bytes = len(content)
matching_files = apacai.File.find_matching_files(
name=user_provided_file or f.name, bytes=bytes, purpose="fine-tune"
)
if len(matching_files) > 0:
file_ids = [f["id"] for f in matching_files]
sys.stdout.write(
"Found potentially duplicated files with name '{name}', purpose 'fine-tune' and size {size} bytes\n".format(
name=os.path.basename(matching_files[0]["filename"]),
size=matching_files[0]["bytes"]
if "bytes" in matching_files[0]
else matching_files[0]["size"],
)
)
sys.stdout.write("\n".join(file_ids))
while True:
sys.stdout.write(
"\nEnter file ID to reuse an already uploaded file, or an empty string to upload this file anyway: "
)
inp = sys.stdin.readline().strip()
if inp in file_ids:
sys.stdout.write(
"Reusing already uploaded file: {id}\n".format(id=inp)
)
return inp
elif inp == "":
break
else:
sys.stdout.write(
"File id '{id}' is not among the IDs of the potentially duplicated files\n".format(
id=inp
)
)
buffer_reader = BufferReader(content, desc="Upload progress")
resp = apacai.File.create(
file=buffer_reader,
purpose="fine-tune",
user_provided_filename=user_provided_file or file,
)
sys.stdout.write(
"Uploaded file from {file}: {id}\n".format(
file=user_provided_file or file, id=resp["id"]
)
)
return resp["id"]
@classmethod
def _get_or_upload(cls, file, check_if_file_exists=True):
try:
# 1. If it's a valid file, use it
apacai.File.retrieve(file)
return file
except apacai.error.InvalidRequestError:
pass
if os.path.isfile(file):
# 2. If it's a file on the filesystem, upload it
return cls._maybe_upload_file(
file=file, check_if_file_exists=check_if_file_exists
)
if cls._is_url(file):
# 3. If it's a URL, download it temporarily
content = cls._download_file_from_public_url(file)
if content is not None:
return cls._maybe_upload_file(
content=content,
check_if_file_exists=check_if_file_exists,
user_provided_file=file,
)
return file
@classmethod
def create(cls, args):
create_args = {
"training_file": cls._get_or_upload(
args.training_file, args.check_if_files_exist
),
}
if args.validation_file:
create_args["validation_file"] = cls._get_or_upload(
args.validation_file, args.check_if_files_exist
)
for hparam in (
"model",
"suffix",
"n_epochs",
"batch_size",
"learning_rate_multiplier",
"prompt_loss_weight",
"compute_classification_metrics",
"classification_n_classes",
"classification_positive_class",
"classification_betas",
):
attr = getattr(args, hparam)
if attr is not None:
create_args[hparam] = attr
resp = apacai.FineTune.create(**create_args)
if args.no_follow:
print(resp)
return
sys.stdout.write(
"Created fine-tune: {job_id}\n"
"Streaming events until fine-tuning is complete...\n\n"
"(Ctrl-C will interrupt the stream, but not cancel the fine-tune)\n".format(
job_id=resp["id"]
)
)
cls._stream_events(resp["id"])
@classmethod
def get(cls, args):
resp = apacai.FineTune.retrieve(id=args.id)
print(resp)
@classmethod
def results(cls, args):
fine_tune = apacai.FineTune.retrieve(id=args.id)
if "result_files" not in fine_tune or len(fine_tune["result_files"]) == 0:
raise apacai.error.InvalidRequestError(
f"No results file available for fine-tune {args.id}", "id"
)
result_file = apacai.FineTune.retrieve(id=args.id)["result_files"][0]
resp = apacai.File.download(id=result_file["id"])
print(resp.decode("utf-8"))
@classmethod
def events(cls, args):
if args.stream:
raise apacai.error.ApacAIError(
message=(
"The --stream parameter is deprecated, use fine_tunes.follow "
"instead:\n\n"
" apacai api fine_tunes.follow -i {id}\n".format(id=args.id)
),
)
resp = apacai.FineTune.list_events(id=args.id) # type: ignore
print(resp)
@classmethod
def follow(cls, args):
cls._stream_events(args.id)
@classmethod
def _stream_events(cls, job_id):
def signal_handler(sig, frame):
status = apacai.FineTune.retrieve(job_id).status
sys.stdout.write(
"\nStream interrupted. Job is still {status}.\n"
"To resume the stream, run:\n\n"
" apacai api fine_tunes.follow -i {job_id}\n\n"
"To cancel your job, run:\n\n"
" apacai api fine_tunes.cancel -i {job_id}\n\n".format(
status=status, job_id=job_id
)
)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
events = apacai.FineTune.stream_events(job_id)
# TODO(rachel): Add a nifty spinner here.
try:
for event in events:
sys.stdout.write(
"[%s] %s"
% (
datetime.datetime.fromtimestamp(event["created_at"]),
event["message"],
)
)
sys.stdout.write("\n")
sys.stdout.flush()
except Exception:
sys.stdout.write(
"\nStream interrupted (client disconnected).\n"
"To resume the stream, run:\n\n"
" apacai api fine_tunes.follow -i {job_id}\n\n".format(job_id=job_id)
)
return
resp = apacai.FineTune.retrieve(id=job_id)
status = resp["status"]
if status == "succeeded":
sys.stdout.write("\nJob complete! Status: succeeded 🎉")
sys.stdout.write(
"\nTry out your fine-tuned model:\n\n"
"apacai api completions.create -m {model} -p <YOUR_PROMPT>".format(
model=resp["fine_tuned_model"]
)
)
elif status == "failed":
sys.stdout.write(
"\nJob failed. Please contact us through our help center at help.apacai.com if you need assistance."
)
sys.stdout.write("\n")
@classmethod
def cancel(cls, args):
resp = apacai.FineTune.cancel(id=args.id)
print(resp)
@classmethod
def delete(cls, args):
resp = apacai.FineTune.delete(sid=args.id)
print(resp)
@classmethod
def prepare_data(cls, args):
sys.stdout.write("Analyzing...\n")
fname = args.file
auto_accept = args.quiet
df, remediation = read_any_format(fname)
apply_necessary_remediation(None, remediation)
validators = get_validators()
apply_validators(
df,
fname,
remediation,
validators,
auto_accept,
write_out_file_func=write_out_file,
)
class WandbLogger:
@classmethod
def sync(cls, args):
import apacai.wandb_logger
resp = apacai.wandb_logger.WandbLogger.sync(
id=args.id,
n_fine_tunes=args.n_fine_tunes,
project=args.project,
entity=args.entity,
force=args.force,
)
print(resp)
def tools_register(parser):
subparsers = parser.add_subparsers(
title="Tools", help="Convenience client side tools"
)
def help(args):
parser.print_help()
parser.set_defaults(func=help)
sub = subparsers.add_parser("fine_tunes.prepare_data")
sub.add_argument(
"-f",
"--file",
required=True,
help="JSONL, JSON, CSV, TSV, TXT or XLSX file containing prompt-completion examples to be analyzed."
"This should be the local file path.",
)
sub.add_argument(
"-q",
"--quiet",
required=False,
action="store_true",
help="Auto accepts all suggestions, without asking for user input. To be used within scripts.",
)
sub.set_defaults(func=FineTune.prepare_data)
def api_register(parser):
# Engine management
subparsers = parser.add_subparsers(help="All API subcommands")
def help(args):
parser.print_help()
parser.set_defaults(func=help)
sub = subparsers.add_parser("engines.list")
sub.set_defaults(func=Engine.list)
sub = subparsers.add_parser("engines.get")
sub.add_argument("-i", "--id", required=True)
sub.set_defaults(func=Engine.get)
sub = subparsers.add_parser("engines.update")
sub.add_argument("-i", "--id", required=True)
sub.add_argument("-r", "--replicas", type=int)
sub.set_defaults(func=Engine.update)
sub = subparsers.add_parser("engines.generate")
sub.add_argument("-i", "--id", required=True)
sub.add_argument(
"--stream", help="Stream tokens as they're ready.", action="store_true"
)
sub.add_argument("-c", "--context", help="An optional context to generate from")
sub.add_argument("-l", "--length", help="How many tokens to generate", type=int)
sub.add_argument(
"-t",
"--temperature",
help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
Mutually exclusive with `top_p`.""",
type=float,
)
sub.add_argument(
"-p",
"--top_p",
help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
Mutually exclusive with `temperature`.""",
type=float,
)
sub.add_argument(
"-n",
"--completions",
help="How many parallel completions to run on this context",
type=int,
)
sub.add_argument(
"--logprobs",
help="Include the log probabilites on the `logprobs` most likely tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is supplied, the API will always return the logprob of the generated token, so there may be up to `logprobs+1` elements in the response.",
type=int,
)
sub.add_argument(
"--stop", help="A stop sequence at which to stop generating tokens."
)
sub.add_argument(
"-m",
"--model",
required=False,
help="A model (most commonly a model ID) to generate from. Defaults to the engine's default model.",
)
sub.set_defaults(func=Engine.generate)
# Chat Completions
sub = subparsers.add_parser("chat_completions.create")
sub._action_groups.pop()
req = sub.add_argument_group("required arguments")
opt = sub.add_argument_group("optional arguments")
req.add_argument(
"-g",
"--message",
action="append",
nargs=2,
metavar=("ROLE", "CONTENT"),
help="A message in `{role} {content}` format. Use this argument multiple times to add multiple messages.",
required=True,
)
group = opt.add_mutually_exclusive_group()
group.add_argument(
"-e",
"--engine",
help="The engine to use. See https://learn.microsoft.com/en-us/azure/cognitive-services/apacai/chatgpt-quickstart?pivots=programming-language-python for more about what engines are available.",
)
group.add_argument(
"-m",
"--model",
help="The model to use.",
)
opt.add_argument(
"-n",
"--n",
help="How many completions to generate for the conversation.",
type=int,
)
opt.add_argument(
"-M", "--max-tokens", help="The maximum number of tokens to generate.", type=int
)
opt.add_argument(
"-t",
"--temperature",
help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
Mutually exclusive with `top_p`.""",
type=float,
)
opt.add_argument(
"-P",
"--top_p",
help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
Mutually exclusive with `temperature`.""",
type=float,
)
opt.add_argument(
"--stop",
help="A stop sequence at which to stop generating tokens for the message.",
)
opt.add_argument(
"--stream", help="Stream messages as they're ready.", action="store_true"
)
sub.set_defaults(func=ChatCompletion.create)
# Completions
sub = subparsers.add_parser("completions.create")
sub.add_argument(
"-e",
"--engine",
help="The engine to use. See https://platform.apacai.com/docs/engines for more about what engines are available.",
)
sub.add_argument(
"-m",
"--model",
help="The model to use. At most one of `engine` or `model` should be specified.",
)
sub.add_argument(
"--stream", help="Stream tokens as they're ready.", action="store_true"
)
sub.add_argument("-p", "--prompt", help="An optional prompt to complete from")
sub.add_argument(
"-M", "--max-tokens", help="The maximum number of tokens to generate", type=int
)
sub.add_argument(
"-t",
"--temperature",
help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
Mutually exclusive with `top_p`.""",
type=float,
)
sub.add_argument(
"-P",
"--top_p",
help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.
Mutually exclusive with `temperature`.""",
type=float,
)
sub.add_argument(
"-n",
"--n",
help="How many sub-completions to generate for each prompt.",
type=int,
)
sub.add_argument(
"--logprobs",
help="Include the log probabilites on the `logprobs` most likely tokens, as well the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.",
type=int,
)
sub.add_argument(
"--stop", help="A stop sequence at which to stop generating tokens."
)
sub.set_defaults(func=Completion.create)
# Deployments
sub = subparsers.add_parser("deployments.list")
sub.set_defaults(func=Deployment.list)
sub = subparsers.add_parser("deployments.get")
sub.add_argument("-i", "--id", required=True, help="The deployment ID")
sub.set_defaults(func=Deployment.get)
sub = subparsers.add_parser("deployments.delete")
sub.add_argument("-i", "--id", required=True, help="The deployment ID")
sub.set_defaults(func=Deployment.delete)
sub = subparsers.add_parser("deployments.create")
sub.add_argument("-m", "--model", required=True, help="The model ID")
sub.add_argument(
"-s",
"--scale_type",
required=True,
help="The scale type. Either 'manual' or 'standard'",
)
sub.set_defaults(func=Deployment.create)
# Models
sub = subparsers.add_parser("models.list")
sub.set_defaults(func=Model.list)
sub = subparsers.add_parser("models.get")
sub.add_argument("-i", "--id", required=True, help="The model ID")
sub.set_defaults(func=Model.get)
sub = subparsers.add_parser("models.delete")
sub.add_argument("-i", "--id", required=True, help="The model ID")
sub.set_defaults(func=Model.delete)
# Files
sub = subparsers.add_parser("files.create")
sub.add_argument(
"-f",
"--file",
required=True,
help="File to upload",
)
sub.add_argument(
"-p",
"--purpose",
help="Why are you uploading this file? (see https://platform.apacai.com/docs/api-reference/ for purposes)",
required=True,
)
sub.set_defaults(func=File.create)
sub = subparsers.add_parser("files.get")
sub.add_argument("-i", "--id", required=True, help="The files ID")
sub.set_defaults(func=File.get)
sub = subparsers.add_parser("files.delete")
sub.add_argument("-i", "--id", required=True, help="The files ID")
sub.set_defaults(func=File.delete)
sub = subparsers.add_parser("files.list")
sub.set_defaults(func=File.list)
# Finetune
sub = subparsers.add_parser("fine_tunes.list")
sub.set_defaults(func=FineTune.list)
sub = subparsers.add_parser("fine_tunes.create")
sub.add_argument(
"-t",
"--training_file",
required=True,
help="JSONL file containing prompt-completion examples for training. This can "
"be the ID of a file uploaded through the APACAI API (e.g. file-abcde12345), "
'a local file path, or a URL that starts with "http".',
)
sub.add_argument(
"-v",
"--validation_file",
help="JSONL file containing prompt-completion examples for validation. This can "
"be the ID of a file uploaded through the APACAI API (e.g. file-abcde12345), "
'a local file path, or a URL that starts with "http".',
)
sub.add_argument(
"--no_check_if_files_exist",
dest="check_if_files_exist",
action="store_false",
help="If this argument is set and training_file or validation_file are file paths, immediately upload them. If this argument is not set, check if they may be duplicates of already uploaded files before uploading, based on file name and file size.",
)
sub.add_argument(
"-m",
"--model",
help="The model to start fine-tuning from",
)
sub.add_argument(
"--suffix",
help="If set, this argument can be used to customize the generated fine-tuned model name."
"All punctuation and whitespace in `suffix` will be replaced with a "
"single dash, and the string will be lower cased. The max "
"length of `suffix` is 40 chars. "
"The generated name will match the form `{base_model}:ft-{org-title}:{suffix}-{timestamp}`. "
        'For example, `apacai api fine_tunes.create -t test.jsonl -m ada --suffix "custom model name"` '
"could generate a model with the name "
"ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
)
sub.add_argument(
"--no_follow",
action="store_true",
help="If set, returns immediately after creating the job. Otherwise, streams events and waits for the job to complete.",
)
sub.add_argument(
"--n_epochs",
type=int,
help="The number of epochs to train the model for. An epoch refers to one "
"full cycle through the training dataset.",
)
sub.add_argument(
"--batch_size",
type=int,
help="The batch size to use for training. The batch size is the number of "
"training examples used to train a single forward and backward pass.",
)
sub.add_argument(
"--learning_rate_multiplier",
type=float,
help="The learning rate multiplier to use for training. The fine-tuning "
"learning rate is determined by the original learning rate used for "
"pretraining multiplied by this value.",
)
sub.add_argument(
"--prompt_loss_weight",
type=float,
help="The weight to use for the prompt loss. The optimum value here depends "
"depends on your use case. This determines how much the model prioritizes "
"learning from prompt tokens vs learning from completion tokens.",
)
sub.add_argument(
"--compute_classification_metrics",
action="store_true",
help="If set, we calculate classification-specific metrics such as accuracy "
"and F-1 score using the validation set at the end of every epoch.",
)
sub.set_defaults(compute_classification_metrics=None)
sub.add_argument(
"--classification_n_classes",
type=int,
help="The number of classes in a classification task. This parameter is "
"required for multiclass classification.",
)
sub.add_argument(
"--classification_positive_class",
help="The positive class in binary classification. This parameter is needed "
"to generate precision, recall and F-1 metrics when doing binary "
"classification.",
)
sub.add_argument(
"--classification_betas",
type=float,
nargs="+",
help="If this is provided, we calculate F-beta scores at the specified beta "
"values. The F-beta score is a generalization of F-1 score. This is only "
"used for binary classification.",
)
sub.set_defaults(func=FineTune.create)
sub = subparsers.add_parser("fine_tunes.get")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
sub.set_defaults(func=FineTune.get)
sub = subparsers.add_parser("fine_tunes.results")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
sub.set_defaults(func=FineTune.results)
sub = subparsers.add_parser("fine_tunes.events")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
# TODO(rachel): Remove this in 1.0
sub.add_argument(
"-s",
"--stream",
action="store_true",
help="[DEPRECATED] If set, events will be streamed until the job is done. Otherwise, "
"displays the event history to date.",
)
sub.set_defaults(func=FineTune.events)
sub = subparsers.add_parser("fine_tunes.follow")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
sub.set_defaults(func=FineTune.follow)
sub = subparsers.add_parser("fine_tunes.cancel")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
sub.set_defaults(func=FineTune.cancel)
sub = subparsers.add_parser("fine_tunes.delete")
sub.add_argument("-i", "--id", required=True, help="The id of the fine-tune job")
sub.set_defaults(func=FineTune.delete)
# Image
sub = subparsers.add_parser("image.create")
sub.add_argument("-p", "--prompt", type=str, required=True)
sub.add_argument("-n", "--num-images", type=int, default=1)
sub.add_argument(
"-s", "--size", type=str, default="1024x1024", help="Size of the output image"
)
sub.add_argument("--response-format", type=str, default="url")
sub.set_defaults(func=Image.create)
sub = subparsers.add_parser("image.create_edit")
sub.add_argument("-p", "--prompt", type=str, required=True)
sub.add_argument("-n", "--num-images", type=int, default=1)
sub.add_argument(
"-I",
"--image",
type=str,
required=True,
help="Image to modify. Should be a local path and a PNG encoded image.",
)
sub.add_argument(
"-s", "--size", type=str, default="1024x1024", help="Size of the output image"
)
sub.add_argument("--response-format", type=str, default="url")
sub.add_argument(
"-M",
"--mask",
type=str,
required=False,
help="Path to a mask image. It should be the same size as the image you're editing and a RGBA PNG image. The Alpha channel acts as the mask.",
)
sub.set_defaults(func=Image.create_edit)
sub = subparsers.add_parser("image.create_variation")
sub.add_argument("-n", "--num-images", type=int, default=1)
sub.add_argument(
"-I",
"--image",
type=str,
required=True,
help="Image to modify. Should be a local path and a PNG encoded image.",
)
sub.add_argument(
"-s", "--size", type=str, default="1024x1024", help="Size of the output image"
)
sub.add_argument("--response-format", type=str, default="url")
sub.set_defaults(func=Image.create_variation)
# Audio
# transcriptions
sub = subparsers.add_parser("audio.transcribe")
# Required
sub.add_argument("-m", "--model", type=str, default="whisper-1")
sub.add_argument("-f", "--file", type=str, required=True)
# Optional
sub.add_argument("--response-format", type=str)
sub.add_argument("--language", type=str)
sub.add_argument("-t", "--temperature", type=float)
sub.add_argument("--prompt", type=str)
sub.set_defaults(func=Audio.transcribe)
# translations
sub = subparsers.add_parser("audio.translate")
# Required
sub.add_argument("-m", "--model", type=str, default="whisper-1")
sub.add_argument("-f", "--file", type=str, required=True)
# Optional
sub.add_argument("--response-format", type=str)
sub.add_argument("--language", type=str)
sub.add_argument("-t", "--temperature", type=float)
sub.add_argument("--prompt", type=str)
sub.set_defaults(func=Audio.translate)
def wandb_register(parser):
subparsers = parser.add_subparsers(
title="wandb", help="Logging with Weights & Biases"
)
def help(args):
parser.print_help()
parser.set_defaults(func=help)
sub = subparsers.add_parser("sync")
sub.add_argument("-i", "--id", help="The id of the fine-tune job (optional)")
sub.add_argument(
"-n",
"--n_fine_tunes",
type=int,
default=None,
help="Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced.",
)
sub.add_argument(
"--project",
default="GPT-3",
help="""Name of the project where you're sending runs. By default, it is "GPT-3".""",
)
sub.add_argument(
"--entity",
help="Username or team name where you're sending runs. By default, your default entity is used, which is usually your username.",
)
sub.add_argument(
"--force",
action="store_true",
help="Forces logging and overwrite existing wandb run of the same fine-tune.",
)
sub.set_defaults(force=False)
sub.set_defaults(func=WandbLogger.sync)
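# Illustrative wiring (an assumption -- the real console entry point lives
# elsewhere in the package): the *_register functions above are meant to be
# attached to argparse sub-parsers, roughly like:
#
#   parser = argparse.ArgumentParser(prog="apacai")
#   subparsers = parser.add_subparsers()
#   api_register(subparsers.add_parser("api"))
#   tools_register(subparsers.add_parser("tools"))
#   wandb_register(subparsers.add_parser("wandb"))
#   args = parser.parse_args()
#   args.func(args)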
| APACAI-API-main | apacai/cli.py |
from typing import Optional
class ApacAIResponse:
def __init__(self, data, headers):
self._headers = headers
self.data = data
@property
def request_id(self) -> Optional[str]:
return self._headers.get("request-id")
@property
def retry_after(self) -> Optional[int]:
try:
return int(self._headers.get("retry-after"))
except TypeError:
return None
@property
def operation_location(self) -> Optional[str]:
return self._headers.get("operation-location")
@property
def organization(self) -> Optional[str]:
return self._headers.get("APACAI-Organization")
@property
def response_ms(self) -> Optional[int]:
h = self._headers.get("APACAI-Processing-Ms")
return None if h is None else round(float(h))
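# Example (illustrative): the request helpers hand back instances of this class,
# so callers can read transport metadata without touching the payload.
#
#   resp = ApacAIResponse({"object": "list", "data": []}, {"request-id": "req-abc"})
#   resp.request_id   # -> "req-abc"
#   resp.retry_after  # -> None (header absent, TypeError swallowed above)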
| APACAI-API-main | apacai/apacai_response.py |
import textwrap as tr
from typing import List, Optional
import matplotlib.pyplot as plt
import plotly.express as px
from scipy import spatial
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import average_precision_score, precision_recall_curve
from tenacity import retry, stop_after_attempt, wait_random_exponential
import apacai
from apacai.datalib.numpy_helper import numpy as np
from apacai.datalib.pandas_helper import pandas as pd
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_embedding(text: str, engine="text-similarity-davinci-001", **kwargs) -> List[float]:
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return apacai.Embedding.create(input=[text], engine=engine, **kwargs)["data"][0]["embedding"]
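# Illustrative call (the engine name is just this function's default; any
# embedding-capable engine or deployment available to the account would work):
#
#   vec = get_embedding("the quick brown fox", engine="text-similarity-davinci-001")
#   len(vec)  # dimensionality of the returned embedding vector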
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
async def aget_embedding(
text: str, engine="text-similarity-davinci-001", **kwargs
) -> List[float]:
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return (await apacai.Embedding.acreate(input=[text], engine=engine, **kwargs))["data"][0][
"embedding"
]
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_embeddings(
list_of_text: List[str], engine="text-similarity-babbage-001", **kwargs
) -> List[List[float]]:
assert len(list_of_text) <= 2048, "The batch size should not be larger than 2048."
# replace newlines, which can negatively affect performance.
list_of_text = [text.replace("\n", " ") for text in list_of_text]
data = apacai.Embedding.create(input=list_of_text, engine=engine, **kwargs).data
return [d["embedding"] for d in data]
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
async def aget_embeddings(
list_of_text: List[str], engine="text-similarity-babbage-001", **kwargs
) -> List[List[float]]:
assert len(list_of_text) <= 2048, "The batch size should not be larger than 2048."
# replace newlines, which can negatively affect performance.
list_of_text = [text.replace("\n", " ") for text in list_of_text]
data = (await apacai.Embedding.acreate(input=list_of_text, engine=engine, **kwargs)).data
return [d["embedding"] for d in data]
def cosine_similarity(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
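# Tiny worked example: for a = [1, 0] and b = [1, 1], np.dot(a, b) = 1,
# norm(a) = 1 and norm(b) = sqrt(2), so cosine_similarity(a, b) ~= 0.7071.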
def plot_multiclass_precision_recall(
y_score, y_true_untransformed, class_list, classifier_name
):
"""
    Precision-Recall plotting for a multiclass problem. It plots the micro-averaged precision-recall curve, per-class precision-recall curves, and reference F1 contours.
Code slightly modified, but heavily based on https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
"""
n_classes = len(class_list)
y_true = pd.concat(
[(y_true_untransformed == class_list[i]) for i in range(n_classes)], axis=1
).values
# For each class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_true[:, i], y_score[:, i])
average_precision[i] = average_precision_score(y_true[:, i], y_score[:, i])
# A "micro-average": quantifying score on all classes jointly
precision_micro, recall_micro, _ = precision_recall_curve(
y_true.ravel(), y_score.ravel()
)
average_precision_micro = average_precision_score(y_true, y_score, average="micro")
print(
str(classifier_name)
+ " - Average precision score over all classes: {0:0.2f}".format(
average_precision_micro
)
)
# setup plot details
plt.figure(figsize=(9, 10))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
(l,) = plt.plot(x[y >= 0], y[y >= 0], color="gray", alpha=0.2)
plt.annotate("f1={0:0.1f}".format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append("iso-f1 curves")
(l,) = plt.plot(recall_micro, precision_micro, color="gold", lw=2)
lines.append(l)
labels.append(
"average Precision-recall (auprc = {0:0.2f})" "".format(average_precision_micro)
)
for i in range(n_classes):
(l,) = plt.plot(recall[i], precision[i], lw=2)
lines.append(l)
labels.append(
"Precision-recall for class `{0}` (auprc = {1:0.2f})"
"".format(class_list[i], average_precision[i])
)
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title(f"{classifier_name}: Precision-Recall curve for each class")
plt.legend(lines, labels)
def distances_from_embeddings(
query_embedding: List[float],
embeddings: List[List[float]],
distance_metric="cosine",
) -> List[List]:
"""Return the distances between a query embedding and a list of embeddings."""
distance_metrics = {
"cosine": spatial.distance.cosine,
"L1": spatial.distance.cityblock,
"L2": spatial.distance.euclidean,
"Linf": spatial.distance.chebyshev,
}
distances = [
distance_metrics[distance_metric](query_embedding, embedding)
for embedding in embeddings
]
return distances
def indices_of_nearest_neighbors_from_distances(distances) -> np.ndarray:
"""Return a list of indices of nearest neighbors from a list of distances."""
return np.argsort(distances)
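# Sketch of a nearest-neighbour lookup built from the two helpers above
# (illustrative; `query_embedding` and `corpus_embeddings` are assumed to be
# precomputed):
#
#   distances = distances_from_embeddings(query_embedding, corpus_embeddings,
#                                         distance_metric="cosine")
#   order = indices_of_nearest_neighbors_from_distances(distances)
#   best_match = order[0]  # np.argsort is ascending, so smallest distance first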
def pca_components_from_embeddings(
embeddings: List[List[float]], n_components=2
) -> np.ndarray:
"""Return the PCA components of a list of embeddings."""
pca = PCA(n_components=n_components)
array_of_embeddings = np.array(embeddings)
return pca.fit_transform(array_of_embeddings)
def tsne_components_from_embeddings(
embeddings: List[List[float]], n_components=2, **kwargs
) -> np.ndarray:
"""Returns t-SNE components of a list of embeddings."""
# use better defaults if not specified
if "init" not in kwargs.keys():
kwargs["init"] = "pca"
if "learning_rate" not in kwargs.keys():
kwargs["learning_rate"] = "auto"
tsne = TSNE(n_components=n_components, **kwargs)
array_of_embeddings = np.array(embeddings)
return tsne.fit_transform(array_of_embeddings)
def chart_from_components(
components: np.ndarray,
labels: Optional[List[str]] = None,
strings: Optional[List[str]] = None,
x_title="Component 0",
y_title="Component 1",
mark_size=5,
**kwargs,
):
"""Return an interactive 2D chart of embedding components."""
empty_list = ["" for _ in components]
data = pd.DataFrame(
{
x_title: components[:, 0],
y_title: components[:, 1],
"label": labels if labels else empty_list,
"string": ["<br>".join(tr.wrap(string, width=30)) for string in strings]
if strings
else empty_list,
}
)
chart = px.scatter(
data,
x=x_title,
y=y_title,
color="label" if labels else None,
symbol="label" if labels else None,
hover_data=["string"] if strings else None,
**kwargs,
).update_traces(marker=dict(size=mark_size))
return chart
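# Putting the helpers together (illustrative; `texts` is an assumed list of
# strings and rendering the figure requires a plotly-capable environment):
#
#   embeddings = get_embeddings(texts)
#   components = tsne_components_from_embeddings(embeddings)
#   chart_from_components(components, strings=texts).show()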
def chart_from_components_3D(
components: np.ndarray,
labels: Optional[List[str]] = None,
strings: Optional[List[str]] = None,
x_title: str = "Component 0",
y_title: str = "Component 1",
z_title: str = "Compontent 2",
mark_size: int = 5,
**kwargs,
):
"""Return an interactive 3D chart of embedding components."""
empty_list = ["" for _ in components]
data = pd.DataFrame(
{
x_title: components[:, 0],
y_title: components[:, 1],
z_title: components[:, 2],
"label": labels if labels else empty_list,
"string": ["<br>".join(tr.wrap(string, width=30)) for string in strings]
if strings
else empty_list,
}
)
chart = px.scatter_3d(
data,
x=x_title,
y=y_title,
z=z_title,
color="label" if labels else None,
symbol="label" if labels else None,
hover_data=["string"] if strings else None,
**kwargs,
).update_traces(marker=dict(size=mark_size))
return chart
| APACAI-API-main | apacai/embeddings_utils.py |
import json
from copy import deepcopy
from typing import Optional, Tuple, Union
import apacai
from apacai import api_requestor, util
from apacai.apacai_response import ApacAIResponse
from apacai.util import ApiType
class ApacAIObject(dict):
api_base_override = None
def __init__(
self,
id=None,
api_key=None,
api_version=None,
api_type=None,
organization=None,
response_ms: Optional[int] = None,
api_base=None,
engine=None,
**params,
):
super(ApacAIObject, self).__init__()
if response_ms is not None and not isinstance(response_ms, int):
raise TypeError(f"response_ms is a {type(response_ms).__name__}.")
self._response_ms = response_ms
self._retrieve_params = params
object.__setattr__(self, "api_key", api_key)
object.__setattr__(self, "api_version", api_version)
object.__setattr__(self, "api_type", api_type)
object.__setattr__(self, "organization", organization)
object.__setattr__(self, "api_base_override", api_base)
object.__setattr__(self, "engine", engine)
if id:
self["id"] = id
@property
def response_ms(self) -> Optional[int]:
return self._response_ms
def __setattr__(self, k, v):
if k[0] == "_" or k in self.__dict__:
return super(ApacAIObject, self).__setattr__(k, v)
self[k] = v
return None
def __getattr__(self, k):
if k[0] == "_":
raise AttributeError(k)
try:
return self[k]
except KeyError as err:
raise AttributeError(*err.args)
def __delattr__(self, k):
if k[0] == "_" or k in self.__dict__:
return super(ApacAIObject, self).__delattr__(k)
else:
del self[k]
def __setitem__(self, k, v):
if v == "":
raise ValueError(
"You cannot set %s to an empty string. "
"We interpret empty strings as None in requests."
"You may set %s.%s = None to delete the property" % (k, str(self), k)
)
super(ApacAIObject, self).__setitem__(k, v)
def __delitem__(self, k):
raise NotImplementedError("del is not supported")
# Custom unpickling method that uses `update` to update the dictionary
# without calling __setitem__, which would fail if any value is an empty
# string
def __setstate__(self, state):
self.update(state)
# Custom pickling method to ensure the instance is pickled as a custom
# class and not as a dict, otherwise __setstate__ would not be called when
# unpickling.
def __reduce__(self):
reduce_value = (
type(self), # callable
( # args
self.get("id", None),
self.api_key,
self.api_version,
self.api_type,
self.organization,
),
dict(self), # state
)
return reduce_value
@classmethod
def construct_from(
cls,
values,
api_key: Optional[str] = None,
api_version=None,
organization=None,
engine=None,
response_ms: Optional[int] = None,
):
instance = cls(
values.get("id"),
api_key=api_key,
api_version=api_version,
organization=organization,
engine=engine,
response_ms=response_ms,
)
instance.refresh_from(
values,
api_key=api_key,
api_version=api_version,
organization=organization,
response_ms=response_ms,
)
return instance
def refresh_from(
self,
values,
api_key=None,
api_version=None,
api_type=None,
organization=None,
response_ms: Optional[int] = None,
):
self.api_key = api_key or getattr(values, "api_key", None)
self.api_version = api_version or getattr(values, "api_version", None)
self.api_type = api_type or getattr(values, "api_type", None)
self.organization = organization or getattr(values, "organization", None)
self._response_ms = response_ms or getattr(values, "_response_ms", None)
# Wipe old state before setting new.
self.clear()
for k, v in values.items():
super(ApacAIObject, self).__setitem__(
k, util.convert_to_apacai_object(v, api_key, api_version, organization)
)
self._previous = values
@classmethod
def api_base(cls):
return None
def request(
self,
method,
url,
params=None,
headers=None,
stream=False,
plain_old_data=False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
):
if params is None:
params = self._retrieve_params
requestor = api_requestor.APIRequestor(
key=self.api_key,
api_base=self.api_base_override or self.api_base(),
api_type=self.api_type,
api_version=self.api_version,
organization=self.organization,
)
response, stream, api_key = requestor.request(
method,
url,
params=params,
stream=stream,
headers=headers,
request_id=request_id,
request_timeout=request_timeout,
)
if stream:
assert not isinstance(response, ApacAIResponse) # must be an iterator
return (
util.convert_to_apacai_object(
line,
api_key,
self.api_version,
self.organization,
plain_old_data=plain_old_data,
)
for line in response
)
else:
return util.convert_to_apacai_object(
response,
api_key,
self.api_version,
self.organization,
plain_old_data=plain_old_data,
)
async def arequest(
self,
method,
url,
params=None,
headers=None,
stream=False,
plain_old_data=False,
request_id: Optional[str] = None,
request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
):
if params is None:
params = self._retrieve_params
requestor = api_requestor.APIRequestor(
key=self.api_key,
api_base=self.api_base_override or self.api_base(),
api_type=self.api_type,
api_version=self.api_version,
organization=self.organization,
)
response, stream, api_key = await requestor.arequest(
method,
url,
params=params,
stream=stream,
headers=headers,
request_id=request_id,
request_timeout=request_timeout,
)
if stream:
assert not isinstance(response, ApacAIResponse) # must be an iterator
return (
util.convert_to_apacai_object(
line,
api_key,
self.api_version,
self.organization,
plain_old_data=plain_old_data,
)
for line in response
)
else:
return util.convert_to_apacai_object(
response,
api_key,
self.api_version,
self.organization,
plain_old_data=plain_old_data,
)
def __repr__(self):
ident_parts = [type(self).__name__]
obj = self.get("object")
if isinstance(obj, str):
ident_parts.append(obj)
if isinstance(self.get("id"), str):
ident_parts.append("id=%s" % (self.get("id"),))
unicode_repr = "<%s at %s> JSON: %s" % (
" ".join(ident_parts),
hex(id(self)),
str(self),
)
return unicode_repr
def __str__(self):
obj = self.to_dict_recursive()
return json.dumps(obj, indent=2)
def to_dict(self):
return dict(self)
def to_dict_recursive(self):
d = dict(self)
for k, v in d.items():
if isinstance(v, ApacAIObject):
d[k] = v.to_dict_recursive()
elif isinstance(v, list):
d[k] = [
e.to_dict_recursive() if isinstance(e, ApacAIObject) else e
for e in v
]
return d
@property
def apacai_id(self):
return self.id
@property
def typed_api_type(self):
return (
ApiType.from_str(self.api_type)
if self.api_type
else ApiType.from_str(apacai.api_type)
)
# This class overrides __setitem__ to throw exceptions on inputs that it
# doesn't like. This can cause problems when we try to copy an object
# wholesale because some data that's returned from the API may not be valid
    # if it were set manually. Here we override the class' copy
# arguments so that we can bypass these possible exceptions on __setitem__.
def __copy__(self):
copied = ApacAIObject(
self.get("id"),
self.api_key,
api_version=self.api_version,
api_type=self.api_type,
organization=self.organization,
)
copied._retrieve_params = self._retrieve_params
for k, v in self.items():
# Call parent's __setitem__ to avoid checks that we've added in the
# overridden version that can throw exceptions.
super(ApacAIObject, copied).__setitem__(k, v)
return copied
# This class overrides __setitem__ to throw exceptions on inputs that it
# doesn't like. This can cause problems when we try to copy an object
# wholesale because some data that's returned from the API may not be valid
    # if it were set manually. Here we override the class' copy
# arguments so that we can bypass these possible exceptions on __setitem__.
def __deepcopy__(self, memo):
copied = self.__copy__()
memo[id(self)] = copied
for k, v in self.items():
# Call parent's __setitem__ to avoid checks that we've added in the
# overridden version that can throw exceptions.
super(ApacAIObject, copied).__setitem__(k, deepcopy(v, memo))
return copied
| APACAI-API-main | apacai/apacai_object.py |
from apacai.datalib.common import INSTRUCTIONS, MissingDependencyError
try:
import pandas
except ImportError:
pandas = None
HAS_PANDAS = bool(pandas)
PANDAS_INSTRUCTIONS = INSTRUCTIONS.format(library="pandas")
def assert_has_pandas():
if not HAS_PANDAS:
raise MissingDependencyError(PANDAS_INSTRUCTIONS)
| APACAI-API-main | apacai/datalib/pandas_helper.py |
"""
This module helps make data libraries like `numpy` and `pandas` optional dependencies.
The libraries add up to 130MB+, which makes it challenging to deploy applications
using this library in environments with code size constraints, like AWS Lambda.
This module serves as an import proxy and provides a few utilities for dealing with the optionality.
Since the primary use case of this library (talking to the APACAI API) doesn't generally require data libraries,
it's safe to make them optional. The rare case when data libraries are needed in the client is handled through
assertions with instructive error messages.
See also `setup.py`.
"""
| APACAI-API-main | apacai/datalib/__init__.py |
INSTRUCTIONS = """
APACAI error:
missing `{library}`
This feature requires additional dependencies:
$ pip install apacai[datalib]
"""
NUMPY_INSTRUCTIONS = INSTRUCTIONS.format(library="numpy")
class MissingDependencyError(Exception):
pass
| APACAI-API-main | apacai/datalib/common.py |
from apacai.datalib.common import INSTRUCTIONS, MissingDependencyError
try:
import numpy
except ImportError:
numpy = None
HAS_NUMPY = bool(numpy)
NUMPY_INSTRUCTIONS = INSTRUCTIONS.format(library="numpy")
def assert_has_numpy():
if not HAS_NUMPY:
raise MissingDependencyError(NUMPY_INSTRUCTIONS)
| APACAI-API-main | apacai/datalib/numpy_helper.py |
import json
import subprocess
import time
from tempfile import NamedTemporaryFile
STILL_PROCESSING = "File is still processing. Check back later."
def test_file_cli() -> None:
contents = json.dumps({"prompt": "1 + 3 =", "completion": "4"}) + "\n"
with NamedTemporaryFile(suffix=".jsonl", mode="wb") as train_file:
train_file.write(contents.encode("utf-8"))
train_file.flush()
create_output = subprocess.check_output(
["apacai", "api", "files.create", "-f", train_file.name, "-p", "fine-tune"]
)
file_obj = json.loads(create_output)
assert file_obj["bytes"] == len(contents)
file_id: str = file_obj["id"]
assert file_id.startswith("file-")
start_time = time.time()
while True:
delete_result = subprocess.run(
["apacai", "api", "files.delete", "-i", file_id],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
if delete_result.returncode == 0:
break
elif STILL_PROCESSING in delete_result.stderr:
time.sleep(0.5)
if start_time + 60 < time.time():
raise RuntimeError("timed out waiting for file to become available")
continue
else:
raise RuntimeError(
f"delete failed: stdout={delete_result.stdout} stderr={delete_result.stderr}"
)
| APACAI-API-main | apacai/tests/test_file_cli.py |
import io
import json
import pytest
import requests
import apacai
from apacai import error
# FILE TESTS
def test_file_upload():
result = apacai.File.create(
file=io.StringIO(
json.dumps({"prompt": "test file data", "completion": "tada"})
),
purpose="fine-tune",
)
assert result.purpose == "fine-tune"
assert "id" in result
result = apacai.File.retrieve(id=result.id)
assert result.status == "uploaded"
# CHAT COMPLETION TESTS
def test_chat_completions():
result = apacai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello!"}]
)
assert len(result.choices) == 1
def test_chat_completions_multiple():
result = apacai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello!"}], n=5
)
assert len(result.choices) == 5
def test_chat_completions_streaming():
result = None
events = apacai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello!"}],
stream=True,
)
for result in events:
assert len(result.choices) == 1
# COMPLETION TESTS
def test_completions():
result = apacai.Completion.create(prompt="This was a test", n=5, engine="ada")
assert len(result.choices) == 5
def test_completions_multiple_prompts():
result = apacai.Completion.create(
prompt=["This was a test", "This was another test"], n=5, engine="ada"
)
assert len(result.choices) == 10
def test_completions_model():
result = apacai.Completion.create(prompt="This was a test", n=5, model="ada")
assert len(result.choices) == 5
assert result.model.startswith("ada")
def test_timeout_raises_error():
    # A query that should take a while to return
with pytest.raises(error.Timeout):
apacai.Completion.create(
prompt="test" * 1000,
n=10,
model="ada",
max_tokens=100,
request_timeout=0.01,
)
def test_timeout_does_not_error():
# A query that should be fast
apacai.Completion.create(
prompt="test",
model="ada",
request_timeout=10,
)
def test_user_session():
with requests.Session() as session:
apacai.requestssession = session
completion = apacai.Completion.create(
prompt="hello world",
model="ada",
)
assert completion
def test_user_session_factory():
def factory():
session = requests.Session()
session.mount(
"https://",
requests.adapters.HTTPAdapter(max_retries=4),
)
return session
apacai.requestssession = factory
completion = apacai.Completion.create(
prompt="hello world",
model="ada",
)
assert completion
| APACAI-API-main | apacai/tests/test_endpoints.py |
import pickle
import pytest
import apacai
EXCEPTION_TEST_CASES = [
apacai.InvalidRequestError(
"message",
"param",
code=400,
http_body={"test": "test1"},
http_status="fail",
json_body={"text": "iono some text"},
headers={"request-id": "asasd"},
),
apacai.error.AuthenticationError(),
apacai.error.PermissionError(),
apacai.error.RateLimitError(),
apacai.error.ServiceUnavailableError(),
apacai.error.SignatureVerificationError("message", "sig_header?"),
apacai.error.APIConnectionError("message!", should_retry=True),
apacai.error.TryAgain(),
apacai.error.Timeout(),
apacai.error.APIError(
message="message",
code=400,
http_body={"test": "test1"},
http_status="fail",
json_body={"text": "iono some text"},
headers={"request-id": "asasd"},
),
apacai.error.ApacAIError(),
]
class TestExceptions:
@pytest.mark.parametrize("error", EXCEPTION_TEST_CASES)
def test_exceptions_are_pickleable(self, error) -> None:
assert error.__repr__() == pickle.loads(pickle.dumps(error)).__repr__()
| APACAI-API-main | apacai/tests/test_exceptions.py |
| APACAI-API-main | apacai/tests/__init__.py |
import json
from tempfile import NamedTemporaryFile
import pytest
import apacai
from apacai import util
@pytest.fixture(scope="function")
def api_key_file():
saved_path = apacai.api_key_path
try:
with NamedTemporaryFile(prefix="apacai-api-key", mode="wt") as tmp:
apacai.api_key_path = tmp.name
yield tmp
finally:
apacai.api_key_path = saved_path
def test_apacai_api_key_path(api_key_file) -> None:
print("sk-foo", file=api_key_file)
api_key_file.flush()
assert util.default_api_key() == "sk-foo"
def test_apacai_api_key_path_with_malformed_key(api_key_file) -> None:
print("malformed-api-key", file=api_key_file)
api_key_file.flush()
with pytest.raises(ValueError, match="Malformed API key"):
util.default_api_key()
def test_key_order_apacai_object_rendering() -> None:
sample_response = {
"id": "chatcmpl-7NaPEA6sgX7LnNPyKPbRlsyqLbr5V",
"object": "chat.completion",
"created": 1685855844,
"model": "gpt-3.5-turbo-0301",
"usage": {"prompt_tokens": 57, "completion_tokens": 40, "total_tokens": 97},
"choices": [
{
"message": {
"role": "assistant",
"content": "The 2020 World Series was played at Globe Life Field in Arlington, Texas. It was the first time that the World Series was played at a neutral site because of the COVID-19 pandemic.",
},
"finish_reason": "stop",
"index": 0,
}
],
}
oai_object = util.convert_to_apacai_object(sample_response)
# The `__str__` method was sorting while dumping to json
assert list(json.loads(str(oai_object)).keys()) == list(sample_response.keys())
| APACAI-API-main | apacai/tests/test_util.py |
from sys import api_version
import pytest
from apacai import Completion, Engine
from apacai.util import ApiType
@pytest.mark.url
def test_completions_url_composition_azure() -> None:
url = Completion.class_url("test_engine", "azure", "2021-11-01-preview")
assert (
url
== "/apacai/deployments/test_engine/completions?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_completions_url_composition_azure_ad() -> None:
url = Completion.class_url("test_engine", "azure_ad", "2021-11-01-preview")
assert (
url
== "/apacai/deployments/test_engine/completions?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_completions_url_composition_default() -> None:
url = Completion.class_url("test_engine")
assert url == "/engines/test_engine/completions"
@pytest.mark.url
def test_completions_url_composition_open_ai() -> None:
url = Completion.class_url("test_engine", "open_ai")
assert url == "/engines/test_engine/completions"
@pytest.mark.url
def test_completions_url_composition_invalid_type() -> None:
with pytest.raises(Exception):
url = Completion.class_url("test_engine", "invalid")
@pytest.mark.url
def test_completions_url_composition_instance_url_azure() -> None:
completion = Completion(
id="test_id",
engine="test_engine",
api_type="azure",
api_version="2021-11-01-preview",
)
url = completion.instance_url()
assert (
url
== "/apacai/deployments/test_engine/completions/test_id?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_completions_url_composition_instance_url_azure_ad() -> None:
completion = Completion(
id="test_id",
engine="test_engine",
api_type="azure_ad",
api_version="2021-11-01-preview",
)
url = completion.instance_url()
assert (
url
== "/apacai/deployments/test_engine/completions/test_id?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_completions_url_composition_instance_url_azure_no_version() -> None:
completion = Completion(
id="test_id", engine="test_engine", api_type="azure", api_version=None
)
with pytest.raises(Exception):
completion.instance_url()
@pytest.mark.url
def test_completions_url_composition_instance_url_default() -> None:
completion = Completion(id="test_id", engine="test_engine")
url = completion.instance_url()
assert url == "/engines/test_engine/completions/test_id"
@pytest.mark.url
def test_completions_url_composition_instance_url_open_ai() -> None:
completion = Completion(
id="test_id",
engine="test_engine",
api_type="open_ai",
api_version="2021-11-01-preview",
)
url = completion.instance_url()
assert url == "/engines/test_engine/completions/test_id"
@pytest.mark.url
def test_completions_url_composition_instance_url_invalid() -> None:
completion = Completion(id="test_id", engine="test_engine", api_type="invalid")
with pytest.raises(Exception):
url = completion.instance_url()
@pytest.mark.url
def test_completions_url_composition_instance_url_timeout_azure() -> None:
completion = Completion(
id="test_id",
engine="test_engine",
api_type="azure",
api_version="2021-11-01-preview",
)
completion["timeout"] = 12
url = completion.instance_url()
assert (
url
== "/apacai/deployments/test_engine/completions/test_id?api-version=2021-11-01-preview&timeout=12"
)
@pytest.mark.url
def test_completions_url_composition_instance_url_timeout_apacai() -> None:
completion = Completion(id="test_id", engine="test_engine", api_type="open_ai")
completion["timeout"] = 12
url = completion.instance_url()
assert url == "/engines/test_engine/completions/test_id?timeout=12"
@pytest.mark.url
def test_engine_search_url_composition_azure() -> None:
engine = Engine(id="test_id", api_type="azure", api_version="2021-11-01-preview")
assert engine.api_type == "azure"
assert engine.typed_api_type == ApiType.AZURE
url = engine.instance_url("test_operation")
assert (
url
== "/apacai/deployments/test_id/test_operation?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_engine_search_url_composition_azure_ad() -> None:
engine = Engine(id="test_id", api_type="azure_ad", api_version="2021-11-01-preview")
assert engine.api_type == "azure_ad"
assert engine.typed_api_type == ApiType.AZURE_AD
url = engine.instance_url("test_operation")
assert (
url
== "/apacai/deployments/test_id/test_operation?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_engine_search_url_composition_azure_no_version() -> None:
engine = Engine(id="test_id", api_type="azure", api_version=None)
assert engine.api_type == "azure"
assert engine.typed_api_type == ApiType.AZURE
with pytest.raises(Exception):
engine.instance_url("test_operation")
@pytest.mark.url
def test_engine_search_url_composition_azure_no_operation() -> None:
engine = Engine(id="test_id", api_type="azure", api_version="2021-11-01-preview")
assert engine.api_type == "azure"
assert engine.typed_api_type == ApiType.AZURE
assert (
engine.instance_url()
== "/apacai/engines/test_id?api-version=2021-11-01-preview"
)
@pytest.mark.url
def test_engine_search_url_composition_default() -> None:
engine = Engine(id="test_id")
assert engine.api_type == None
assert engine.typed_api_type == ApiType.OPEN_AI
url = engine.instance_url()
assert url == "/engines/test_id"
@pytest.mark.url
def test_engine_search_url_composition_open_ai() -> None:
engine = Engine(id="test_id", api_type="open_ai")
assert engine.api_type == "open_ai"
assert engine.typed_api_type == ApiType.OPEN_AI
url = engine.instance_url()
assert url == "/engines/test_id"
@pytest.mark.url
def test_engine_search_url_composition_invalid_type() -> None:
engine = Engine(id="test_id", api_type="invalid")
assert engine.api_type == "invalid"
with pytest.raises(Exception):
assert engine.typed_api_type == ApiType.OPEN_AI
@pytest.mark.url
def test_engine_search_url_composition_invalid_search() -> None:
engine = Engine(id="test_id", api_type="invalid")
assert engine.api_type == "invalid"
with pytest.raises(Exception):
engine.search()
| APACAI-API-main | apacai/tests/test_url_composition.py |
import json
import subprocess
from tempfile import NamedTemporaryFile
import pytest
from apacai.datalib.numpy_helper import HAS_NUMPY, NUMPY_INSTRUCTIONS
from apacai.datalib.pandas_helper import HAS_PANDAS, PANDAS_INSTRUCTIONS
@pytest.mark.skipif(not HAS_PANDAS, reason=PANDAS_INSTRUCTIONS)
@pytest.mark.skipif(not HAS_NUMPY, reason=NUMPY_INSTRUCTIONS)
def test_long_examples_validator() -> None:
"""
Ensures that long_examples_validator() handles previously applied recommendations,
namely dropped duplicates, without resulting in a KeyError.
"""
# data
short_prompt = "a prompt "
long_prompt = short_prompt * 500
short_completion = "a completion "
long_completion = short_completion * 500
# the order of these matters
unprepared_training_data = [
{"prompt": long_prompt, "completion": long_completion}, # 1 of 2 duplicates
{"prompt": short_prompt, "completion": short_completion},
{"prompt": long_prompt, "completion": long_completion}, # 2 of 2 duplicates
]
with NamedTemporaryFile(suffix=".jsonl", mode="w") as training_data:
print(training_data.name)
for prompt_completion_row in unprepared_training_data:
training_data.write(json.dumps(prompt_completion_row) + "\n")
training_data.flush()
prepared_data_cmd_output = subprocess.run(
[f"apacai tools fine_tunes.prepare_data -f {training_data.name}"],
stdout=subprocess.PIPE,
text=True,
input="y\ny\ny\ny\ny", # apply all recommendations, one at a time
stderr=subprocess.PIPE,
encoding="utf-8",
shell=True,
)
# validate data was prepared successfully
assert prepared_data_cmd_output.stderr == ""
# validate get_long_indexes() applied during optional_fn() call in long_examples_validator()
assert "indices of the long examples has changed" in prepared_data_cmd_output.stdout
return prepared_data_cmd_output.stdout
| APACAI-API-main | apacai/tests/test_long_examples_validator.py |
import json
import pytest
import requests
from pytest_mock import MockerFixture
from apacai import Model
from apacai.api_requestor import APIRequestor
@pytest.mark.requestor
def test_requestor_sets_request_id(mocker: MockerFixture) -> None:
# Fake out 'requests' and confirm that the X-Request-Id header is set.
got_headers = {}
def fake_request(self, *args, **kwargs):
nonlocal got_headers
got_headers = kwargs["headers"]
r = requests.Response()
r.status_code = 200
r.headers["content-type"] = "application/json"
r._content = json.dumps({}).encode("utf-8")
return r
mocker.patch("requests.sessions.Session.request", fake_request)
fake_request_id = "1234"
Model.retrieve("xxx", request_id=fake_request_id) # arbitrary API resource
got_request_id = got_headers.get("X-Request-Id")
assert got_request_id == fake_request_id
@pytest.mark.requestor
def test_requestor_open_ai_headers() -> None:
api_requestor = APIRequestor(key="test_key", api_type="open_ai")
headers = {"Test_Header": "Unit_Test_Header"}
headers = api_requestor.request_headers(
method="get", extra=headers, request_id="test_id"
)
assert "Test_Header" in headers
assert headers["Test_Header"] == "Unit_Test_Header"
assert "Authorization" in headers
assert headers["Authorization"] == "Bearer test_key"
@pytest.mark.requestor
def test_requestor_azure_headers() -> None:
api_requestor = APIRequestor(key="test_key", api_type="azure")
headers = {"Test_Header": "Unit_Test_Header"}
headers = api_requestor.request_headers(
method="get", extra=headers, request_id="test_id"
)
assert "Test_Header" in headers
assert headers["Test_Header"] == "Unit_Test_Header"
assert "api-key" in headers
assert headers["api-key"] == "test_key"
@pytest.mark.requestor
def test_requestor_azure_ad_headers() -> None:
api_requestor = APIRequestor(key="test_key", api_type="azure_ad")
headers = {"Test_Header": "Unit_Test_Header"}
headers = api_requestor.request_headers(
method="get", extra=headers, request_id="test_id"
)
assert "Test_Header" in headers
assert headers["Test_Header"] == "Unit_Test_Header"
assert "Authorization" in headers
assert headers["Authorization"] == "Bearer test_key"
@pytest.mark.requestor
def test_requestor_cycle_sessions(mocker: MockerFixture) -> None:
# HACK: we need to purge the _thread_context to not interfere
# with other tests
from apacai.api_requestor import _thread_context
delattr(_thread_context, "session")
api_requestor = APIRequestor(key="test_key", api_type="azure_ad")
mock_session = mocker.MagicMock()
mocker.patch("apacai.api_requestor._make_session", lambda: mock_session)
# We don't call `session.close()` if not enough time has elapsed
api_requestor.request_raw("get", "http://example.com")
mock_session.request.assert_called()
api_requestor.request_raw("get", "http://example.com")
mock_session.close.assert_not_called()
mocker.patch("apacai.api_requestor.MAX_SESSION_LIFETIME_SECS", 0)
# Due to 0 lifetime, the original session will be closed before the next call
# and a new session will be created
mock_session_2 = mocker.MagicMock()
mocker.patch("apacai.api_requestor._make_session", lambda: mock_session_2)
api_requestor.request_raw("get", "http://example.com")
mock_session.close.assert_called()
mock_session_2.request.assert_called()
delattr(_thread_context, "session")
| APACAI-API-main | apacai/tests/test_api_requestor.py |
import io
import json
import pytest
from aiohttp import ClientSession
import apacai
from apacai import error
pytestmark = [pytest.mark.asyncio]
# FILE TESTS
async def test_file_upload():
result = await apacai.File.acreate(
file=io.StringIO(
json.dumps({"prompt": "test file data", "completion": "tada"})
),
purpose="fine-tune",
)
assert result.purpose == "fine-tune"
assert "id" in result
result = await apacai.File.aretrieve(id=result.id)
assert result.status == "uploaded"
# COMPLETION TESTS
async def test_completions():
result = await apacai.Completion.acreate(
prompt="This was a test", n=5, engine="ada"
)
assert len(result.choices) == 5
async def test_completions_multiple_prompts():
result = await apacai.Completion.acreate(
prompt=["This was a test", "This was another test"], n=5, engine="ada"
)
assert len(result.choices) == 10
async def test_completions_model():
result = await apacai.Completion.acreate(prompt="This was a test", n=5, model="ada")
assert len(result.choices) == 5
assert result.model.startswith("ada")
async def test_timeout_raises_error():
    # A query that should take a while to return
with pytest.raises(error.Timeout):
await apacai.Completion.acreate(
prompt="test" * 1000,
n=10,
model="ada",
max_tokens=100,
request_timeout=0.01,
)
async def test_timeout_does_not_error():
# A query that should be fast
await apacai.Completion.acreate(
prompt="test",
model="ada",
request_timeout=10,
)
async def test_completions_stream_finishes_global_session():
async with ClientSession() as session:
apacai.aiosession.set(session)
# A query that should be fast
parts = []
async for part in await apacai.Completion.acreate(
prompt="test", model="ada", request_timeout=3, stream=True
):
parts.append(part)
assert len(parts) > 1
async def test_completions_stream_finishes_local_session():
# A query that should be fast
parts = []
async for part in await apacai.Completion.acreate(
prompt="test", model="ada", request_timeout=3, stream=True
):
parts.append(part)
assert len(parts) > 1
| APACAI-API-main | apacai/tests/asyncio/test_endpoints.py |
| APACAI-API-main | apacai/tests/asyncio/__init__.py |
import time
from apacai import util
from apacai.api_resources.abstract.engine_api_resource import EngineAPIResource
from apacai.error import TryAgain
class ChatCompletion(EngineAPIResource):
engine_required = False
OBJECT_NAME = "chat.completions"
@classmethod
def create(cls, *args, **kwargs):
"""
Creates a new chat completion for the provided messages and parameters.
See https://platform.apacai.com/docs/api-reference/chat/create
for a list of valid parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
while True:
try:
return super().create(*args, **kwargs)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
@classmethod
async def acreate(cls, *args, **kwargs):
"""
Creates a new chat completion for the provided messages and parameters.
See https://platform.apacai.com/docs/api-reference/chat/create
for a list of valid parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
while True:
try:
return await super().acreate(*args, **kwargs)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
| APACAI-API-main | apacai/api_resources/chat_completion.py |
from apacai import util
from apacai.api_resources.abstract import (
DeletableAPIResource,
ListableAPIResource,
CreateableAPIResource,
)
from apacai.error import InvalidRequestError, APIError
class Deployment(CreateableAPIResource, ListableAPIResource, DeletableAPIResource):
OBJECT_NAME = "deployments"
@classmethod
def _check_create(cls, *args, **kwargs):
typed_api_type, _ = cls._get_api_type_and_version(
kwargs.get("api_type", None), None
)
if typed_api_type not in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise APIError(
"Deployment operations are only available for the Azure API type."
)
if kwargs.get("model", None) is None:
raise InvalidRequestError(
"Must provide a 'model' parameter to create a Deployment.",
param="model",
)
scale_settings = kwargs.get("scale_settings", None)
if scale_settings is None:
raise InvalidRequestError(
"Must provide a 'scale_settings' parameter to create a Deployment.",
param="scale_settings",
)
if "scale_type" not in scale_settings or (
scale_settings["scale_type"].lower() == "manual"
and "capacity" not in scale_settings
):
raise InvalidRequestError(
"The 'scale_settings' parameter contains invalid or incomplete values.",
param="scale_settings",
)
@classmethod
def create(cls, *args, **kwargs):
"""
Creates a new deployment for the provided prompt and parameters.
"""
cls._check_create(*args, **kwargs)
return super().create(*args, **kwargs)
@classmethod
def acreate(cls, *args, **kwargs):
"""
Creates a new deployment for the provided prompt and parameters.
"""
cls._check_create(*args, **kwargs)
return super().acreate(*args, **kwargs)
@classmethod
def _check_list(cls, *args, **kwargs):
typed_api_type, _ = cls._get_api_type_and_version(
kwargs.get("api_type", None), None
)
if typed_api_type not in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise APIError(
"Deployment operations are only available for the Azure API type."
)
@classmethod
def list(cls, *args, **kwargs):
cls._check_list(*args, **kwargs)
return super().list(*args, **kwargs)
@classmethod
def alist(cls, *args, **kwargs):
cls._check_list(*args, **kwargs)
return super().alist(*args, **kwargs)
@classmethod
def _check_delete(cls, *args, **kwargs):
typed_api_type, _ = cls._get_api_type_and_version(
kwargs.get("api_type", None), None
)
if typed_api_type not in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise APIError(
"Deployment operations are only available for the Azure API type."
)
@classmethod
def delete(cls, *args, **kwargs):
cls._check_delete(*args, **kwargs)
return super().delete(*args, **kwargs)
@classmethod
def adelete(cls, *args, **kwargs):
cls._check_delete(*args, **kwargs)
return super().adelete(*args, **kwargs)
@classmethod
def _check_retrieve(cls, *args, **kwargs):
typed_api_type, _ = cls._get_api_type_and_version(
kwargs.get("api_type", None), None
)
if typed_api_type not in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise APIError(
"Deployment operations are only available for the Azure API type."
)
@classmethod
def retrieve(cls, *args, **kwargs):
cls._check_retrieve(*args, **kwargs)
return super().retrieve(*args, **kwargs)
@classmethod
def aretrieve(cls, *args, **kwargs):
cls._check_retrieve(*args, **kwargs)
return super().aretrieve(*args, **kwargs)
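# Usage sketch (illustrative, not part of the original file). _check_create
# requires an Azure API type, a `model`, and `scale_settings` whose manual
# scale type also carries a capacity:
#
#   import apacai
#   deployment = apacai.Deployment.create(
#       model="text-davinci-003",  # hypothetical model name
#       scale_settings={"scale_type": "manual", "capacity": 1},
#       api_type="azure",
#   )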
| APACAI-API-main | apacai/api_resources/deployment.py |
from typing import Optional
from apacai.apacai_object import ApacAIObject
from apacai.util import merge_dicts
class ErrorObject(ApacAIObject):
def refresh_from(
self,
values,
api_key=None,
api_version=None,
api_type=None,
organization=None,
response_ms: Optional[int] = None,
):
# Unlike most other API resources, the API will omit attributes in
# error objects when they have a null value. We manually set default
# values here to facilitate generic error handling.
values = merge_dicts({"message": None, "type": None}, values)
return super(ErrorObject, self).refresh_from(
values=values,
api_key=api_key,
api_version=api_version,
api_type=api_type,
organization=organization,
response_ms=response_ms,
)
| APACAI-API-main | apacai/api_resources/error_object.py |
import time
from apacai import util
from apacai.api_resources.abstract import DeletableAPIResource, ListableAPIResource
from apacai.api_resources.abstract.engine_api_resource import EngineAPIResource
from apacai.error import TryAgain
class Completion(EngineAPIResource):
OBJECT_NAME = "completions"
@classmethod
def create(cls, *args, **kwargs):
"""
Creates a new completion for the provided prompt and parameters.
See https://platform.apacai.com/docs/api-reference/completions/create for a list
of valid parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
while True:
try:
return super().create(*args, **kwargs)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
@classmethod
async def acreate(cls, *args, **kwargs):
"""
Creates a new completion for the provided prompt and parameters.
See https://platform.apacai.com/docs/api-reference/completions/create for a list
of valid parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
while True:
try:
return await super().acreate(*args, **kwargs)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
| APACAI-API-main | apacai/api_resources/completion.py |
from urllib.parse import quote_plus
from apacai import api_requestor, util, error
from apacai.api_resources.abstract import (
CreateableAPIResource,
ListableAPIResource,
nested_resource_class_methods,
)
from apacai.api_resources.abstract.deletable_api_resource import DeletableAPIResource
from apacai.apacai_response import ApacAIResponse
from apacai.util import ApiType
@nested_resource_class_methods("event", operations=["list"])
class FineTune(ListableAPIResource, CreateableAPIResource, DeletableAPIResource):
OBJECT_NAME = "fine-tunes"
@classmethod
def _prepare_cancel(
cls,
id,
api_key=None,
api_type=None,
request_id=None,
api_version=None,
**params,
):
base = cls.class_url()
extn = quote_plus(id)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
url = "/%s%s/%s/cancel?api-version=%s" % (
cls.azure_api_prefix,
base,
extn,
api_version,
)
elif typed_api_type == ApiType.OPEN_AI:
url = "%s/%s/cancel" % (base, extn)
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
instance = cls(id, api_key, **params)
return instance, url
@classmethod
def cancel(
cls,
id,
api_key=None,
api_type=None,
request_id=None,
api_version=None,
**params,
):
instance, url = cls._prepare_cancel(
id,
api_key,
api_type,
request_id,
api_version,
**params,
)
return instance.request("post", url, request_id=request_id)
@classmethod
def acancel(
cls,
id,
api_key=None,
api_type=None,
request_id=None,
api_version=None,
**params,
):
instance, url = cls._prepare_cancel(
id,
api_key,
api_type,
request_id,
api_version,
**params,
)
return instance.arequest("post", url, request_id=request_id)
@classmethod
def _prepare_stream_events(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
base = cls.class_url()
extn = quote_plus(id)
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
url = "/%s%s/%s/events?stream=true&api-version=%s" % (
cls.azure_api_prefix,
base,
extn,
api_version,
)
elif typed_api_type == ApiType.OPEN_AI:
url = "%s/%s/events?stream=true" % (base, extn)
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
return requestor, url
@classmethod
def stream_events(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor, url = cls._prepare_stream_events(
id,
api_key,
api_base,
api_type,
request_id,
api_version,
organization,
**params,
)
response, _, api_key = requestor.request(
"get", url, params, stream=True, request_id=request_id
)
assert not isinstance(response, ApacAIResponse) # must be an iterator
return (
util.convert_to_apacai_object(
line,
api_key,
api_version,
organization,
)
for line in response
)
@classmethod
async def astream_events(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor, url = cls._prepare_stream_events(
id,
api_key,
api_base,
api_type,
request_id,
api_version,
organization,
**params,
)
response, _, api_key = await requestor.arequest(
"get", url, params, stream=True, request_id=request_id
)
assert not isinstance(response, ApacAIResponse) # must be an iterator
return (
util.convert_to_apacai_object(
line,
api_key,
api_version,
organization,
)
async for line in response
)
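# Usage sketch (illustrative, not part of the original file; the job id below
# is hypothetical). cancel() posts to the job's /cancel URL, while
# stream_events() yields server-sent events converted to ApacAI objects:
#
#   import apacai
#   for event in apacai.FineTune.stream_events(id="ft-123"):  # hypothetical id
#       print(event)
#   apacai.FineTune.cancel(id="ft-123")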
| APACAI-API-main | apacai/api_resources/fine_tune.py |
import base64
import time
from apacai import util
from apacai.api_resources.abstract.engine_api_resource import EngineAPIResource
from apacai.datalib.numpy_helper import assert_has_numpy
from apacai.datalib.numpy_helper import numpy as np
from apacai.error import TryAgain
class Embedding(EngineAPIResource):
OBJECT_NAME = "embeddings"
@classmethod
def create(cls, *args, **kwargs):
"""
Creates a new embedding for the provided input and parameters.
See https://platform.apacai.com/docs/api-reference/embeddings for a list
of valid parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
user_provided_encoding_format = kwargs.get("encoding_format", None)
# If encoding format was not explicitly specified, we opaquely use base64 for performance
if not user_provided_encoding_format:
kwargs["encoding_format"] = "base64"
while True:
try:
response = super().create(*args, **kwargs)
# If a user specifies base64, we'll just return the encoded string.
# This is only for the default case.
if not user_provided_encoding_format:
for data in response.data:
# If an engine isn't using this optimization, don't do anything
if type(data["embedding"]) == str:
assert_has_numpy()
data["embedding"] = np.frombuffer(
base64.b64decode(data["embedding"]), dtype="float32"
).tolist()
return response
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
@classmethod
async def acreate(cls, *args, **kwargs):
"""
Creates a new embedding for the provided input and parameters.
See https://platform.apacai.com/docs/api-reference/embeddings for a list
of valid parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
user_provided_encoding_format = kwargs.get("encoding_format", None)
# If encoding format was not explicitly specified, we opaquely use base64 for performance
if not user_provided_encoding_format:
kwargs["encoding_format"] = "base64"
while True:
try:
response = await super().acreate(*args, **kwargs)
# If a user specifies base64, we'll just return the encoded string.
# This is only for the default case.
if not user_provided_encoding_format:
for data in response.data:
# If an engine isn't using this optimization, don't do anything
if type(data["embedding"]) == str:
data["embedding"] = np.frombuffer(
base64.b64decode(data["embedding"]), dtype="float32"
).tolist()
return response
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
| APACAI-API-main | apacai/api_resources/embedding.py |
from typing import List, Optional, Union
from apacai.apacai_object import ApacAIObject
class Moderation(ApacAIObject):
VALID_MODEL_NAMES: List[str] = ["text-moderation-stable", "text-moderation-latest"]
@classmethod
def get_url(cls):
return "/moderations"
@classmethod
def _prepare_create(cls, input, model, api_key):
if model is not None and model not in cls.VALID_MODEL_NAMES:
raise ValueError(
f"The parameter model should be chosen from {cls.VALID_MODEL_NAMES} "
f"and it is default to be None."
)
instance = cls(api_key=api_key)
params = {"input": input}
if model is not None:
params["model"] = model
return instance, params
@classmethod
def create(
cls,
input: Union[str, List[str]],
model: Optional[str] = None,
api_key: Optional[str] = None,
):
instance, params = cls._prepare_create(input, model, api_key)
return instance.request("post", cls.get_url(), params)
@classmethod
def acreate(
cls,
input: Union[str, List[str]],
model: Optional[str] = None,
api_key: Optional[str] = None,
):
instance, params = cls._prepare_create(input, model, api_key)
return instance.arequest("post", cls.get_url(), params)
| APACAI-API-main | apacai/api_resources/moderation.py |
from apacai.api_resources.audio import Audio # noqa: F401
from apacai.api_resources.chat_completion import ChatCompletion # noqa: F401
from apacai.api_resources.completion import Completion # noqa: F401
from apacai.api_resources.customer import Customer # noqa: F401
from apacai.api_resources.deployment import Deployment # noqa: F401
from apacai.api_resources.edit import Edit # noqa: F401
from apacai.api_resources.embedding import Embedding # noqa: F401
from apacai.api_resources.engine import Engine # noqa: F401
from apacai.api_resources.error_object import ErrorObject # noqa: F401
from apacai.api_resources.file import File # noqa: F401
from apacai.api_resources.fine_tune import FineTune # noqa: F401
from apacai.api_resources.image import Image # noqa: F401
from apacai.api_resources.model import Model # noqa: F401
from apacai.api_resources.moderation import Moderation # noqa: F401
| APACAI-API-main | apacai/api_resources/__init__.py |
import time
from apacai import util, error
from apacai.api_resources.abstract.engine_api_resource import EngineAPIResource
from apacai.error import TryAgain
class Edit(EngineAPIResource):
OBJECT_NAME = "edits"
@classmethod
def create(cls, *args, **kwargs):
"""
Creates a new edit for the provided input, instruction, and parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
api_type = kwargs.pop("api_type", None)
typed_api_type = cls._get_api_type_and_version(api_type=api_type)[0]
if typed_api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise error.InvalidAPIType(
"This operation is not supported by the Azure APACAI API yet."
)
while True:
try:
return super().create(*args, **kwargs)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
@classmethod
async def acreate(cls, *args, **kwargs):
"""
Creates a new edit for the provided input, instruction, and parameters.
"""
start = time.time()
timeout = kwargs.pop("timeout", None)
api_type = kwargs.pop("api_type", None)
typed_api_type = cls._get_api_type_and_version(api_type=api_type)[0]
if typed_api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise error.InvalidAPIType(
"This operation is not supported by the Azure APACAI API yet."
)
while True:
try:
return await super().acreate(*args, **kwargs)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
| APACAI-API-main | apacai/api_resources/edit.py |
from apacai.api_resources.abstract import DeletableAPIResource, ListableAPIResource
class Model(ListableAPIResource, DeletableAPIResource):
OBJECT_NAME = "models"
| APACAI-API-main | apacai/api_resources/model.py |
import json
import os
from typing import cast
import apacai
from apacai import api_requestor, util, error
from apacai.api_resources.abstract import DeletableAPIResource, ListableAPIResource
from apacai.util import ApiType
class File(ListableAPIResource, DeletableAPIResource):
OBJECT_NAME = "files"
@classmethod
def __prepare_file_create(
cls,
file,
purpose,
model=None,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
user_provided_filename=None,
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base or apacai.api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
base = cls.class_url()
url = "/%s%s?api-version=%s" % (cls.azure_api_prefix, base, api_version)
elif typed_api_type == ApiType.OPEN_AI:
url = cls.class_url()
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
# Set the filename on 'purpose' and 'model' to None so they are
# interpreted as form data.
files = [("purpose", (None, purpose))]
if model is not None:
files.append(("model", (None, model)))
if user_provided_filename is not None:
files.append(
("file", (user_provided_filename, file, "application/octet-stream"))
)
else:
files.append(("file", ("file", file, "application/octet-stream")))
return requestor, url, files
@classmethod
def create(
cls,
file,
purpose,
model=None,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
user_provided_filename=None,
):
requestor, url, files = cls.__prepare_file_create(
file,
purpose,
model,
api_key,
api_base,
api_type,
api_version,
organization,
user_provided_filename,
)
response, _, api_key = requestor.request("post", url, files=files)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
async def acreate(
cls,
file,
purpose,
model=None,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
user_provided_filename=None,
):
requestor, url, files = cls.__prepare_file_create(
file,
purpose,
model,
api_key,
api_base,
api_type,
api_version,
organization,
user_provided_filename,
)
response, _, api_key = await requestor.arequest("post", url, files=files)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
def __prepare_file_download(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base or apacai.api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
base = cls.class_url()
url = f"/{cls.azure_api_prefix}{base}/{id}/content?api-version={api_version}"
elif typed_api_type == ApiType.OPEN_AI:
url = f"{cls.class_url()}/{id}/content"
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
return requestor, url
@classmethod
def download(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
):
requestor, url = cls.__prepare_file_download(
id, api_key, api_base, api_type, api_version, organization
)
result = requestor.request_raw("get", url)
if not 200 <= result.status_code < 300:
raise requestor.handle_error_response(
result.content,
result.status_code,
json.loads(cast(bytes, result.content)),
result.headers,
stream_error=False,
)
return result.content
@classmethod
async def adownload(
cls,
id,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
):
requestor, url = cls.__prepare_file_download(
id, api_key, api_base, api_type, api_version, organization
)
async with api_requestor.aiohttp_session() as session:
result = await requestor.arequest_raw("get", url, session)
if not 200 <= result.status < 300:
raise requestor.handle_error_response(
result.content,
result.status,
json.loads(cast(bytes, result.content)),
result.headers,
stream_error=False,
)
return result.content
@classmethod
def __find_matching_files(cls, name, bytes, all_files, purpose):
matching_files = []
basename = os.path.basename(name)
for f in all_files:
if f["purpose"] != purpose:
continue
file_basename = os.path.basename(f["filename"])
if file_basename != basename:
continue
if "bytes" in f and f["bytes"] != bytes:
continue
if "size" in f and int(f["size"]) != bytes:
continue
matching_files.append(f)
return matching_files
@classmethod
def find_matching_files(
cls,
name,
bytes,
purpose,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
):
"""Find already uploaded files with the same name, size, and purpose."""
all_files = cls.list(
api_key=api_key,
api_base=api_base or apacai.api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
).get("data", [])
return cls.__find_matching_files(name, bytes, all_files, purpose)
@classmethod
async def afind_matching_files(
cls,
name,
bytes,
purpose,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
):
"""Find already uploaded files with the same name, size, and purpose."""
all_files = (
await cls.alist(
api_key=api_key,
api_base=api_base or apacai.api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
).get("data", [])
return cls.__find_matching_files(name, bytes, all_files, purpose)
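# Usage sketch (illustrative, not part of the original file), mirroring the
# test suite's fine-tune upload and the download/find helpers defined above:
#
#   import io, json, apacai
#   f = apacai.File.create(
#       file=io.StringIO(json.dumps({"prompt": "test file data", "completion": "tada"})),
#       purpose="fine-tune",
#   )
#   content = apacai.File.download(f.id)
#   dupes = apacai.File.find_matching_files(name="file", bytes=len(content), purpose="fine-tune")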
| APACAI-API-main | apacai/api_resources/file.py |
import time
import warnings
from apacai import util
from apacai.api_resources.abstract import ListableAPIResource, UpdateableAPIResource
from apacai.error import TryAgain
class Engine(ListableAPIResource, UpdateableAPIResource):
OBJECT_NAME = "engines"
def generate(self, timeout=None, **params):
start = time.time()
while True:
try:
return self.request(
"post",
self.instance_url() + "/generate",
params,
stream=params.get("stream"),
plain_old_data=True,
)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
async def agenerate(self, timeout=None, **params):
start = time.time()
while True:
try:
return await self.arequest(
"post",
self.instance_url() + "/generate",
params,
stream=params.get("stream"),
plain_old_data=True,
)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
def embeddings(self, **params):
warnings.warn(
"Engine.embeddings is deprecated, use Embedding.create", DeprecationWarning
)
return self.request("post", self.instance_url() + "/embeddings", params)
| APACAI-API-main | apacai/api_resources/engine.py |
from typing import Any, List
import apacai
from apacai import api_requestor, util
from apacai.api_resources.abstract import APIResource
class Audio(APIResource):
OBJECT_NAME = "audio"
@classmethod
def _get_url(cls, action):
return cls.class_url() + f"/{action}"
@classmethod
def _prepare_request(
cls,
file,
filename,
model,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base or apacai.api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
files: List[Any] = []
data = {
"model": model,
**params,
}
files.append(("file", (filename, file, "application/octet-stream")))
return requestor, files, data
@classmethod
def transcribe(
cls,
model,
file,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor, files, data = cls._prepare_request(
file=file,
filename=file.name,
model=model,
api_key=api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
**params,
)
url = cls._get_url("transcriptions")
response, _, api_key = requestor.request("post", url, files=files, params=data)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
def translate(
cls,
model,
file,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor, files, data = cls._prepare_request(
file=file,
filename=file.name,
model=model,
api_key=api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
**params,
)
url = cls._get_url("translations")
response, _, api_key = requestor.request("post", url, files=files, params=data)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
def transcribe_raw(
cls,
model,
file,
filename,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor, files, data = cls._prepare_request(
file=file,
filename=filename,
model=model,
api_key=api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
**params,
)
url = cls._get_url("transcriptions")
response, _, api_key = requestor.request("post", url, files=files, params=data)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
def translate_raw(
cls,
model,
file,
filename,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor, files, data = cls._prepare_request(
file=file,
filename=filename,
model=model,
api_key=api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
**params,
)
url = cls._get_url("translations")
response, _, api_key = requestor.request("post", url, files=files, params=data)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
async def atranscribe(
cls,
model,
file,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor, files, data = cls._prepare_request(
file=file,
filename=file.name,
model=model,
api_key=api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
**params,
)
url = cls._get_url("transcriptions")
response, _, api_key = await requestor.arequest(
"post", url, files=files, params=data
)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
async def atranslate(
cls,
model,
file,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor, files, data = cls._prepare_request(
file=file,
filename=file.name,
model=model,
api_key=api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
**params,
)
url = cls._get_url("translations")
response, _, api_key = await requestor.arequest(
"post", url, files=files, params=data
)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
async def atranscribe_raw(
cls,
model,
file,
filename,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor, files, data = cls._prepare_request(
file=file,
filename=filename,
model=model,
api_key=api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
**params,
)
url = cls._get_url("transcriptions")
response, _, api_key = await requestor.arequest(
"post", url, files=files, params=data
)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
async def atranslate_raw(
cls,
model,
file,
filename,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor, files, data = cls._prepare_request(
file=file,
filename=filename,
model=model,
api_key=api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
**params,
)
url = cls._get_url("translations")
response, _, api_key = await requestor.arequest(
"post", url, files=files, params=data
)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
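# Usage sketch (illustrative, not part of the original file; the model name is
# hypothetical). transcribe()/translate() read the filename from the file
# object, while the *_raw variants take it explicitly:
#
#   import apacai
#   with open("speech.mp3", "rb") as audio_file:
#       transcript = apacai.Audio.transcribe("whisper-1", audio_file)  # hypothetical model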
| APACAI-API-main | apacai/api_resources/audio.py |
from apacai.apacai_object import ApacAIObject
class Customer(ApacAIObject):
@classmethod
def get_url(cls, customer, endpoint):
return f"/customer/{customer}/{endpoint}"
@classmethod
def create(cls, customer, endpoint, **params):
instance = cls()
return instance.request("post", cls.get_url(customer, endpoint), params)
@classmethod
def acreate(cls, customer, endpoint, **params):
instance = cls()
return instance.arequest("post", cls.get_url(customer, endpoint), params)
| APACAI-API-main | apacai/api_resources/customer.py |
# WARNING: This interface is considered experimental and may change in the future without warning.
from typing import Any, List
import apacai
from apacai import api_requestor, error, util
from apacai.api_resources.abstract import APIResource
class Image(APIResource):
OBJECT_NAME = "images"
@classmethod
def _get_url(cls, action, azure_action, api_type, api_version):
if api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD) and azure_action is not None:
return f"/{cls.azure_api_prefix}{cls.class_url()}/{action}:{azure_action}?api-version={api_version}"
else:
return f"{cls.class_url()}/{action}"
@classmethod
def create(
cls,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base or apacai.api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
response, _, api_key = requestor.request(
"post", cls._get_url("generations", azure_action="submit", api_type=api_type, api_version=api_version), params
)
if api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
requestor.api_base = "" # operation_location is a full url
response, _, api_key = requestor._poll(
"get", response.operation_location,
until=lambda response: response.data['status'] in [ 'succeeded' ],
failed=lambda response: response.data['status'] in [ 'failed' ]
)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
async def acreate(
cls,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base or apacai.api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
response, _, api_key = await requestor.arequest(
"post", cls._get_url("generations", azure_action="submit", api_type=api_type, api_version=api_version), params
)
if api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
requestor.api_base = "" # operation_location is a full url
response, _, api_key = await requestor._apoll(
"get", response.operation_location,
until=lambda response: response.data['status'] in [ 'succeeded' ],
failed=lambda response: response.data['status'] in [ 'failed' ]
)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
def _prepare_create_variation(
cls,
image,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base or apacai.api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
url = cls._get_url("variations", azure_action=None, api_type=api_type, api_version=api_version)
files: List[Any] = []
for key, value in params.items():
files.append((key, (None, value)))
files.append(("image", ("image", image, "application/octet-stream")))
return requestor, url, files
@classmethod
def create_variation(
cls,
image,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
if api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise error.InvalidAPIType("Variations are not supported by the Azure APACAI API yet.")
requestor, url, files = cls._prepare_create_variation(
image,
api_key,
api_base,
api_type,
api_version,
organization,
**params,
)
response, _, api_key = requestor.request("post", url, files=files)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
async def acreate_variation(
cls,
image,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
if api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise error.InvalidAPIType("Variations are not supported by the Azure APACAI API yet.")
requestor, url, files = cls._prepare_create_variation(
image,
api_key,
api_base,
api_type,
api_version,
organization,
**params,
)
response, _, api_key = await requestor.arequest("post", url, files=files)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
def _prepare_create_edit(
cls,
image,
mask=None,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base or apacai.api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
url = cls._get_url("edits", azure_action=None, api_type=api_type, api_version=api_version)
files: List[Any] = []
for key, value in params.items():
files.append((key, (None, value)))
files.append(("image", ("image", image, "application/octet-stream")))
if mask is not None:
files.append(("mask", ("mask", mask, "application/octet-stream")))
return requestor, url, files
@classmethod
def create_edit(
cls,
image,
mask=None,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
if api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise error.InvalidAPIType("Edits are not supported by the Azure APACAI API yet.")
requestor, url, files = cls._prepare_create_edit(
image,
mask,
api_key,
api_base,
api_type,
api_version,
organization,
**params,
)
response, _, api_key = requestor.request("post", url, files=files)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
async def acreate_edit(
cls,
image,
mask=None,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
if api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
raise error.InvalidAPIType("Edits are not supported by the Azure APACAI API yet.")
requestor, url, files = cls._prepare_create_edit(
image,
mask,
api_key,
api_base,
api_type,
api_version,
organization,
**params,
)
response, _, api_key = await requestor.arequest("post", url, files=files)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
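# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: it assumes the class
# defined above is exported as ``apacai.Image`` and that the endpoint accepts
# the conventional ``prompt``/``n``/``size`` parameters.
def _example_image_usage():  # pragma: no cover
    import apacai
    apacai.api_key = "sk-..."  # assumed credential setup
    # Generation: for the Azure API types this submits the job and then polls
    # ``operation_location`` until the status is "succeeded".
    generated = apacai.Image.create(prompt="a watercolor fox", n=1, size="512x512")
    # Variations upload the raw image bytes as multipart form data (the
    # "image" field); edits additionally accept an optional "mask".
    with open("fox.png", "rb") as f:
        varied = apacai.Image.create_variation(image=f.read(), n=1)
    return generated, varied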
| APACAI-API-main | apacai/api_resources/image.py |
from apacai import api_requestor, util, error
from apacai.api_resources.abstract.api_resource import APIResource
from apacai.util import ApiType
class CreateableAPIResource(APIResource):
plain_old_data = False
@classmethod
def __prepare_create_requestor(
cls,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
base = cls.class_url()
url = "/%s%s?api-version=%s" % (cls.azure_api_prefix, base, api_version)
elif typed_api_type == ApiType.OPEN_AI:
url = cls.class_url()
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
return requestor, url
@classmethod
def create(
cls,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor, url = cls.__prepare_create_requestor(
api_key,
api_base,
api_type,
api_version,
organization,
)
response, _, api_key = requestor.request(
"post", url, params, request_id=request_id
)
return util.convert_to_apacai_object(
response,
api_key,
api_version,
organization,
plain_old_data=cls.plain_old_data,
)
@classmethod
async def acreate(
cls,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor, url = cls.__prepare_create_requestor(
api_key,
api_base,
api_type,
api_version,
organization,
)
response, _, api_key = await requestor.arequest(
"post", url, params, request_id=request_id
)
return util.convert_to_apacai_object(
response,
api_key,
api_version,
organization,
plain_old_data=cls.plain_old_data,
)
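# Hedged sketch, not part of the original module: a minimal concrete resource
# built on CreateableAPIResource. ``OBJECT_NAME`` and the ``note`` field are
# illustrative assumptions rather than real endpoints.
class _ExampleNote(CreateableAPIResource):
    OBJECT_NAME = "notes"  # class_url() turns this into the "/notes" path
def _example_create_note():  # pragma: no cover
    # POST /notes for the OPEN_AI API type, or
    # /<azure_api_prefix>/notes?api-version=... for the Azure API types.
    return _ExampleNote.create(note="hello")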
| APACAI-API-main | apacai/api_resources/abstract/createable_api_resource.py |
from apacai import api_requestor, util, error
from apacai.api_resources.abstract.api_resource import APIResource
from apacai.util import ApiType
class ListableAPIResource(APIResource):
@classmethod
def auto_paging_iter(cls, *args, **params):
return cls.list(*args, **params).auto_paging_iter()
@classmethod
def __prepare_list_requestor(
cls,
api_key=None,
api_version=None,
organization=None,
api_base=None,
api_type=None,
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base or cls.api_base(),
api_version=api_version,
api_type=api_type,
organization=organization,
)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
base = cls.class_url()
url = "/%s%s?api-version=%s" % (cls.azure_api_prefix, base, api_version)
elif typed_api_type == ApiType.OPEN_AI:
url = cls.class_url()
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
return requestor, url
@classmethod
def list(
cls,
api_key=None,
request_id=None,
api_version=None,
organization=None,
api_base=None,
api_type=None,
**params,
):
requestor, url = cls.__prepare_list_requestor(
api_key,
api_version,
organization,
api_base,
api_type,
)
response, _, api_key = requestor.request(
"get", url, params, request_id=request_id
)
apacai_object = util.convert_to_apacai_object(
response, api_key, api_version, organization
)
apacai_object._retrieve_params = params
return apacai_object
@classmethod
async def alist(
cls,
api_key=None,
request_id=None,
api_version=None,
organization=None,
api_base=None,
api_type=None,
**params,
):
requestor, url = cls.__prepare_list_requestor(
api_key,
api_version,
organization,
api_base,
api_type,
)
response, _, api_key = await requestor.arequest(
"get", url, params, request_id=request_id
)
apacai_object = util.convert_to_apacai_object(
response, api_key, api_version, organization
)
apacai_object._retrieve_params = params
return apacai_object
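# Hedged sketch, not part of the original module: listing and auto-paging a
# minimal resource. ``OBJECT_NAME`` is an illustrative assumption.
class _ExampleItem(ListableAPIResource):
    OBJECT_NAME = "items"
def _example_list_items():  # pragma: no cover
    page = _ExampleItem.list(limit=10)  # GET /items (plus api-version on Azure)
    # auto_paging_iter() delegates to the returned list object's iterator.
    return page, list(_ExampleItem.auto_paging_iter(limit=10))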
| APACAI-API-main | apacai/api_resources/abstract/listable_api_resource.py |
# flake8: noqa
from apacai.api_resources.abstract.api_resource import APIResource
from apacai.api_resources.abstract.createable_api_resource import CreateableAPIResource
from apacai.api_resources.abstract.deletable_api_resource import DeletableAPIResource
from apacai.api_resources.abstract.listable_api_resource import ListableAPIResource
from apacai.api_resources.abstract.nested_resource_class_methods import (
nested_resource_class_methods,
)
from apacai.api_resources.abstract.updateable_api_resource import UpdateableAPIResource
| APACAI-API-main | apacai/api_resources/abstract/__init__.py |
from urllib.parse import quote_plus
from typing import Awaitable
from apacai.api_resources.abstract.api_resource import APIResource
class UpdateableAPIResource(APIResource):
@classmethod
def modify(cls, sid, **params):
url = "%s/%s" % (cls.class_url(), quote_plus(sid))
return cls._static_request("post", url, **params)
@classmethod
def amodify(cls, sid, **params) -> Awaitable:
url = "%s/%s" % (cls.class_url(), quote_plus(sid))
return cls._astatic_request("patch", url, **params)
| APACAI-API-main | apacai/api_resources/abstract/updateable_api_resource.py |
from urllib.parse import quote_plus
from apacai import api_requestor, util
def _nested_resource_class_methods(
resource,
path=None,
operations=None,
resource_plural=None,
async_=False,
):
if resource_plural is None:
resource_plural = "%ss" % resource
if path is None:
path = resource_plural
if operations is None:
raise ValueError("operations list required")
def wrapper(cls):
def nested_resource_url(cls, id, nested_id=None):
url = "%s/%s/%s" % (cls.class_url(), quote_plus(id), quote_plus(path))
if nested_id is not None:
url += "/%s" % quote_plus(nested_id)
return url
resource_url_method = "%ss_url" % resource
setattr(cls, resource_url_method, classmethod(nested_resource_url))
def nested_resource_request(
cls,
method,
url,
api_key=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key, api_version=api_version, organization=organization
)
response, _, api_key = requestor.request(
method, url, params, request_id=request_id
)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
async def anested_resource_request(
cls,
method,
url,
api_key=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key, api_version=api_version, organization=organization
)
response, _, api_key = await requestor.arequest(
method, url, params, request_id=request_id
)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
resource_request_method = "%ss_request" % resource
setattr(
cls,
resource_request_method,
classmethod(
anested_resource_request if async_ else nested_resource_request
),
)
for operation in operations:
if operation == "create":
def create_nested_resource(cls, id, **params):
url = getattr(cls, resource_url_method)(id)
return getattr(cls, resource_request_method)("post", url, **params)
create_method = "create_%s" % resource
setattr(cls, create_method, classmethod(create_nested_resource))
elif operation == "retrieve":
def retrieve_nested_resource(cls, id, nested_id, **params):
url = getattr(cls, resource_url_method)(id, nested_id)
return getattr(cls, resource_request_method)("get", url, **params)
retrieve_method = "retrieve_%s" % resource
setattr(cls, retrieve_method, classmethod(retrieve_nested_resource))
elif operation == "update":
def modify_nested_resource(cls, id, nested_id, **params):
url = getattr(cls, resource_url_method)(id, nested_id)
return getattr(cls, resource_request_method)("post", url, **params)
modify_method = "modify_%s" % resource
setattr(cls, modify_method, classmethod(modify_nested_resource))
elif operation == "delete":
def delete_nested_resource(cls, id, nested_id, **params):
url = getattr(cls, resource_url_method)(id, nested_id)
return getattr(cls, resource_request_method)(
"delete", url, **params
)
delete_method = "delete_%s" % resource
setattr(cls, delete_method, classmethod(delete_nested_resource))
elif operation == "list":
def list_nested_resources(cls, id, **params):
url = getattr(cls, resource_url_method)(id)
return getattr(cls, resource_request_method)("get", url, **params)
list_method = "list_%s" % resource_plural
setattr(cls, list_method, classmethod(list_nested_resources))
else:
raise ValueError("Unknown operation: %s" % operation)
return cls
return wrapper
def nested_resource_class_methods(
resource,
path=None,
operations=None,
resource_plural=None,
):
return _nested_resource_class_methods(
resource, path, operations, resource_plural, async_=False
)
def anested_resource_class_methods(
resource,
path=None,
operations=None,
resource_plural=None,
):
return _nested_resource_class_methods(
resource, path, operations, resource_plural, async_=True
)
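# Hedged sketch, not part of the original module: applying the decorator to a
# hypothetical parent resource. It attaches ``events_url``, ``events_request``,
# ``create_event`` and ``list_events`` classmethods that target
# ``/parents/<id>/events``. All names here are illustrative assumptions.
from apacai.api_resources.abstract.api_resource import APIResource
@nested_resource_class_methods("event", operations=["create", "list"])
class _ExampleParent(APIResource):
    OBJECT_NAME = "parents"
def _example_nested_usage():  # pragma: no cover
    _ExampleParent.create_event("parent_123", kind="ping")  # POST /parents/parent_123/events
    return _ExampleParent.list_events("parent_123")  # GET /parents/parent_123/events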
| APACAI-API-main | apacai/api_resources/abstract/nested_resource_class_methods.py |
from urllib.parse import quote_plus
import apacai
from apacai import api_requestor, error, util
from apacai.apacai_object import ApacAIObject
from apacai.util import ApiType
from typing import Optional
class APIResource(ApacAIObject):
api_prefix = ""
azure_api_prefix = "apacai"
azure_deployments_prefix = "deployments"
@classmethod
def retrieve(
cls, id, api_key=None, request_id=None, request_timeout=None, **params
):
instance = cls(id=id, api_key=api_key, **params)
instance.refresh(request_id=request_id, request_timeout=request_timeout)
return instance
@classmethod
def aretrieve(
cls, id, api_key=None, request_id=None, request_timeout=None, **params
):
instance = cls(id=id, api_key=api_key, **params)
return instance.arefresh(request_id=request_id, request_timeout=request_timeout)
def refresh(self, request_id=None, request_timeout=None):
self.refresh_from(
self.request(
"get",
self.instance_url(),
request_id=request_id,
request_timeout=request_timeout,
)
)
return self
async def arefresh(self, request_id=None, request_timeout=None):
self.refresh_from(
await self.arequest(
"get",
self.instance_url(operation="refresh"),
request_id=request_id,
request_timeout=request_timeout,
)
)
return self
@classmethod
def class_url(cls):
if cls == APIResource:
raise NotImplementedError(
"APIResource is an abstract class. You should perform actions on its subclasses."
)
# Namespaces are separated in object names with periods (.) and in URLs
# with forward slashes (/), so replace the former with the latter.
base = cls.OBJECT_NAME.replace(".", "/") # type: ignore
if cls.api_prefix:
return "/%s/%s" % (cls.api_prefix, base)
return "/%s" % (base)
def instance_url(self, operation=None):
id = self.get("id")
if not isinstance(id, str):
raise error.InvalidRequestError(
"Could not determine which URL to request: %s instance "
"has invalid ID: %r, %s. ID should be of type `str` (or"
" `unicode`)" % (type(self).__name__, id, type(id)),
"id",
)
api_version = self.api_version or apacai.api_version
extn = quote_plus(id)
if self.typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
if not api_version:
raise error.InvalidRequestError(
"An API version is required for the Azure API type."
)
if not operation:
base = self.class_url()
return "/%s%s/%s?api-version=%s" % (
self.azure_api_prefix,
base,
extn,
api_version,
)
return "/%s/%s/%s/%s?api-version=%s" % (
self.azure_api_prefix,
self.azure_deployments_prefix,
extn,
operation,
api_version,
)
elif self.typed_api_type == ApiType.OPEN_AI:
base = self.class_url()
return "%s/%s" % (base, extn)
else:
raise error.InvalidAPIType("Unsupported API type %s" % self.api_type)
# The `method_` and `url_` arguments are suffixed with an underscore to
# avoid conflicting with actual request parameters in `params`.
@classmethod
def _static_request(
cls,
method_,
url_,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key,
api_version=api_version,
organization=organization,
api_base=api_base,
api_type=api_type,
)
response, _, api_key = requestor.request(
method_, url_, params, request_id=request_id
)
return util.convert_to_apacai_object(
response, api_key, api_version, organization
)
@classmethod
async def _astatic_request(
cls,
method_,
url_,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
requestor = api_requestor.APIRequestor(
api_key,
api_version=api_version,
organization=organization,
api_base=api_base,
api_type=api_type,
)
response, _, api_key = await requestor.arequest(
method_, url_, params, request_id=request_id
)
return response
@classmethod
def _get_api_type_and_version(
cls, api_type: Optional[str] = None, api_version: Optional[str] = None
):
typed_api_type = (
ApiType.from_str(api_type)
if api_type
else ApiType.from_str(apacai.api_type)
)
typed_api_version = api_version or apacai.api_version
return (typed_api_type, typed_api_version)
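# Hedged sketch, not part of the original module: how instance_url() composes
# URLs for a concrete subclass. ``OBJECT_NAME`` and the id are illustrative.
class _ExampleThing(APIResource):
    OBJECT_NAME = "things"
def _example_instance_url():  # pragma: no cover
    thing = _ExampleThing(id="thing_123")
    # OPEN_AI API type -> "/things/thing_123"
    # AZURE/AZURE_AD   -> "/apacai/things/thing_123?api-version=<version>"
    return thing.instance_url()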
| APACAI-API-main | apacai/api_resources/abstract/api_resource.py |
from urllib.parse import quote_plus
from typing import Awaitable
from apacai import error
from apacai.api_resources.abstract.api_resource import APIResource
from apacai.util import ApiType
class DeletableAPIResource(APIResource):
@classmethod
def __prepare_delete(cls, sid, api_type=None, api_version=None):
if isinstance(cls, APIResource):
raise ValueError(".delete may only be called as a class method now.")
base = cls.class_url()
extn = quote_plus(sid)
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
url = "/%s%s/%s?api-version=%s" % (
cls.azure_api_prefix,
base,
extn,
api_version,
)
elif typed_api_type == ApiType.OPEN_AI:
url = "%s/%s" % (base, extn)
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
return url
@classmethod
def delete(cls, sid, api_type=None, api_version=None, **params):
url = cls.__prepare_delete(sid, api_type, api_version)
return cls._static_request(
"delete", url, api_type=api_type, api_version=api_version, **params
)
@classmethod
def adelete(cls, sid, api_type=None, api_version=None, **params) -> Awaitable:
url = cls.__prepare_delete(sid, api_type, api_version)
return cls._astatic_request(
"delete", url, api_type=api_type, api_version=api_version, **params
)
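# Hedged usage sketch, not part of the original module: deleting by id. The
# id below is an illustrative assumption.
def _example_delete(resource_cls, sid="res_123"):  # pragma: no cover
    # DELETE /<class_url>/<sid>; Azure API types get ?api-version=... appended.
    return resource_cls.delete(sid)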
| APACAI-API-main | apacai/api_resources/abstract/deletable_api_resource.py |
import time
from typing import Optional
from urllib.parse import quote_plus
import apacai
from apacai import api_requestor, error, util
from apacai.api_resources.abstract.api_resource import APIResource
from apacai.apacai_response import ApacAIResponse
from apacai.util import ApiType
MAX_TIMEOUT = 20
class EngineAPIResource(APIResource):
plain_old_data = False
def __init__(self, engine: Optional[str] = None, **kwargs):
super().__init__(engine=engine, **kwargs)
@classmethod
def class_url(
cls,
engine: Optional[str] = None,
api_type: Optional[str] = None,
api_version: Optional[str] = None,
):
# Namespaces are separated in object names with periods (.) and in URLs
# with forward slashes (/), so replace the former with the latter.
base = cls.OBJECT_NAME.replace(".", "/") # type: ignore
typed_api_type, api_version = cls._get_api_type_and_version(
api_type, api_version
)
if typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
if not api_version:
raise error.InvalidRequestError(
"An API version is required for the Azure API type."
)
if engine is None:
raise error.InvalidRequestError(
"You must provide the deployment name in the 'engine' parameter to access the Azure APACAI service"
)
extn = quote_plus(engine)
return "/%s/%s/%s/%s?api-version=%s" % (
cls.azure_api_prefix,
cls.azure_deployments_prefix,
extn,
base,
api_version,
)
elif typed_api_type == ApiType.OPEN_AI:
if engine is None:
return "/%s" % (base)
extn = quote_plus(engine)
return "/engines/%s/%s" % (extn, base)
else:
raise error.InvalidAPIType("Unsupported API type %s" % api_type)
@classmethod
def __prepare_create_request(
cls,
api_key=None,
api_base=None,
api_type=None,
api_version=None,
organization=None,
**params,
):
deployment_id = params.pop("deployment_id", None)
engine = params.pop("engine", deployment_id)
model = params.get("model", None)
timeout = params.pop("timeout", None)
stream = params.get("stream", False)
headers = params.pop("headers", None)
request_timeout = params.pop("request_timeout", None)
typed_api_type = cls._get_api_type_and_version(api_type=api_type)[0]
if typed_api_type in (util.ApiType.AZURE, util.ApiType.AZURE_AD):
if deployment_id is None and engine is None:
raise error.InvalidRequestError(
"Must provide an 'engine' or 'deployment_id' parameter to create a %s"
% cls,
"engine",
)
else:
if model is None and engine is None:
raise error.InvalidRequestError(
"Must provide an 'engine' or 'model' parameter to create a %s"
% cls,
"engine",
)
if timeout is None:
# No special timeout handling
pass
elif timeout > 0:
# API only supports timeouts up to MAX_TIMEOUT
params["timeout"] = min(timeout, MAX_TIMEOUT)
timeout = (timeout - params["timeout"]) or None
elif timeout == 0:
params["timeout"] = MAX_TIMEOUT
requestor = api_requestor.APIRequestor(
api_key,
api_base=api_base,
api_type=api_type,
api_version=api_version,
organization=organization,
)
url = cls.class_url(engine, api_type, api_version)
return (
deployment_id,
engine,
timeout,
stream,
headers,
request_timeout,
typed_api_type,
requestor,
url,
params,
)
@classmethod
def create(
cls,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
(
deployment_id,
engine,
timeout,
stream,
headers,
request_timeout,
typed_api_type,
requestor,
url,
params,
) = cls.__prepare_create_request(
api_key, api_base, api_type, api_version, organization, **params
)
response, _, api_key = requestor.request(
"post",
url,
params=params,
headers=headers,
stream=stream,
request_id=request_id,
request_timeout=request_timeout,
)
if stream:
# must be an iterator
assert not isinstance(response, ApacAIResponse)
return (
util.convert_to_apacai_object(
line,
api_key,
api_version,
organization,
engine=engine,
plain_old_data=cls.plain_old_data,
)
for line in response
)
else:
obj = util.convert_to_apacai_object(
response,
api_key,
api_version,
organization,
engine=engine,
plain_old_data=cls.plain_old_data,
)
if timeout is not None:
obj.wait(timeout=timeout or None)
return obj
@classmethod
async def acreate(
cls,
api_key=None,
api_base=None,
api_type=None,
request_id=None,
api_version=None,
organization=None,
**params,
):
(
deployment_id,
engine,
timeout,
stream,
headers,
request_timeout,
typed_api_type,
requestor,
url,
params,
) = cls.__prepare_create_request(
api_key, api_base, api_type, api_version, organization, **params
)
response, _, api_key = await requestor.arequest(
"post",
url,
params=params,
headers=headers,
stream=stream,
request_id=request_id,
request_timeout=request_timeout,
)
if stream:
# must be an iterator
assert not isinstance(response, ApacAIResponse)
return (
util.convert_to_apacai_object(
line,
api_key,
api_version,
organization,
engine=engine,
plain_old_data=cls.plain_old_data,
)
async for line in response
)
else:
obj = util.convert_to_apacai_object(
response,
api_key,
api_version,
organization,
engine=engine,
plain_old_data=cls.plain_old_data,
)
if timeout is not None:
await obj.await_(timeout=timeout or None)
return obj
def instance_url(self):
id = self.get("id")
if not isinstance(id, str):
raise error.InvalidRequestError(
f"Could not determine which URL to request: {type(self).__name__} instance has invalid ID: {id}, {type(id)}. ID should be of type str.",
"id",
)
extn = quote_plus(id)
params_connector = "?"
if self.typed_api_type in (ApiType.AZURE, ApiType.AZURE_AD):
api_version = self.api_version or apacai.api_version
if not api_version:
raise error.InvalidRequestError(
"An API version is required for the Azure API type."
)
base = self.OBJECT_NAME.replace(".", "/")
url = "/%s/%s/%s/%s/%s?api-version=%s" % (
self.azure_api_prefix,
self.azure_deployments_prefix,
self.engine,
base,
extn,
api_version,
)
params_connector = "&"
elif self.typed_api_type == ApiType.OPEN_AI:
base = self.class_url(self.engine, self.api_type, self.api_version)
url = "%s/%s" % (base, extn)
else:
raise error.InvalidAPIType("Unsupported API type %s" % self.api_type)
timeout = self.get("timeout")
if timeout is not None:
timeout = quote_plus(str(timeout))
url += params_connector + "timeout={}".format(timeout)
return url
def wait(self, timeout=None):
start = time.time()
while self.status != "complete":
self.timeout = (
min(timeout + start - time.time(), MAX_TIMEOUT)
if timeout is not None
else MAX_TIMEOUT
)
if self.timeout < 0:
del self.timeout
break
self.refresh()
return self
async def await_(self, timeout=None):
"""Async version of `EngineApiResource.wait`"""
start = time.time()
while self.status != "complete":
self.timeout = (
min(timeout + start - time.time(), MAX_TIMEOUT)
if timeout is not None
else MAX_TIMEOUT
)
if self.timeout < 0:
del self.timeout
break
await self.arefresh()
return self
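# Hedged usage sketch, not part of the original module: calling create() on a
# concrete subclass. The model name, prompt and timeout are illustrative.
def _example_engine_create(resource_cls):  # pragma: no cover
    # OPEN_AI API type: pass ``model`` (or ``engine``); Azure API types need an
    # ``engine``/``deployment_id`` naming the deployment.
    # A ``timeout`` above MAX_TIMEOUT (20s) is split: up to 20s goes to the
    # request itself and the remainder is consumed by wait()/await_(), which
    # refresh the object until its status is "complete".
    return resource_cls.create(model="example-model", prompt="Hello", timeout=45)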
| APACAI-API-main | apacai/api_resources/abstract/engine_api_resource.py |
from apacai.api_resources.abstract import (
CreateableAPIResource,
DeletableAPIResource,
ListableAPIResource,
)
class CompletionConfig(
CreateableAPIResource, ListableAPIResource, DeletableAPIResource
):
OBJECT_NAME = "experimental.completion_configs"
| APACAI-API-main | apacai/api_resources/experimental/completion_config.py |
from apacai.api_resources.experimental.completion_config import ( # noqa: F401
CompletionConfig,
)
| APACAI-API-main | apacai/api_resources/experimental/__init__.py |
import os
from setuptools import setup
if os.getenv("APACAI_UPLOAD") != "y":
raise RuntimeError(
"This package is a placeholder package on the public PyPI instance, and is not the correct version to install. If you are having trouble figuring out the correct package to install, please contact us."
)
setup(name="apacai", description="Placeholder package", version="0.0.1")
| APACAI-API-main | public/setup.py |
# This code example has moved. You can now find it in the [APACAI Cookbook](https://github.com/apacai/apacai-cookbook)
# at [examples/Backtranslation_of_SQL_queries](https://github.com/apacai/apacai-cookbook/blob/main/examples/Backtranslation_of_SQL_queries.py)
| APACAI-API-main | examples/codex/backtranslation.py |
# This code example has moved. You can now find it in the [APACAI Cookbook](https://github.com/apacai/apacai-cookbook)
# at [examples/fine-tuned_qa](https://github.com/apacai/apacai-cookbook/tree/main/examples/fine-tuned_qa)
| APACAI-API-main | examples/finetuning/answers_with_ft.py |
import dotenv
import hydra
from omegaconf import DictConfig
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
@hydra.main(config_path="configs/", config_name="test.yaml")
def main(config: DictConfig):
# Imports can be nested inside @hydra.main to optimize tab completion
# https://github.com/facebookresearch/hydra/issues/934
from src import utils
from src.testing_pipeline import test
# Applies optional utilities
utils.extras(config)
# Evaluate model
return test(config)
if __name__ == "__main__":
main() | multimodal-self-distillation-main | test.py |
import dotenv
import hydra
from omegaconf import DictConfig
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
@hydra.main(config_path="configs/", config_name="train.yaml", version_base='1.1')
def main(config: DictConfig):
# Imports can be nested inside @hydra.main to optimize tab completion
# https://github.com/facebookresearch/hydra/issues/934
from src import utils
from src.training_pipeline import train
# Applies optional utilities
utils.extras(config)
# Train model
return train(config)
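# Hedged usage examples (not part of the original file); the override keys are
# assumptions about the config groups under ./configs:
#   python train.py trainer.max_epochs=10
#   python train.py model=flat_perceiver datamodule=librispeech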
if __name__ == "__main__":
main() | multimodal-self-distillation-main | train.py |
multimodal-self-distillation-main | tests/__init__.py |
|
import sys
import os
import io
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import pytest
import torch
import hydra
from typing import List
from src.models.module import LatentPredictionPretraining
from src.models.components.perceiver import PerceiverModel
from src.models.components.ema import EMA
from src.models.components.outputs import ModelOutput, ForwardPassOutput
from src.models.components.masking import mask_hidden_states
from tests.helpers import get_input_features
def test_model_instantiation():
"""
Test that the model can be instantiated.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
assert isinstance(model, PerceiverModel)
def test_ema_instatiation():
"""
Test that the model can instantiate an EMA object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
ema = EMA(model)
assert isinstance(ema, EMA)
def test_lightning_module_instantiation():
"""
Test that the model can instantiate a LightningModule object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='lit_module')
module = hydra.utils.instantiate(cfg)
assert isinstance(module, LatentPredictionPretraining)
def test_text_throughput():
"""
Test that the model can process text.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
tokens, _, _, _, token_batch, _, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(text=tokens)
inputs_batch = dict(text=token_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_audio_throughput():
"""
Test that the model can process audio.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
_, _, audio_features, _, _, _, audio_batch, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(audio=audio_features)
inputs_batch = dict(audio=audio_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_image_throughput():
"""
Test that the model can process images.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
_, image_features, _, _, _, image_batch, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(image=image_features)
inputs_batch = dict(image=image_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_video_throughput():
"""
Test that the model can process video.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
_, _, _, video_features, _, _, _, video_batch = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(image=video_features)
inputs_batch = dict(image=video_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_image_text_throughput():
"""
Test that the model can process image-text pairs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
        tokens, image_features, _, _, token_batch, image_batch, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
        inputs = dict(text=tokens, image=image_features)
        inputs_batch = dict(text=token_batch, image=image_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_image_audio_throughput():
"""
    Test that the model can process image-audio pairs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
        _, image_features, audio_features, _, _, image_batch, audio_batch, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
        inputs = dict(image=image_features, audio=audio_features)
        inputs_batch = dict(image=image_batch, audio=audio_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_audio_text_throughput():
"""
Test that the model can process audio-text pairs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
        tokens, _, audio_features, _, token_batch, _, audio_batch, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
        inputs = dict(text=tokens, audio=audio_features)
        inputs_batch = dict(text=token_batch, audio=audio_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_video_audio_throughput():
"""
    Test that the model can process video-audio pairs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
        _, _, audio_features, video_features, _, _, audio_batch, video_batch = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
        inputs = dict(image=video_features, audio=audio_features)
        inputs_batch = dict(image=video_batch, audio=audio_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_video_text_throughput():
"""
    Test that the model can process video-text pairs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
        tokens, _, _, video_features, token_batch, _, _, video_batch = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
        inputs = dict(text=tokens, image=video_features)
        inputs_batch = dict(text=token_batch, image=video_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_video_audio_text_throughput():
"""
Test that the model can process multimodal data.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
model = hydra.utils.instantiate(cfg.model)
tokens, _, audio_features, video_features, token_batch, _, audio_batch, video_batch = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(text=tokens, image=video_features, audio=audio_features)
inputs_batch = dict(text=token_batch, image=video_batch, audio=audio_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_pl_module_forward():
"""
Test that the model can process outputs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
pl_module = hydra.utils.instantiate(cfg)
tokens, _, _, _, token_batch, _, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
inputs = dict(text=tokens)
inputs_batch = dict(text=token_batch)
forward_outputs = pl_module.forward(inputs)
forward_outputs_batch = pl_module.forward(inputs_batch)
assert isinstance(forward_outputs, ForwardPassOutput)
assert isinstance(forward_outputs.student_output, ModelOutput)
assert isinstance(forward_outputs.teacher_output, ModelOutput)
assert isinstance(forward_outputs_batch, ForwardPassOutput)
assert isinstance(forward_outputs_batch.student_output, ModelOutput)
assert isinstance(forward_outputs_batch.teacher_output, ModelOutput)
def test_pl_module_step():
"""
Test that the model can process loss functions.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
pl_module = hydra.utils.instantiate(cfg)
tokens, _, _, _, token_batch, _, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
inputs = dict(text=tokens)
inputs_batch = dict(text=token_batch)
step_outputs, loss = pl_module.step(inputs)
step_outputs_batch, loss_batch = pl_module.step(inputs_batch)
assert isinstance(step_outputs, ForwardPassOutput)
assert loss.size() == torch.Size([])
assert isinstance(step_outputs_batch, ForwardPassOutput)
assert loss.size() == torch.Size([])
def test_latent_masking():
"""
Test that the model can process latent masks.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
pl_module = hydra.utils.instantiate(cfg)
assert pl_module.student.is_student == True
assert pl_module.teacher.model.is_student == False
tokens, _, _, _, _, _, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
inputs = dict(text=tokens)
step_outputs, _ = pl_module.step(inputs)
assert step_outputs.student_output.last_hidden_state.size() == step_outputs.teacher_output.last_hidden_state.size()
assert torch.equal(step_outputs.student_output.last_hidden_state, step_outputs.teacher_output.last_hidden_state) == False
| multimodal-self-distillation-main | tests/unit/test_flat_perceiver.py |
multimodal-self-distillation-main | tests/unit/__init__.py |
|
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import torch
import hydra
from src.datamodules.msmarco_datamodule import MSMARCOPassageDataModule
from src.datamodules.imagenet_datamodule import ImagenetDataModule
from src.datamodules.wikipedia_datamodule import WikipediaDataModule
from src.datamodules.conceptual_datamodule import ConceptualCaptionsDataModule
from src.datamodules.speechcoco_datamodule import SpeechCOCODataModule
from src.datamodules.librispeech_datamodule import LibriSpeechDataModule
from src.datamodules.tinyimagenet_datamodule import TinyImagenetDataModule
from src.datamodules.cococaptions_datamodule import COCOCaptionsDatamodule
def test_wikipedia():
"""
Test that the model can instantiate a WikipediaDatamodule object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/datamodule', job_name="test_wikipedia"):
cfg = hydra.compose(config_name='wikipedia')
datamodule = hydra.utils.instantiate(cfg)
assert isinstance(datamodule, WikipediaDataModule)
def test_msmarco():
with hydra.initialize(version_base='1.1', config_path='../../configs/datamodule', job_name="test_msmarco"):
cfg = hydra.compose(config_name='msmarco')
datamodule = hydra.utils.instantiate(cfg)
assert isinstance(datamodule, MSMARCOPassageDataModule)
def test_imagenet():
"""
Test that the model can instantiate an ImageNetDatamodule object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/datamodule', job_name="test_imagenet"):
cfg = hydra.compose(config_name='imagenet')
datamodule = hydra.utils.instantiate(cfg)
assert isinstance(datamodule, ImagenetDataModule)
def test_tinyimagenet():
"""
Test that the model can instantiate an ImageNetDatamodule object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/datamodule', job_name="test_tiny_imagenet"):
cfg = hydra.compose(config_name='tinyimagenet')
datamodule = hydra.utils.instantiate(cfg)
assert isinstance(datamodule, TinyImagenetDataModule)
train_batch = next(iter(datamodule.train_dataloader()))
assert train_batch['image'].size() == torch.Size([cfg.batch_size, 3, 64, 64])
assert train_batch['label'].size() == torch.Size([cfg.batch_size])
def test_librispeech():
"""
Test that the model can instantiate a LibrispeechDatamodule object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/datamodule', job_name="test_librispeech"):
cfg = hydra.compose(config_name='librispeech')
datamodule = hydra.utils.instantiate(cfg)
assert isinstance(datamodule, LibriSpeechDataModule)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_batch = next(iter(datamodule.train_dataloader()))
assert train_batch['audio'].size()[0] == cfg.train_batch_size
assert train_batch['text'].size()[0] == cfg.train_batch_size
def test_coco_captions():
"""
Test that the model can instantiate a COCOCaptionsDatamodule object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/datamodule', job_name="test_coco_captions"):
cfg = hydra.compose(config_name='cococaptions')
datamodule = hydra.utils.instantiate(cfg)
assert isinstance(datamodule, COCOCaptionsDatamodule)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_batch = next(iter(datamodule.train_dataloader()))
assert train_batch['text'].size()[0] == cfg.train_batch_size
assert train_batch['image'].size()[0] == cfg.train_batch_size
def test_conceptual_captions():
"""
Test that the model can instantiate a ConceptualCaptionsDatamodule object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/datamodule', job_name="test_conceptual_captions"):
cfg = hydra.compose(config_name='conceptual')
datamodule = hydra.utils.instantiate(cfg)
assert isinstance(datamodule, ConceptualCaptionsDataModule)
datamodule.prepare_data()
datamodule.setup(stage='validate')
train_batch = next(iter(datamodule.train_dataloader()))
assert train_batch['text'].size()[0] == cfg.val_batch_size
assert train_batch['image'].size()[0] == cfg.val_batch_size
def test_speechcoco():
"""
Test that the model can instantiate a SpeechCocoDatamodule object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/datamodule', job_name="test_speechcoco"):
cfg = hydra.compose(config_name='speechcoco')
datamodule = hydra.utils.instantiate(cfg)
assert isinstance(datamodule, SpeechCOCODataModule)
datamodule.prepare_data()
datamodule.setup(stage='fit')
train_batch = next(iter(datamodule.train_dataloader()))
assert train_batch['audio'].size()[0] == cfg.train_batch_size
assert train_batch['image'].size()[0] == cfg.train_batch_size
| multimodal-self-distillation-main | tests/unit/test_datamodules.py |
multimodal-self-distillation-main | tests/unit/test_training.py |
|
import os
import sys
import hydra
import torch
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from src.models.components.preprocessor import PerceiverMultimodalPreprocessor
from tests.helpers import get_input_features
max_padding = 2
def test_preprocessor_instantiation():
"""
Test that the model can instantiate a preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/tests', job_name="test_preprocessor_instantiation"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
assert isinstance(preprocessor, PerceiverMultimodalPreprocessor)
def test_preprocessor_text():
"""
Test all reasonable combinations of inputs to the preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/tests', job_name="test_preprocessor_outputs"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
inputs = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
hidden_size = cfg.model.input_preprocessor.modalities.text.d_model
inputs_txt = dict(text=inputs[0])
outputs_txt = preprocessor(inputs_txt)
assert outputs_txt[0].size()[0] == 1
assert outputs_txt[0].size()[1] == 46
assert outputs_txt[0].size()[2] == hidden_size + max_padding
inputs_txt_batch = dict(text=inputs[4])
outputs_txt_batch = preprocessor(inputs_txt_batch)
assert outputs_txt_batch[0].size()[0] == 32
assert outputs_txt_batch[0].size()[1] == 46
assert outputs_txt_batch[0].size()[2] == hidden_size + max_padding
def test_preprocessor_audio():
"""
Test all reasonable combinations of inputs to the preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/tests', job_name="test_preprocessor_outputs"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
inputs = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
hidden_size = cfg.model.input_preprocessor.modalities.text.d_model
inputs_aud = dict(audio=inputs[2])
outputs_aud = preprocessor(inputs_aud)
assert outputs_aud[0].size()[0] == 1
assert outputs_aud[0].size()[1] == 4038
assert outputs_aud[0].size()[2] == hidden_size + max_padding
inputs_aud_batch = dict(audio=inputs[6])
outputs_aud_batch = preprocessor(inputs_aud_batch)
assert outputs_aud_batch[0].size()[0] == 32
assert outputs_aud_batch[0].size()[1] == 4038
assert outputs_aud_batch[0].size()[2] == hidden_size + max_padding
def test_preprocessor_image():
"""
Test all reasonable combinations of inputs to the preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/model', job_name="test_preprocessor_outputs"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
inputs = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
hidden_size = cfg.model.input_preprocessor.modalities.text.d_model
inputs_img = dict(image=inputs[1])
outputs_img = preprocessor(inputs_img)
assert outputs_img[0].size()[0] == 1
assert outputs_img[0].size()[1] == 50176
assert outputs_img[0].size()[2] == hidden_size + max_padding
inputs_img_batch = dict(image=inputs[5])
outputs_img_batch = preprocessor(inputs_img_batch)
assert outputs_img_batch[0].size()[0] == 32
assert outputs_img_batch[0].size()[1] == 50176
assert outputs_img_batch[0].size()[2] == hidden_size + max_padding
def test_preprocessor_video():
"""
Test all reasonable combinations of inputs to the preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/tests', job_name="test_preprocessor_outputs"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
inputs = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
hidden_size = cfg.model.input_preprocessor.modalities.text.d_model
inputs_vid = dict(video=inputs[3])
outputs_vid = preprocessor(inputs_vid) #! length of 800k - which is 16x the length of the image output length - still too long!!!
inputs_vid_batch = dict(video=inputs[7])
# outputs_vid_batch = preprocessor(inputs_vid_batch) #! tensor way too big, crashes RAM
def test_preprocessor_image_text():
"""
Test all reasonable combinations of inputs to the preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/tests', job_name="test_preprocessor_outputs"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
inputs = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
hidden_size = cfg.model.input_preprocessor.modalities.text.d_model
inputs_txt_img = dict(text=inputs[0], image=inputs[1])
outputs_txt_img = preprocessor(inputs_txt_img)
inputs_txt_img_batch = dict(text=inputs[4], image=inputs[5])
outputs_txt_img_batch = preprocessor(inputs_txt_img_batch)
def test_preprocessor_image_audio():
"""
Test all reasonable combinations of inputs to the preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/tests', job_name="test_preprocessor_outputs"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
inputs = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
hidden_size = cfg.model.input_preprocessor.modalities.text.d_model
inputs_img_aud = dict(image=inputs[1], audio=inputs[2])
outputs_img_aud = preprocessor(inputs_img_aud)
inputs_img_aud_batch = dict(image=inputs[5], audio=inputs[6])
outputs_img_aud_batch = preprocessor(inputs_img_aud_batch)
def test_preprocessor_audio_text():
"""
Test all reasonable combinations of inputs to the preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/tests', job_name="test_preprocessor_outputs"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
inputs = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
hidden_size = cfg.model.input_preprocessor.modalities.text.d_model
inputs_aud_txt = dict(text=inputs[0], audio=inputs[2])
outputs_aud_txt = preprocessor(inputs_aud_txt)
inputs_aud_txt_batch = dict(text=inputs[4], audio=inputs[6])
outputs_aud_txt_batch = preprocessor(inputs_aud_txt_batch)
def test_preprocessor_video_audio():
"""
Test combined video and audio inputs to the preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/tests', job_name="test_preprocessor_outputs"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
inputs = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
hidden_size = cfg.model.input_preprocessor.modalities.text.d_model
inputs_vid_aud = dict(video=inputs[3], audio=inputs[2])
outputs_vid_aud = preprocessor(inputs_vid_aud)
inputs_vid_aud_batch = dict(video=inputs[7], audio=inputs[6])
# outputs_vid_aud_batch = preprocessor(inputs_vid_aud_batch) #! video too large
def test_preprocessor_video_text():
"""
Test combined video and text inputs to the preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/tests', job_name="test_preprocessor_outputs"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
inputs = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
hidden_size = cfg.model.input_preprocessor.modalities.text.d_model
inputs_vid_text = dict(text=inputs[0], video=inputs[3])
outputs_vid_text = preprocessor(inputs_vid_text)
inputs_vid_text_batch = dict(text=inputs[4], video=inputs[7])
# outputs_vid_text_batch = preprocessor(inputs_vid_text_batch) #! video too large
def test_preprocessor_video_audio_text():
"""
Test combined video, audio and text inputs to the preprocessor.
"""
with hydra.initialize(version_base='1.2', config_path='../../configs/tests', job_name="test_preprocessor_outputs"):
cfg = hydra.compose(config_name='flat_perceiver')
preprocessor = hydra.utils.instantiate(cfg.model.input_preprocessor)
inputs = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
hidden_size = cfg.model.input_preprocessor.modalities.text.d_model
inputs_vid_aud_text = dict(text=inputs[0], video=inputs[3], audio=inputs[2])
outputs_vid_aud_text = preprocessor(inputs_vid_aud_text)
inputs_vid_aud_text_batch = dict(text=inputs[4], video=inputs[7], audio=inputs[6])
# outputs_vid_aud_text_batch = preprocessor(inputs_vid_aud_text_batch) #! video too large
| multimodal-self-distillation-main | tests/unit/test_preprocessor.py |
import sys
import os
import io
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import pytest
import torch
import hydra
from typing import List
from src.models.module import LatentPredictionPretraining
from src.models.components.perceiver import PerceiverModel
from src.models.components.ema import EMA
from src.models.components.outputs import ModelOutput, ForwardPassOutput
from src.models.components.masking import mask_hidden_states
from tests.helpers import get_input_features
def test_model_instantiation():
"""
Test that the model can be instantiated.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
assert isinstance(model, PerceiverModel)
def test_ema_instantiation():
"""
Test that the model can instantiate an EMA object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
ema = EMA(model)
assert isinstance(ema, EMA)
def test_lightning_module_instantiation():
"""
Test that the model can instantiate a LightningModule object.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
module = hydra.utils.instantiate(cfg)
assert isinstance(module, LatentPredictionPretraining)
def test_text_throughput():
"""
Test that the model can process text.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
tokens, _, _, _, token_batch, _, _, _ = get_input_features(cfg.model.preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.hip.block_configs[-1].num_latents
d_latents = cfg.model.hip.block_configs[-1].hidden_size
# num_layers = cfg.model.num_self_attends_per_block
inputs = dict(text=tokens)
inputs_batch = dict(text=token_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
# assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
# assert len(outputs_batch.hidden_states) == num_layers + 1
def test_audio_throughput():
"""
Test that the model can process audio.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
_, _, audio_features, _, _, _, audio_batch, _ = get_input_features(cfg.model.preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(audio=audio_features)
inputs_batch = dict(audio=audio_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_image_throughput():
"""
Test that the model can process images.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
_, image_features, _, _, _, image_batch, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(image=image_features)
inputs_batch = dict(image=image_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_video_throughput():
"""
Test that the model can process video.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
_, _, _, video_features, _, _, _, video_batch = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(image=video_features)
inputs_batch = dict(image=video_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_image_text_throughput():
"""
Test that the model can process image-text pairs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
tokens, image_features, _, _, token_batch, image_batch, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(text=tokens, image=image_features)
inputs_batch = dict(text=token_batch, image=image_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_image_audio_throughput():
"""
Test that the model can process image-audio pairs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
_, image_features, audio_features, _, _, image_batch, audio_batch, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(image=image_features, audio=audio_features)
inputs_batch = dict(image=image_batch, audio=audio_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_audio_text_throughput():
"""
Test that the model can process audio-text pairs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
tokens, _, audio_features, _, token_batch, _, audio_batch, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(text=tokens, audio=audio_features)
inputs_batch = dict(text=token_batch, audio=audio_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_video_audio_throughput():
"""
Test that the model can process video-audio pairs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
_, _, audio_features, video_features, _, _, audio_batch, video_batch = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(image=video_features, audio=audio_features)
inputs_batch = dict(image=video_batch, audio=audio_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_video_text_throughput():
"""
Test that the model can process video-text pairs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
tokens, _, _, video_features, token_batch, _, _, video_batch = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(text=tokens, image=video_features)
inputs_batch = dict(text=token_batch, image=video_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_video_audio_text_throughput():
"""
Test that the model can process video-audio-text triplets.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
model = hydra.utils.instantiate(cfg.model)
tokens, _, audio_features, video_features, token_batch, _, audio_batch, video_batch = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
num_latents = cfg.model.num_latents
d_latents = cfg.model.d_latents
num_layers = cfg.model.num_self_attends_per_block
inputs = dict(text=tokens, image=video_features, audio=audio_features)
inputs_batch = dict(text=token_batch, image=video_batch, audio=audio_batch)
outputs = model(inputs)
outputs_batch = model(inputs_batch)
assert isinstance(outputs, ModelOutput)
assert isinstance(outputs.last_hidden_state, torch.Tensor)
assert isinstance(outputs.hidden_states[-1], torch.Tensor)
assert isinstance(outputs.attentions[-1], torch.Tensor)
assert isinstance(outputs.cross_attentions[0], torch.Tensor)
assert outputs.last_hidden_state.size() == (1, num_latents, d_latents)
assert len(outputs.hidden_states) == num_layers + 1
assert isinstance(outputs_batch, ModelOutput)
assert isinstance(outputs_batch.last_hidden_state, torch.Tensor)
assert isinstance(outputs_batch.hidden_states[-1], torch.Tensor)
assert isinstance(outputs_batch.attentions[-1], torch.Tensor)
assert isinstance(outputs_batch.cross_attentions[0], torch.Tensor)
assert outputs_batch.last_hidden_state.size() == (32, num_latents, d_latents)
assert len(outputs_batch.hidden_states) == num_layers + 1
def test_pl_module_forward():
"""
Test that the LightningModule forward pass returns student and teacher outputs.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
pl_module = hydra.utils.instantiate(cfg)
tokens, _, _, _, token_batch, _, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
inputs = dict(text=tokens)
inputs_batch = dict(text=token_batch)
forward_outputs = pl_module.forward(inputs)
forward_outputs_batch = pl_module.forward(inputs_batch)
assert isinstance(forward_outputs, ForwardPassOutput)
assert isinstance(forward_outputs.student_output, ModelOutput)
assert isinstance(forward_outputs.teacher_output, ModelOutput)
assert isinstance(forward_outputs_batch, ForwardPassOutput)
assert isinstance(forward_outputs_batch.student_output, ModelOutput)
assert isinstance(forward_outputs_batch.teacher_output, ModelOutput)
def test_pl_module_step():
"""
Test that the LightningModule step returns a ForwardPassOutput and a scalar loss.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
pl_module = hydra.utils.instantiate(cfg)
tokens, _, _, _, token_batch, _, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
inputs = dict(text=tokens)
inputs_batch = dict(text=token_batch)
step_outputs, loss = pl_module.step(inputs)
step_outputs_batch, loss_batch = pl_module.step(inputs_batch)
assert isinstance(step_outputs, ForwardPassOutput)
assert loss.size() == torch.Size([])
assert isinstance(step_outputs_batch, ForwardPassOutput)
assert loss_batch.size() == torch.Size([])
def test_latent_masking():
"""
Test that latent masking makes the student and teacher outputs differ.
"""
with hydra.initialize(version_base='1.1', config_path='../../configs/model', job_name="test_perceiver_instantiation"):
cfg = hydra.compose(config_name='hierarchical_perceiver')
pl_module = hydra.utils.instantiate(cfg)
assert pl_module.student.is_student
assert not pl_module.teacher.model.is_student
tokens, _, _, _, _, _, _, _ = get_input_features(cfg.model.input_preprocessor.modalities.audio.samples_per_patch)
inputs = dict(text=tokens)
step_outputs, _ = pl_module.step(inputs)
assert step_outputs.student_output.last_hidden_state.size() == step_outputs.teacher_output.last_hidden_state.size()
assert not torch.equal(step_outputs.student_output.last_hidden_state, step_outputs.teacher_output.last_hidden_state)
| multimodal-self-distillation-main | tests/unit/test_hierarchical_perceiver.py |
import pytest
from tests.helpers.run_command import run_command
from tests.helpers.runif import RunIf
"""
A couple of sanity checks to make sure the model doesn't crash with different running options.
"""
def test_fast_dev_run():
"""Test running for 1 train, val and test batch."""
command = ["train.py", "++trainer.fast_dev_run=true"]
run_command(command)
@pytest.mark.slow
def test_cpu():
"""Test running 1 epoch on CPU."""
command = ["train.py", "++trainer.max_epochs=1", "++trainer.gpus=0"]
run_command(command)
# use RunIf to skip execution of some tests, e.g. when no gpus are available
@RunIf(min_gpus=1)
@pytest.mark.slow
def test_gpu():
"""Test running 1 epoch on GPU."""
command = [
"train.py",
"++trainer.max_epochs=1",
"++trainer.gpus=1",
]
run_command(command)
@RunIf(min_gpus=1)
@pytest.mark.slow
def test_mixed_precision():
"""Test running 1 epoch with pytorch native automatic mixed precision (AMP)."""
command = [
"train.py",
"++trainer.max_epochs=1",
"++trainer.gpus=1",
"++trainer.precision=16",
]
run_command(command)
@RunIf(min_gpus=1)
@pytest.mark.slow
def test_grad_accumulation():
"""Test running 1 epoch with grad accumulation."""
command = [
"train.py",
"++trainer.max_epochs=1",
"++trainer.gpus=1",
"++trainer.precision=16",
"++trainer.grad_accumulation_steps=4",
]
run_command(command)
@pytest.mark.slow
def test_double_validation_loop():
"""Test running 1 epoch with validation loop twice per epoch."""
command = [
"train.py",
"++trainer.max_epochs=1",
"++trainer.val_check_interval=0.5",
]
run_command(command) | multimodal-self-distillation-main | tests/shell/test_basic_commands.py |
multimodal-self-distillation-main | tests/shell/__init__.py |
|
from typing import List
import pytest
import sh
def run_command(command: List[str]):
"""Default method for executing shell commands with pytest."""
msg = None
try:
sh.python(command)
except sh.ErrorReturnCode as e:
msg = e.stderr.decode()
if msg:
pytest.fail(msg=msg) | multimodal-self-distillation-main | tests/helpers/run_command.py |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import io
import requests
import wikipedia
import torch
import numpy as np
import soundfile as sf
from PIL import Image
from decord import VideoReader, cpu
from einops import rearrange, reduce
from transformers import PerceiverTokenizer, PerceiverFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.utils import logging
from src.utils import get_logger
log = get_logger(__name__)
def get_input_features(samples_per_patch):
logging.set_verbosity(logging.CRITICAL)
tokenizer = PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
feature_extractor = PerceiverFeatureExtractor.from_pretrained('deepmind/vision-perceiver-conv')
audio_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base')
try:
article = wikipedia.page("Communism")
text = article.content
tokens = tokenizer(text, truncation=True, max_length=2048, return_tensors='pt')['input_ids']
url_image = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url_image, stream=True).raw)
image_features = reduce(feature_extractor(image, return_tensors='pt')['pixel_values'].unsqueeze(0), 'i b c h w -> b c h w', 'max')
url_audio = "https://www2.cs.uic.edu/~i101/SoundFiles/gettysburg.wav"
audio = np.array(sf.read(io.BytesIO(requests.get(url_audio).content)), dtype=object)[0]
audio_features = audio_feature_extractor(audio, pad_to_multiple_of=samples_per_patch, padding='longest', return_tensors='pt', sampling_rate=16000)['input_values'].unsqueeze(2)
url_video = "https://dl.fbaipublicfiles.com/pytorchvideo/projects/archery.mp4"
video = rearrange(VideoReader(requests.get(url_video, stream=True).raw, ctx=cpu(0)).get_batch(range(0, 16)).asnumpy(), 'f h w c -> f c h w')
video_features = feature_extractor(list(video), return_tensors='pt')['pixel_values'].unsqueeze(0)
log.info("Successfully downloaded sample data and computed input features.")
except Exception as e:
log.error(f"Failed to create input features: {e}")
log.error("Could not download the test files. Initializing with random tensors.")
tokens = torch.randn(1, 32768)
image_features = torch.randn((1, 3, 224, 224))
audio_features = torch.randn((1, 387648, 1))
video_features = torch.randn((1, 16, 3, 224, 224))
token_batch = tokens.expand(32, -1)
image_batch = image_features.expand(32, -1, -1, -1)
audio_batch = audio_features.expand(32, -1, -1)
video_batch = video_features.expand(32, -1, -1, -1, -1)
logging.set_verbosity(logging.WARNING)
return tokens, image_features, audio_features, video_features, token_batch, image_batch, audio_batch, video_batch
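# --- Illustrative usage sketch (added for documentation, not used by the test suite). ---
# The unit tests unpack the returned tuple positionally; the default samples_per_patch
# value below is an assumption -- in the tests it comes from
# cfg.model.input_preprocessor.modalities.audio.samples_per_patch.
def example_unpack_input_features(samples_per_patch: int = 16) -> dict:
    """Show how the single-sample and batch-of-32 features returned above are unpacked."""
    tokens, image, audio, video, token_batch, image_batch, audio_batch, video_batch = get_input_features(samples_per_patch)
    single_sample = dict(text=tokens, image=image, audio=audio, video=video)
    batch = dict(text=token_batch, image=image_batch, audio=audio_batch, video=video_batch)
    return dict(single=single_sample, batch=batch)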
| multimodal-self-distillation-main | tests/helpers/__init__.py |
import platform
from importlib.util import find_spec
"""
Adapted from:
https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/utilities/imports.py
"""
def _module_available(module_path: str) -> bool:
"""Check if a path is available in your environment.
>>> _module_available('os')
True
>>> _module_available('bla.bla')
False
"""
try:
return find_spec(module_path) is not None
except ModuleNotFoundError:
# Python 3.7+
return False
_IS_WINDOWS = platform.system() == "Windows"
_DEEPSPEED_AVAILABLE = not _IS_WINDOWS and _module_available("deepspeed")
_FAIRSCALE_AVAILABLE = not _IS_WINDOWS and _module_available("fairscale.nn")
_RPC_AVAILABLE = not _IS_WINDOWS and _module_available("torch.distributed.rpc") | multimodal-self-distillation-main | tests/helpers/module_available.py |
import sys
from typing import Optional
import pytest
import torch
from packaging.version import Version
from pkg_resources import get_distribution
"""
Adapted from:
https://github.com/PyTorchLightning/pytorch-lightning/blob/master/tests/helpers/runif.py
"""
from tests.helpers.module_available import (
_DEEPSPEED_AVAILABLE,
_FAIRSCALE_AVAILABLE,
_IS_WINDOWS,
_RPC_AVAILABLE,
)
class RunIf:
"""RunIf wrapper for conditional skipping of tests.
Fully compatible with `@pytest.mark`.
Example:
@RunIf(min_torch="1.8")
@pytest.mark.parametrize("arg1", [1.0, 2.0])
def test_wrapper(arg1):
assert arg1 > 0
"""
def __new__(
self,
min_gpus: int = 0,
min_torch: Optional[str] = None,
max_torch: Optional[str] = None,
min_python: Optional[str] = None,
skip_windows: bool = False,
rpc: bool = False,
fairscale: bool = False,
deepspeed: bool = False,
**kwargs,
):
"""
Args:
min_gpus: min number of gpus required to run test
min_torch: minimum pytorch version to run test
max_torch: maximum pytorch version to run test
min_python: minimum python version required to run test
skip_windows: skip test for Windows platform
rpc: requires Remote Procedure Call (RPC)
fairscale: if `fairscale` module is required to run the test
deepspeed: if `deepspeed` module is required to run the test
kwargs: native pytest.mark.skipif keyword arguments
"""
conditions = []
reasons = []
if min_gpus:
conditions.append(torch.cuda.device_count() < min_gpus)
reasons.append(f"GPUs>={min_gpus}")
if min_torch:
torch_version = get_distribution("torch").version
conditions.append(Version(torch_version) < Version(min_torch))
reasons.append(f"torch>={min_torch}")
if max_torch:
torch_version = get_distribution("torch").version
conditions.append(Version(torch_version) >= Version(max_torch))
reasons.append(f"torch<{max_torch}")
if min_python:
py_version = (
f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
)
conditions.append(Version(py_version) < Version(min_python))
reasons.append(f"python>={min_python}")
if skip_windows:
conditions.append(_IS_WINDOWS)
reasons.append("does not run on Windows")
if rpc:
conditions.append(not _RPC_AVAILABLE)
reasons.append("RPC")
if fairscale:
conditions.append(not _FAIRSCALE_AVAILABLE)
reasons.append("Fairscale")
if deepspeed:
conditions.append(not _DEEPSPEED_AVAILABLE)
reasons.append("Deepspeed")
reasons = [rs for cond, rs in zip(conditions, reasons) if cond]
return pytest.mark.skipif(
condition=any(conditions),
reason=f"Requires: [{' + '.join(reasons)}]",
**kwargs,
) | multimodal-self-distillation-main | tests/helpers/runif.py |
import os
from typing import List, Optional
import hydra
from omegaconf import DictConfig
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.profiler import Profiler
from src import utils
log = utils.get_logger(__name__)
def train(config: DictConfig) -> Optional[float]:
"""Contains the training pipeline.
Can additionally evaluate model on a testset, using best weights achieved during training.
Args:
config (DictConfig): Configuration composed by Hydra.
Returns:
Optional[float]: Metric score for hyperparameter optimization.
"""
# Set seed for random number generators in pytorch, numpy and python.random
if config.get("seed"):
seed_everything(config.seed, workers=True)
# Convert relative ckpt path to absolute path if necessary
ckpt_path = config.trainer.get("resume_from_checkpoint")
if ckpt_path and not os.path.isabs(ckpt_path):
config.trainer.resume_from_checkpoint = os.path.join(
hydra.utils.get_original_cwd(), ckpt_path
)
# Init lightning model
log.info(f"Instantiating model <{config.model._target_}>")
model: LightningModule = hydra.utils.instantiate(config.model)
# Init lightning datamodule
log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
# Init lightning callbacks
callbacks: List[Callback] = []
if "callbacks" in config:
for _, cb_conf in config.callbacks.items():
if "_target_" in cb_conf:
log.info(f"Instantiating callback <{cb_conf._target_}>")
callbacks.append(hydra.utils.instantiate(cb_conf))
# Init lightning loggers
logger: List[LightningLoggerBase] = []
if "logger" in config:
for _, lg_conf in config.logger.items():
if "_target_" in lg_conf:
log.info(f"Instantiating logger <{lg_conf._target_}>")
logger.append(hydra.utils.instantiate(lg_conf))
# Init lightning profilers
profiler: List[Profiler] = []
if "profiler" in config:
for _, pf_conf in config.profiler.items():
if "_target_" in pf_conf:
log.info(f"Instantiating profiler <{pf_conf._target_}>")
profiler.append(hydra.utils.instantiate(pf_conf))
# Init lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
trainer: Trainer = hydra.utils.instantiate(
config.trainer, callbacks=callbacks, logger=logger, _convert_="partial", profiler=profiler
)
# Send some parameters from config to all lightning loggers
log.info("Logging hyperparameters!")
utils.log_hyperparameters(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
#TODO Here should go the optional tune step
# https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.tuner.tuning.Tuner.html#pytorch_lightning.tuner.tuning.Tuner
# https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#tune
# Train the model
if config.get("train"):
log.info("Starting training!")
trainer.fit(model=model, datamodule=datamodule)
# Get metric score for hyperparameter optimization
optimized_metric = config.get("optimized_metric")
if optimized_metric and optimized_metric not in trainer.callback_metrics:
raise Exception(
"Metric for hyperparameter optimization not found! "
"Make sure the `optimized_metric` in `hparams_search` config is correct!"
)
score = trainer.callback_metrics.get(optimized_metric)
log.info(f"Metric {optimized_metric} for hyperparameter optimization: {score}")
# Test the model
if config.get("test"):
ckpt_path = "best"
if not config.get("train") or config.trainer.get("fast_dev_run"):
ckpt_path = None
log.info("Starting testing!")
trainer.test(model=model, datamodule=datamodule, ckpt_path=ckpt_path)
# Make sure everything closed properly
log.info("Finalizing!")
utils.finish(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
# Print path to best checkpoint
if not config.trainer.get("fast_dev_run") and config.get("train"):
log.info(f"Best model ckpt at {trainer.checkpoint_callback.best_model_path}")
# Return metric score for hyperparameter optimization
return score | multimodal-self-distillation-main | src/training_pipeline.py |
import os
from typing import List
import hydra
from omegaconf import DictConfig
from pytorch_lightning import LightningDataModule, LightningModule, Trainer, seed_everything
from pytorch_lightning.loggers import LightningLoggerBase
from src import utils
log = utils.get_logger(__name__)
def test(config: DictConfig) -> None:
"""Contains minimal example of the testing pipeline.
Evaluates given checkpoint on a testset.
Args:
config (DictConfig): Configuration composed by Hydra.
Returns:
None
"""
# Set seed for random number generators in pytorch, numpy and python.random
if config.get("seed"):
seed_everything(config.seed, workers=True)
# Convert relative ckpt path to absolute path if necessary
if not os.path.isabs(config.ckpt_path):
config.ckpt_path = os.path.join(
hydra.utils.get_original_cwd(), config.ckpt_path
)
# Init lightning model
log.info(f"Instantiating model <{config.model._target_}>")
model: LightningModule = hydra.utils.instantiate(config.model)
# Init lightning datamodule
log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
# Init lightning loggers
logger: List[LightningLoggerBase] = []
if "logger" in config:
for _, lg_conf in config.logger.items():
if "_target_" in lg_conf:
log.info(f"Instantiating logger <{lg_conf._target_}>")
logger.append(hydra.utils.instantiate(lg_conf))
# Init lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
trainer: Trainer = hydra.utils.instantiate(config.trainer, logger=logger)
# Log hyperparameters
trainer.logger.log_hyperparams({"ckpt_path": config.ckpt_path})
log.info("Starting testing!")
trainer.test(model=model, datamodule=datamodule, ckpt_path=config.ckpt_path) | multimodal-self-distillation-main | src/testing_pipeline.py |
multimodal-self-distillation-main | src/__init__.py |
|
from typing import Any, Dict, Optional
import torch
import wandb
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.utilities import rank_zero_only
from transformers import PerceiverTokenizer
from transformers.utils import logging
from src.models.components.knn import k_nearest_neighbor
from src.models.components.outputs import ForwardPassOutput
from src.utils import get_wandb_logger, exists
class MediaCallback(Callback):
def __init__(self, log_every_n_steps) -> None:
self.log_every_n_steps = log_every_n_steps
logging.set_verbosity(logging.CRITICAL)
self.tokenizer : PerceiverTokenizer = PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
logging.set_verbosity(logging.WARNING)
def on_fit_start(
self, trainer: Trainer,
pl_module: LightningModule
) -> None:
self.logger : WandbLogger = get_wandb_logger(trainer=trainer)
@rank_zero_only
def on_validation_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Optional[Dict],
batch: dict,
batch_idx: int,
dataloader_idx: int
) -> None:
self.log_media(batch, outputs['forward_pass_output'], batch_idx)
@rank_zero_only
def on_test_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Optional[Dict],
batch: Any,
batch_idx: int,
dataloader_idx: int
) -> None:
self.log_media(batch, outputs['forward_pass_output'], batch_idx)
def log_media(self, batch: dict, outputs: ForwardPassOutput, step: int) -> None:
if step % self.log_every_n_steps == 0:
text = self.tokenizer.batch_decode(batch['text'].detach().cpu(), skip_special_tokens=True) if 'text' in batch else None
audio = batch['audio'].detach().cpu() if 'audio' in batch else None
image = batch['image'].detach().cpu() if 'image' in batch else None
video = batch['video'].detach().cpu() if 'video' in batch else None
align_fuse = outputs.align_fuse
features = outputs.student_output.pooler_output.detach().cpu()
queries = outputs.teacher_output.pooler_output.detach().cpu()
labels = outputs.labels.detach().cpu() if exists(outputs.labels) else torch.tensor(list(range(len(features))))
table = wandb.Table(columns=['query', 'ground truth', 'similarity ground truth', '#1 prediction', 'similarity #1 prediction'])
_, similarity_gt, top_k_dist, top_k_ids, probs, _, _ = k_nearest_neighbor(
prediction_features=features,
query_features=queries,
labels=labels,
k=len(labels),
chunking=False
)
for i, sim_gt in enumerate(similarity_gt):
# unimodal cases
if align_fuse[0] == align_fuse[1]:
if align_fuse == [['text'],['text']]:
pass
elif align_fuse == [['image'],['image']]:
pass
elif align_fuse == [['audio'],['audio']]:
pass
elif align_fuse == [['video'],['video']]:
pass
elif align_fuse == [['video', 'audio'],['video', 'audio']]:
pass
else:
raise NotImplementedError(f'Unimodal alignment and/or fusion case: <<{align_fuse}>> not implemented')
# multimodal cases
else:
if align_fuse == [['text'],['audio']]:
text_query = text[i]
audio_gt = audio[i]
audio_pred = audio[top_k_ids[i][0]]
audio_pred_caption = text[top_k_ids[i][0]]
table.add_data(
text_query,
wandb.Audio(audio_gt, sample_rate=16000, caption=text_query),
sim_gt,
wandb.Audio(audio_pred, sample_rate=16000, caption=audio_pred_caption),
top_k_dist[i][0]
)
elif align_fuse == [['text'],['image']]:
table.add_data(
batch['text'][i],
wandb.Image(image[i], caption=text[i]),
sim_gt,
wandb.Image(image[top_k_ids[i][0]], caption=text[top_k_ids[i][0]]),
top_k_dist[i][0]
)
elif align_fuse == [['audio'],['image']]:
pass
elif align_fuse == [['text'],['video']]:
pass
elif align_fuse == [['text'],['video', 'audio']]:
pass
else:
raise NotImplementedError(f'Multimodal alignment and/or fusion case: <<{align_fuse}>> not implemented')
self.logger.experiment.log({'predictions': table})
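# --- Illustrative note (added for documentation; the shapes below are assumptions, not checked by this callback). ---
# For the implemented [['text'], ['audio']] retrieval case, `log_media` expects roughly:
#   batch = {'text': LongTensor[B, seq_len], 'audio': FloatTensor[B, num_samples, 1]}
#   outputs.align_fuse == [['text'], ['audio']]
# and it logs, per query caption, the ground-truth audio clip and the top-1 retrieved clip
# together with their similarity scores from k_nearest_neighbor.
# The other align_fuse branches above are still placeholders (`pass`).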
| multimodal-self-distillation-main | src/callbacks/media_callback.py |
from enum import Enum
from typing import Any, Optional, Dict
import torch
import pytorch_lightning as pl
from pytorch_lightning import Callback, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from torchmetrics import Recall, Accuracy, RetrievalMRR
from src.models.components.outputs import ForwardPassOutput
from src.models.components.knn import k_nearest_neighbor
from src import utils
log = utils.get_logger(__name__)
class Metric(Enum):
MRR = 'MRR'
ACCURACY = 'Accuracy@k'
RECALL = 'Recall@k'
class MetricsCallback(Callback):
"""In PyTorch lighning logging metrics is agnostic to the logger used.
Simply calling self.log() or pl_module.log() will log the metrics to all
loggers passed to the trainer.
For image-text retrieval tasks we could use the metric Recall@K (R@K) like here:
https://arxiv.org/pdf/2103.01913v2.pdf
Other callbacks can be used to log files, data, checkpoints and artifacts
specific to wandb.
Args:
top_k (list): list of k values for which top-k metrics (Accuracy@k, Recall@k) are computed
logging_interval (int): how often to log
on_step (bool): whether to compute and log metrics on every batch
on_epoch (bool): whether to accumulate predictions and compute metrics at the end of each epoch
"""
def __init__(
self,
top_k: list = [1, 10, 100],
logging_interval: int = 10,
on_step: bool = False,
on_epoch: bool = True,
) -> None:
self.top_k = top_k
self.logging_interval = logging_interval
self.on_step = on_step
self.on_epoch = on_epoch
self.val_accuracy = None
self.test_accuracy = None
self.val_recall = {k: Recall(top_k=k) for k in top_k} #TODO in case on_step: k might be bigger than the batch size, find fix. -> k=batch_size, chunking=False
self.test_recall = {k: Recall(top_k=k) for k in top_k}
self.val_mrr = RetrievalMRR()
self.test_mrr = RetrievalMRR()
self.val_student_preds = []
self.val_teacher_preds = []
self.val_labels = []
self.test_student_preds = []
self.test_teacher_preds = []
self.test_labels = []
self.metric = None
self.align_fuse = None
self.output_modalities = None
@rank_zero_only
def on_train_batch_end(
self,
trainer: pl.Trainer,
pl_module: pl.LightningModule,
outputs: Optional[Dict],
batch: Any,
batch_idx: int
) -> None:
fwd_outputs: ForwardPassOutput = outputs['forward_pass_output']
self.output_modalities = fwd_outputs.output_modalities
self.align_fuse = fwd_outputs.align_fuse
self.metric = fwd_outputs.metric
features = fwd_outputs.student_output.pooler_output.detach().cpu()
queries = fwd_outputs.teacher_output.pooler_output.detach().cpu()
labels = fwd_outputs.labels.detach().cpu() if utils.exists(fwd_outputs.labels) else torch.tensor(list(range(len(features))))
self.compute_metrics(
predictions=features,
queries=queries,
labels=labels,
pl_module=pl_module,
mrr_metric=self.val_mrr,
accuracy_metric=self.val_accuracy,
recall_metric=self.val_recall,
on_step=True,
on_epoch=False,
train_or_val_or_test='train'
)
@rank_zero_only
def on_validation_batch_end(
self,
trainer: pl.Trainer,
pl_module: pl.LightningModule,
outputs: Optional[Dict],
batch: Any,
batch_idx: int,
dataloader_idx: int
) -> None:
# for metrics that don't require whole val/test set sync_dist=True
# is enough: https://github.com/Lightning-AI/lightning/discussions/4702
# when using torchmetrics, sync_dist=True is not needed, calling compute() will sync
# the metrics across all processes: https://github.com/Lightning-AI/lightning/discussions/6501#discussioncomment-553152
fwd_outputs: ForwardPassOutput = outputs['forward_pass_output']
self.output_modalities = fwd_outputs.output_modalities
self.align_fuse = fwd_outputs.align_fuse
self.metric = fwd_outputs.metric
# initialize torchmetrics Accuracy@k objects
if self.val_accuracy is None and fwd_outputs.num_classes is not None and Metric.ACCURACY.value in self.metric:
self.val_accuracy = {k: Accuracy(top_k=k, num_classes=fwd_outputs.num_classes) for k in self.top_k}
features = fwd_outputs.student_output.pooler_output.detach().cpu()
queries = fwd_outputs.teacher_output.pooler_output.detach().cpu()
labels = fwd_outputs.labels.detach().cpu() if utils.exists(fwd_outputs.labels) else None
self.val_student_preds.append(features)
self.val_teacher_preds.append(queries)
self.val_labels.append(labels)
if self.on_step:
if labels is None:
# we treat retrieval as classification with a unique class per sample
labels = torch.tensor(list(range(len(features))))
self.compute_metrics(
predictions=features,
queries=queries,
labels=labels,
pl_module=pl_module,
mrr_metric=self.val_mrr,
accuracy_metric=self.val_accuracy,
recall_metric=self.val_recall,
on_step=True,
on_epoch=False,
train_or_val_or_test='val'
)
@rank_zero_only
def on_validation_epoch_end(
self,
trainer: pl.Trainer,
pl_module: pl.LightningModule
) -> None:
#? use self.output_modalities to properly assign these variables
features = torch.cat(self.val_student_preds, dim=0)
queries = torch.cat(self.val_teacher_preds, dim=0)
if set(self.val_labels) == {None}:
# we treat retrieval as classification with a unique class per sample
labels = torch.tensor(list(range(len(features))))
else:
labels = torch.cat(self.val_labels)
if self.on_epoch:
self.compute_metrics(
predictions=features,
queries=queries,
labels=labels,
pl_module=pl_module,
mrr_metric=self.val_mrr,
accuracy_metric=self.val_accuracy,
recall_metric=self.val_recall,
on_step=False,
on_epoch=True,
train_or_val_or_test='val'
)
self.val_student_preds = []
self.val_teacher_preds = []
self.val_labels = []
@rank_zero_only
def on_test_batch_end(
self,
trainer: pl.Trainer,
pl_module: pl.LightningModule,
outputs: Optional[Dict],
batch: Any,
batch_idx: int,
dataloader_idx: int
) -> None:
fwd_outputs: ForwardPassOutput = outputs['forward_pass_output']
self.output_modalities = fwd_outputs.output_modalities
self.align_fuse = fwd_outputs.align_fuse
self.metric = fwd_outputs.metric
if self.test_accuracy is None and fwd_outputs.num_classes is not None and Metric.ACCURACY.value in self.metric:
self.test_accuracy = {k: Accuracy(top_k=k, num_classes=fwd_outputs.num_classes) for k in self.top_k}
features = fwd_outputs.student_output.pooler_output.detach().cpu()
queries = fwd_outputs.teacher_output.pooler_output.detach().cpu()
labels = fwd_outputs.labels.detach().cpu() if utils.exists(fwd_outputs.labels) else None
self.test_student_preds.append(features)
self.test_teacher_preds.append(queries)
self.test_labels.append(labels)
if self.on_step:
if labels is None:
# we treat retrieval as classification with a unique class per sample
labels = torch.tensor(list(range(len(features))))
self.compute_metrics(
predictions=features,
queries=queries,
labels=labels,
pl_module=pl_module,
mrr_metric=self.test_mrr,
accuracy_metric=self.test_accuracy,
recall_metric=self.test_recall,
on_step=True,
on_epoch=False,
train_or_val_or_test='test'
)
@rank_zero_only
def on_test_epoch_end(
self,
trainer: pl.Trainer,
pl_module: pl.LightningModule
) -> None:
#? use self.output_modalities to properly assign these variables
features = torch.cat(self.test_student_preds, dim=0)
queries = torch.cat(self.test_teacher_preds, dim=0)
if set(self.test_labels) == {None}:
# we treat retrieval as classification with a unique class per sample
labels = torch.tensor(list(range(len(features))))
else:
labels = torch.cat(self.test_labels)
if self.on_epoch:
self.compute_metrics(
predictions=features,
queries=queries,
labels=labels,
pl_module=pl_module,
mrr_metric=self.test_mrr,
accuracy_metric=self.test_accuracy,
recall_metric=self.test_recall,
on_step=False,
on_epoch=True,
train_or_val_or_test='test'
)
self.test_student_preds = []
self.test_teacher_preds = []
self.test_labels = []
def compute_metrics(
self,
predictions,
queries,
labels,
pl_module: LightningModule,
mrr_metric: RetrievalMRR,
accuracy_metric: dict,
recall_metric: dict,
on_step: bool = False,
on_epoch: bool = True,
train_or_val_or_test: str = 'val'
) -> None:
when_to_log = '_on_step' if on_step else '_on_epoch'
if on_step:
k = len(labels)
elif on_epoch:
k = max(self.top_k)
probabilities, _, labels = k_nearest_neighbor(
prediction_features=predictions,
query_features=queries,
labels=labels,
k=k
)
if Metric.MRR.value in self.metric:
preds = torch.flatten(probabilities)
target = torch.eye(len(labels)).flatten()
indexes = torch.tensor([[n]*len(labels) for n in range(len(labels))], dtype=torch.long).flatten()
pl_module.log(
f'{train_or_val_or_test}/mrr@{len(labels)}{when_to_log}',
mrr_metric(preds, target, indexes=indexes),
prog_bar=True,
on_step=on_step,
on_epoch=on_epoch,
sync_dist=True
)
if Metric.ACCURACY.value in self.metric:
for key, value in accuracy_metric.items():
if key < len(labels):
pl_module.log(
f'{train_or_val_or_test}/accuracy@{key}{when_to_log}',
value(probabilities, labels),
prog_bar=True,
on_step=on_step,
on_epoch=on_epoch,
sync_dist=True
)
if Metric.RECALL.value in self.metric:
for key, value in recall_metric.items():
if key < len(labels):
pl_module.log(
f'{train_or_val_or_test}/recall@{key}{when_to_log}',
value(probabilities, labels),
prog_bar=True,
on_step=on_step,
on_epoch=on_epoch,
sync_dist=True
)
else:
raise Exception('No metric specified or metric not supported')
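# --- Illustrative usage sketch (added for documentation, not part of the training pipeline). ---
# In this project the callback is normally instantiated from a Hydra config; a plain
# PyTorch Lightning wiring would look roughly like the sketch below. The `model` and
# `datamodule` arguments are placeholders -- any LightningModule whose *_step methods return
# a dict containing a ForwardPassOutput under the key 'forward_pass_output' would work.
def example_trainer_with_metrics_callback(model, datamodule):
    metrics_callback = MetricsCallback(top_k=[1, 10, 100], on_step=False, on_epoch=True)
    trainer = pl.Trainer(max_epochs=1, callbacks=[metrics_callback])
    trainer.fit(model=model, datamodule=datamodule)
    return trainer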
| multimodal-self-distillation-main | src/callbacks/metrics_callback.py |
multimodal-self-distillation-main | src/callbacks/__init__.py |
|
from typing import List
import matplotlib.pyplot as plt
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
from src.utils import get_wandb_logger
class LogConfusionMatrix(Callback):
"""Generate confusion matrix every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module) -> None:
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate confusion matrix."""
if self.ready:
logger = get_wandb_logger(trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
# set figure size
plt.figure(figsize=(14, 8))
# set labels size
sn.set(font_scale=1.4)
# set font size
sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
# names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
# according to wandb docs this should also work but it crashes
# experiment.log(f{"confusion_matrix/{experiment.name}": plt})
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
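# --- Illustrative note (added for documentation): the confusion-matrix and F1/precision/recall
# --- callbacks in this module expect each validation step to return predictions and targets.
# --- A minimal compatible validation_step might look like the following sketch (the forward
# --- call and batch format are assumptions, not this repo's actual LightningModule):
#
#     def validation_step(self, batch, batch_idx):
#         x, y = batch
#         logits = self(x)
#         preds = torch.argmax(logits, dim=-1)
#         return {"preds": preds, "targets": y}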
class LogF1PrecRecHeatmap(Callback):
"""Generate f1, precision, recall heatmap every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self, class_names: List[str] = None):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate f1, precision and recall heatmap."""
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
f1 = f1_score(targets, preds, average=None)
r = recall_score(targets, preds, average=None)
p = precision_score(targets, preds, average=None)
data = [f1, p, r]
# set figure size
plt.figure(figsize=(14, 3))
# set labels size
sn.set(font_scale=1.2)
# set font size
sn.heatmap(
data,
annot=True,
annot_kws={"size": 10},
fmt=".3f",
yticklabels=["F1", "Precision", "Recall"],
)
# names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogImagePredictions(Callback):
"""Logs a validation batch and their predictions to wandb.
Example adapted from:
https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
"""
def __init__(self, num_samples: int = 8):
super().__init__()
self.num_samples = num_samples
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
# get a validation batch from the validation data loader
val_samples = next(iter(trainer.datamodule.val_dataloader()))
val_imgs, val_labels = val_samples
# run the batch through the network
val_imgs = val_imgs.to(device=pl_module.device)
logits = pl_module(val_imgs)
preds = torch.argmax(logits, dim=-1)
# log the images as wandb Image
experiment.log(
{
f"Images/{experiment.name}": [
wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
for x, pred, y in zip(
val_imgs[: self.num_samples],
preds[: self.num_samples],
val_labels[: self.num_samples],
)
]
}
) | multimodal-self-distillation-main | src/callbacks/predictions_callback.py |
import subprocess
from pathlib import Path
from typing import List
import wandb
from pytorch_lightning import Callback
from pytorch_lightning.utilities import rank_zero_only
from src.utils import get_wandb_logger
class WatchModel(Callback):
"""Make wandb watch model at the beginning of the run."""
def __init__(self, log: str = 'all', log_freq: int = 10):
self.log = log
self.log_freq = log_freq
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq, log_graph=True)
class UploadCodeAsArtifact(Callback):
"""Upload all code files to wandb as an artifact, at the beginning of the run."""
def __init__(self, code_dir: str, use_git: bool = True):
"""
Args:
code_dir: the code directory
use_git: if using git, then upload all files that are not ignored by git.
if not using git, then upload all '*.py' file
"""
self.code_dir = code_dir
self.use_git = use_git
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
code = wandb.Artifact("project-source", type="code")
if self.use_git:
# get .git folder path
git_dir_path = Path(
subprocess.check_output(["git", "rev-parse", "--git-dir"]).strip().decode("utf8")
).resolve()
for path in Path(self.code_dir).resolve().rglob("*"):
# don't upload files ignored by git
# https://alexwlchan.net/2020/11/a-python-function-to-ignore-a-path-with-git-info-exclude/
command = ["git", "check-ignore", "-q", str(path)]
not_ignored = subprocess.run(command).returncode == 1
# don't upload files from .git folder
not_git = not str(path).startswith(str(git_dir_path))
if path.is_file() and not_git and not_ignored:
code.add_file(str(path), name=str(path.relative_to(self.code_dir)))
else:
for path in Path(self.code_dir).resolve().rglob("*.py"):
code.add_file(str(path), name=str(path.relative_to(self.code_dir)))
experiment.log_artifact(code)
class UploadCheckpointsAsArtifact(Callback):
"""Upload checkpoints to wandb as an artifact, at the end of run."""
def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
self.ckpt_dir = ckpt_dir
self.upload_best_only = upload_best_only
@rank_zero_only
def on_keyboard_interrupt(self, trainer, pl_module):
self.on_train_end(trainer, pl_module)
@rank_zero_only
def on_train_end(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
if self.upload_best_only:
ckpts.add_file(trainer.checkpoint_callback.best_model_path)
else:
for path in Path(self.ckpt_dir).rglob("*.ckpt"):
ckpts.add_file(str(path))
experiment.log_artifact(ckpts)
| multimodal-self-distillation-main | src/callbacks/artifacts_callback.py |
import pytorch_lightning as pl
import torchvision.datasets as ds
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
class COCOCaptionsDatamodule(pl.LightningDataModule):
def __init__(
self,
data_dir: str,
train_batch_size: int,
val_batch_size: int,
test_batch_size: int,
pin_memory: bool = True):
super().__init__()
# this line allows to access init params with 'self.hparams' attribute
self.save_hyperparameters()
self.align_fuse = [['text'], ['image']]
self.metric = ['Recall@k', 'MRR']
def setup(self, stage: str):
raise NotImplementedError
def prepare_data(self):
raise NotImplementedError
def train_dataloader(self):
coco_train = ds.CocoCaptions(
root=f'{self.hparams.data_dir}/train2014',
annFile=f'{self.hparams.data_dir}/annotations/captions_train2014.json',
transform=transforms.ToTensor(),
)
return DataLoader(
dataset=coco_train,
batch_size=self.hparams.train_batch_size,
collate_fn=self.collate_fn,
)
def val_dataloader(self):
coco_val = ds.CocoCaptions(
root=f'{self.hparams.data_dir}/val2014',
annFile=f'{self.hparams.data_dir}/annotations/captions_val2014.json',
transform=transforms.ToTensor(),
)
return DataLoader(
dataset=coco_val,
batch_size=self.hparams.val_batch_size,
collate_fn=self.collate_fn,
)
def test_dataloader(self):
coco_test = ds.CocoCaptions(
root=f'{self.hparams.data_dir}/test2014',
            annFile=f'{self.hparams.data_dir}/annotations/captions_train2014.json',  #! NOTE: COCO test2014 has no public caption annotations, so the train annotation file is reused here as a stand-in
transform=transforms.ToTensor(),
)
return DataLoader(
dataset=coco_test,
batch_size=self.hparams.test_batch_size,
collate_fn=self.collate_fn,
)
def collate_fn(self, batch):
        # placeholder collate function: caption/image batching is not implemented yet
        print(batch)
return dict(text=None, image=None, align_fuse=self.align_fuse, metric=self.metric)
| multimodal-self-distillation-main | src/datamodules/cococaptions_datamodule.py |
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import LightningDataModule
class SpeechCOCODataModule(LightningDataModule):
"""
A DataModule implements 5 key methods:
- prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
- setup (things to do on every accelerator in distributed mode)
- train_dataloader (the training dataloader)
- val_dataloader (the validation dataloader(s))
- test_dataloader (the test dataloader(s))
This allows you to share a full dataset without explaining how to download,
split, transform and process the data.
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
"""
def __init__(
self,
data_dir,
batch_size,
num_workers,
pin_memory=True
):
super().__init__()
self.save_hyperparameters()
self.train_dataset = None
self.val_dataset = None
self.test_dataset = None
self.align_fuse = [['audio'], ['image']]
def prepare_data(self) -> None:
pass
def setup(self, stage=None):
if stage == "fit" or stage is None:
self.train_dataset = Dataset()
self.val_dataset = Dataset()
if stage == "test" or stage is None:
self.test_dataset = Dataset()
if stage == "predict" or stage is None:
raise Exception("""This DataModule is not designed to be used for prediction.
Please use the Spotify DataModule for prediction.""")
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.hparams.batch_size,
shuffle=True,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory
)
def val_dataloader(self):
return DataLoader(
self.val_dataset,
batch_size=self.hparams.batch_size,
shuffle=False,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
batch_size=self.hparams.batch_size,
shuffle=False,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory
)
def collate_fn(self, batch):
        # placeholder collate function: audio/image batching is not implemented yet
        print(type(batch))
print(batch) | multimodal-self-distillation-main | src/datamodules/speechcoco_datamodule.py |
multimodal-self-distillation-main | src/datamodules/__init__.py |
|
import os
import io
import urllib
import PIL.Image
import torch
import datasets
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import Optional
from transformers import PerceiverFeatureExtractor, PerceiverTokenizer
from datasets import load_dataset, load_from_disk
from datasets.utils.file_utils import get_datasets_user_agent
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import LightningDataModule
USER_AGENT = get_datasets_user_agent()
class ConceptualCaptionsDataModule(LightningDataModule):
"""
A DataModule implements 5 key methods:
- prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
- setup (things to do on every accelerator in distributed mode)
- train_dataloader (the training dataloader)
- val_dataloader (the validation dataloader(s))
- test_dataloader (the test dataloader(s))
This allows you to share a full dataset without explaining how to download,
split, transform and process the data.
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
"""
def __init__(
self,
data_dir,
train_batch_size,
val_batch_size,
test_batch_size,
pin_memory=True):
super().__init__()
# this line allows to access init params with 'self.hparams' attribute
self.save_hyperparameters()
        self.num_workers = os.cpu_count() * 5  # oversubscribed relative to CPU count; used both as download threads (I/O-bound) and as DataLoader workers
self.tokenizer = PerceiverTokenizer()
self.cc_train: Optional[Dataset] = None
self.cc_val: Optional[Dataset] = None
self.cc_test: Optional[Dataset] = None
self.align_fuse = [['text'], ['image']]
def fetch_single_image(self, image_url, timeout=None, retries=0):
for _ in range(retries + 1):
try:
request = urllib.request.Request(
image_url,
data=None,
headers={"user-agent": USER_AGENT},
)
with urllib.request.urlopen(request, timeout=timeout) as req:
image = PIL.Image.open(io.BytesIO(req.read()))
break
except Exception:
image = None
return image
def fetch_images(self, batch, num_threads, timeout=None, retries=0):
fetch_single_image_with_args = partial(self.fetch_single_image, timeout=timeout, retries=retries)
with ThreadPoolExecutor(max_workers=num_threads) as executor:
batch["image"] = list(executor.map(fetch_single_image_with_args, batch["image_url"]))
return batch
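    # A brief sketch of how fetch_images is consumed (this mirrors prepare_data below):
    #   dataset.map(self.fetch_images, batched=True, batch_size=100,
    #               fn_kwargs={"num_threads": self.num_workers})
    # i.e. `datasets.map` hands over batches of rows and the images are downloaded in parallel threads.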
def prepare_data(self):
"""Download data if needed. This method is called only from a single GPU.
Do not use it to assign state (self.x = y)."""
if os.path.isfile(f'{self.hparams.data_dir}/full/dataset.arrow'):
            print('Dataset including images already downloaded')
else:
load_dataset(
'conceptual_captions', split='validation', cache_dir=self.hparams.data_dir
).map(
function=self.fetch_images, batched=True, batch_size=100, fn_kwargs={"num_threads": self.num_workers}
).filter(
lambda x: x['image'] is not None and x['image'].mode == 'RGB'
).save_to_disk(
f'{self.hparams.data_dir}/full'
)
def setup(self, stage=None):
"""Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`.
This method is called by lightning twice for `trainer.fit()` and `trainer.test()`, so be careful if you do a random split!
The `stage` can be used to differentiate whether it's called before trainer.fit()` or `trainer.test()`."""
# Assign train/val/ datasets for use in dataloaders, data should already be downloaded
if stage == "fit" or stage is None:
            self.cc_train = ConceptualCaptionsDataset(
                # load_from_disk only accepts the dataset path; split/cache_dir are not valid kwargs
                load_from_disk(f'{self.hparams.data_dir}/full/')
            )
if stage == "validate" or stage is None:
            self.cc_val = ConceptualCaptionsDataset(
                load_from_disk(f'{self.hparams.data_dir}/full/')
            )
if stage == "test" or stage is None:
raise Exception("""This dataset's test set it not available.""")
if stage == "predict" or stage is None:
raise Exception("""This DataModule is not designed to be used for prediction.
Please use the Spotify DataModule for prediction.""")
def train_dataloader(self):
return DataLoader(
self.cc_train,
batch_size=self.hparams.train_batch_size,
shuffle=True,
collate_fn=self.collate_fn,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
def val_dataloader(self):
return DataLoader(
self.cc_val,
batch_size=self.hparams.val_batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
def test_dataloader(self):
return DataLoader(
self.cc_test,
batch_size=self.hparams.test_batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
def collate_fn(self, batch):
return dict(
text=self.tokenizer([item['caption'] for item in batch], padding=True, return_tensors='pt')['input_ids'],
image=torch.cat([item['image'] for item in batch]),
)
class ConceptualCaptionsDataset(Dataset):
def __init__(
self,
hf_dataset: datasets.arrow_dataset.Dataset
) -> None:
super().__init__()
self.dataset = hf_dataset
self.feature_extractor = PerceiverFeatureExtractor()
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
x = self.dataset[index]
x['image'] = self.feature_extractor(x['image'], return_tensors='pt')['pixel_values']
return x | multimodal-self-distillation-main | src/datamodules/conceptual_datamodule.py |
import os
from typing import Optional
from torch.utils.data import Dataset, DataLoader
from datasets import load_dataset
from pytorch_lightning import LightningDataModule
class MSMARCOPassageDataModule(LightningDataModule):
"""
A DataModule implements 5 key methods:
- prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
- setup (things to do on every accelerator in distributed mode)
- train_dataloader (the training dataloader)
- val_dataloader (the validation dataloader(s))
- test_dataloader (the test dataloader(s))
This allows you to share a full dataset without explaining how to download,
split, transform and process the data.
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
"""
def __init__(
self,
collator,
data_dir,
train_batch_size,
val_batch_size,
test_batch_size,
train_on_long_form_text=False,
pin_memory=True):
super().__init__()
# this line allows to access init params with 'self.hparams' attribute
self.save_hyperparameters()
self.collator = collator
self.num_workers = os.cpu_count()
        # 'load_preprocessed_data' is not an __init__ argument, so default to False when unset
        if self.hparams.get("load_preprocessed_data", False):
self.num_proc = 1
else:
self.num_proc = os.cpu_count()
self.msmarco_train: Optional[Dataset] = None
self.msmarco_val: Optional[Dataset] = None
self.msmarco_test: Optional[Dataset] = None
self.align_fuse = [['text'], ['text']]
def prepare_data(self):
"""Download data if needed. This method is called only from a single GPU.
Do not use it to assign state (self.x = y)."""
if os.path.isdir(self.hparams.data_dir):
print("Data directory already exists, skipping download.")
else:
load_dataset("ms_marco", cache_dir=self.hparams.data_dir)
def setup(self, stage=None):
"""Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`.
This method is called by lightning twice for `trainer.fit()` and `trainer.test()`, so be careful if you do a random split!
The `stage` can be used to differentiate whether it's called before trainer.fit()` or `trainer.test()`."""
if self.hparams.train_on_long_form_text:
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
                # 'v2.1' is a dataset config name, not a split, so it must be the second positional argument
                self.msmarco_train = load_dataset('ms_marco', 'v2.1', cache_dir=self.hparams.data_dir)['train']
                self.msmarco_val = load_dataset('ms_marco', 'v2.1', cache_dir=self.hparams.data_dir)['validation']
# Assign test dataset for use in dataloader(s)
if stage == "test" or stage is None:
                self.msmarco_test = load_dataset('ms_marco', 'v2.1', cache_dir=self.hparams.data_dir)['test']
# No dataset split defined for predict stage
if stage == "predict" or stage is None:
raise Exception("""This DataModule is not designed to be used for prediction.
Please use the Spotify DataModule for prediction.""")
else:
raise NotImplementedError
#TODO: implement this
def train_dataloader(self):
return DataLoader(
self.msmarco_train,
batch_size=self.hparams.train_batch_size,
shuffle=True,
collate_fn=self.collator,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
def val_dataloader(self):
return DataLoader(
self.msmarco_val,
batch_size=self.hparams.val_batch_size,
shuffle=False,
collate_fn=self.collator,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
def test_dataloader(self):
return DataLoader(
self.msmarco_test,
batch_size=self.hparams.test_batch_size,
shuffle=False,
collate_fn=self.collator,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
#TODO use this probably: https://ir-datasets.com/msmarco-document.html#msmarco-document/train
class MSMARCODocumentDataModule(LightningDataModule):
"""
A DataModule implements 5 key methods:
- prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
- setup (things to do on every accelerator in distributed mode)
- train_dataloader (the training dataloader)
- val_dataloader (the validation dataloader(s))
- test_dataloader (the test dataloader(s))
This allows you to share a full dataset without explaining how to download,
split, transform and process the data.
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
"""
def __init__(
self,
collator,
data_dir,
train_batch_size,
val_batch_size,
test_batch_size,
train_on_long_form_text=False,
pin_memory=True):
super().__init__()
# this line allows to access init params with 'self.hparams' attribute
self.save_hyperparameters()
self.collator = collator
self.num_workers = os.cpu_count()
        # 'load_preprocessed_data' is not an __init__ argument, so default to False when unset
        if self.hparams.get("load_preprocessed_data", False):
self.num_proc = 1
else:
self.num_proc = os.cpu_count()
self.msmarco_train: Optional[Dataset] = None
self.msmarco_val: Optional[Dataset] = None
self.msmarco_test: Optional[Dataset] = None
def prepare_data(self):
"""Download data if needed. This method is called only from a single GPU.
Do not use it to assign state (self.x = y)."""
if os.path.isdir(self.hparams.data_dir):
print("Data directory already exists, skipping download.")
else:
load_dataset("ms_marco", cache_dir=self.hparams.data_dir)
def setup(self, stage=None):
"""Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`.
This method is called by lightning twice for `trainer.fit()` and `trainer.test()`, so be careful if you do a random split!
The `stage` can be used to differentiate whether it's called before trainer.fit()` or `trainer.test()`."""
if self.hparams.train_on_long_form_text:
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
                # 'v2.1' is a dataset config name, not a split, so it must be the second positional argument
                self.msmarco_train = load_dataset('ms_marco', 'v2.1', cache_dir=self.hparams.data_dir)['train']
                self.msmarco_val = load_dataset('ms_marco', 'v2.1', cache_dir=self.hparams.data_dir)['validation']
# Assign test dataset for use in dataloader(s)
if stage == "test" or stage is None:
                self.msmarco_test = load_dataset('ms_marco', 'v2.1', cache_dir=self.hparams.data_dir)['test']
# No dataset split defined for predict stage
if stage == "predict" or stage is None:
raise Exception("""This DataModule is not designed to be used for prediction.
Please use the Spotify DataModule for prediction.""")
else:
raise NotImplementedError
#TODO: implement this
def train_dataloader(self):
return DataLoader(
self.msmarco_train,
batch_size=self.hparams.train_batch_size,
shuffle=True,
collate_fn=self.collator,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
def val_dataloader(self):
return DataLoader(
self.msmarco_val,
batch_size=self.hparams.val_batch_size,
shuffle=False,
collate_fn=self.collator,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
def test_dataloader(self):
return DataLoader(
self.msmarco_test,
batch_size=self.hparams.test_batch_size,
shuffle=False,
collate_fn=self.collator,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
| multimodal-self-distillation-main | src/datamodules/msmarco_datamodule.py |
import os
import torch
from typing import Optional, Union, Dict, List
from torch.utils.data import Dataset, DataLoader
from datasets import load_dataset
from pytorch_lightning import LightningDataModule
from transformers import Wav2Vec2FeatureExtractor, PerceiverTokenizer
from transformers.utils import logging
from src import utils
log = utils.get_logger(__name__)
class LibriSpeechDataModule(LightningDataModule):
"""
A DataModule implements 5 key methods:
- prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
- setup (things to do on every accelerator in distributed mode)
- train_dataloader (the training dataloader)
- val_dataloader (the validation dataloader(s))
- test_dataloader (the test dataloader(s))
This allows you to share a full dataset without explaining how to download,
split, transform and process the data.
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
"""
def __init__(
self,
data_dir,
train_batch_size,
val_batch_size,
test_batch_size,
split='train.360',
pin_memory=True
) -> None:
super().__init__()
# this line allows to access init params with 'self.hparams' attribute
self.save_hyperparameters()
if os.name == 'nt':
self.num_workers = 0
else:
self.num_workers = os.cpu_count()
self.libri_train: Optional[Dataset] = None
self.libri_val: Optional[Dataset] = None
self.libri_test: Optional[Dataset] = None
self.align_fuse = [['text'], ['audio']]
self.metric = ['Recall@k', 'MRR']
logging.set_verbosity(logging.CRITICAL)
self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base')
self.tokenizer = PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
logging.set_verbosity(logging.WARNING)
def prepare_data(self):
"""Download data if needed. This method is called only from a single GPU.
Do not use it to assign state (self.x = y)."""
if os.path.isdir(self.hparams.data_dir):
log.info("Data directory already exists, skipping download.")
else:
load_dataset('librispeech_asr', 'clean', split=self.hparams.split, cache_dir=self.hparams.data_dir)
def setup(self, stage=None):
"""Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`.
This method is called by lightning twice for `trainer.fit()` and `trainer.test()`, so be careful if you do a random split!
The `stage` can be used to differentiate whether it's called before trainer.fit()` or `trainer.test()`."""
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
self.libri_train = load_dataset('librispeech_asr', 'clean', split=self.hparams.split, cache_dir=self.hparams.data_dir)
self.libri_val = load_dataset('librispeech_asr', 'clean', split='validation', cache_dir=self.hparams.data_dir)
# Assign test dataset for use in dataloader(s)
if stage == "test" or stage is None:
self.libri_test = load_dataset('librispeech_asr', 'clean', split='test', cache_dir=self.hparams.data_dir)
if stage == "predict" or stage is None:
raise Exception("This DataModule is not designed to be used for prediction.")
def train_dataloader(self):
return DataLoader(
dataset=self.libri_train,
batch_size=self.hparams.train_batch_size,
shuffle=True,
collate_fn=self.collate_fn,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory,
drop_last=True
)
def val_dataloader(self):
return DataLoader(
dataset=self.libri_val,
batch_size=self.hparams.val_batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory,
drop_last=True
)
def test_dataloader(self):
return DataLoader(
dataset=self.libri_test,
batch_size=self.hparams.test_batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory,
drop_last=True
)
def collate_fn(
self,
batch: List[Dict[str, Union[List[int], torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
input_values = [feature["audio"]["array"] for feature in batch]
text = [feature["text"] for feature in batch]
audio = self.feature_extractor(
input_values,
pad_to_multiple_of=96,
padding="longest",
return_tensors="pt",
sampling_rate=16000,
)
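        # pad_to_multiple_of=96 presumably matches the Perceiver audio preprocessor's default
        # patch size of 96 samples, so every padded waveform splits into whole patches.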
tokens = self.tokenizer(
text,
padding="longest",
return_tensors="pt",
)
return dict(text=tokens["input_ids"], audio=audio["input_values"], align_fuse=self.align_fuse, metric=self.metric)
| multimodal-self-distillation-main | src/datamodules/librispeech_datamodule.py |
import os.path
from typing import Callable
import torch
import pytorch_lightning as pl
from classy_vision.dataset.classy_dataset import ClassyDataset
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
class TinyImagenetDataset(ClassyDataset):
"""
TinyImageNetDataset is a ClassyDataset for the tiny imagenet dataset.
"""
def __init__(self, data_path: str, transform: Callable[[object], object]) -> None:
batchsize_per_replica = 16
shuffle = False
num_samples = 1000
dataset = datasets.ImageFolder(data_path)
super().__init__(
# pyre-fixme[6]
dataset,
batchsize_per_replica,
shuffle,
transform,
num_samples,
)
class TinyImagenetDataModule(pl.LightningDataModule):
"""
TinyImageNetDataModule is a pytorch LightningDataModule for the tiny
imagenet dataset.
"""
def __init__(
self,
data_dir: str,
num_workers: int = 0,
batch_size: int = 16,
shuffle: bool = True,
pin_memory: bool = True,
drop_last: bool = False,
) -> None:
super().__init__()
# this line allows to access init params with 'self.hparams' attribute
self.save_hyperparameters()
self.data_dir = data_dir
self.num_workers = num_workers
self.batch_size = batch_size
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
self.align_fuse = [['image'], ['image']]
def _verify_splits(self, data_dir: str, split: str) -> None:
dirs = os.listdir(data_dir)
if split not in dirs:
raise FileNotFoundError(
f"a {split} Imagenet split was not found in {data_dir}, make sure the"
f" folder contains a subfolder named {split}"
)
def prepare_data(self) -> None:
# imagenet cannot be downloaded... must provide path to folder with the train/val splits
self._verify_splits(self.data_dir, "train")
self._verify_splits(self.data_dir, "val")
self._verify_splits(self.data_dir, "test")
def train_dataloader(self) -> DataLoader:
img_transform = self._default_transforms()
self.train_ds = TinyImagenetDataset(
data_path=os.path.join(self.data_dir, "train"),
transform=lambda x: (img_transform(x[0]), x[1]),
)
return DataLoader(
self.train_ds,
batch_size=self.batch_size,
collate_fn=self.collate_fn,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
def val_dataloader(self) -> DataLoader:
img_transform = self._default_transforms()
self.val_ds = TinyImagenetDataset(
data_path=os.path.join(self.data_dir, "val"),
transform=lambda x: (img_transform(x[0]), x[1]),
)
return DataLoader(
self.val_ds,
batch_size=self.batch_size,
collate_fn=self.collate_fn,
            shuffle=False,  # validation data should not be shuffled
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
def test_dataloader(self) -> DataLoader:
img_transform = self._default_transforms()
self.test_ds = TinyImagenetDataset(
data_path=os.path.join(self.data_dir, "test"),
transform=lambda x: (img_transform(x[0]), x[1]),
)
return DataLoader(
self.test_ds,
batch_size=self.batch_size,
collate_fn=self.collate_fn,
            shuffle=False,  # test data should not be shuffled
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
def _default_transforms(self) -> Callable:
img_transform = transforms.ToTensor()
return img_transform
def collate_fn(self, batch):
images, labels = zip(*batch)
return dict(image=torch.stack(tensors=images, dim=0), label=torch.tensor(labels))
| multimodal-self-distillation-main | src/datamodules/tinyimagenet_datamodule.py |
# copy from https://github.com/PyTorchLightning/lightning-bolts/tree/master/pl_bolts/datamodules
# type: ignore[override]
import os
from typing import Any, Callable, Optional
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from pl_bolts.datasets import UnlabeledImagenet
from pl_bolts.transforms.dataset_normalizations import imagenet_normalization
from pl_bolts.utils import _TORCHVISION_AVAILABLE
from pl_bolts.utils.warnings import warn_missing_pkg
if _TORCHVISION_AVAILABLE:
from torchvision import transforms as transform_lib
else: # pragma: no cover
warn_missing_pkg("torchvision")
class ImagenetDataModule(LightningDataModule): # pragma: no cover
name = "imagenet"
def __init__(
self,
data_dir: str,
meta_dir: Optional[str] = None,
num_workers: int = 0,
batch_size: int = 32,
shuffle: bool = True,
pin_memory: bool = True,
drop_last: bool = False,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
# this line allows to access init params with 'self.hparams' attribute
self.save_hyperparameters()
if not _TORCHVISION_AVAILABLE: # pragma: no cover
raise ModuleNotFoundError(
"You want to use ImageNet dataset loaded from `torchvision` which is not installed yet."
)
self.data_dir = data_dir
self.num_workers = num_workers
self.meta_dir = meta_dir
self.batch_size = batch_size
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
self.align_fuse = [['image'], ['image']]
@property
def num_classes(self) -> int:
return 1000
def _verify_splits(self, data_dir: str, split: str) -> None:
dirs = os.listdir(data_dir)
if split not in dirs:
raise FileNotFoundError(
f"a {split} Imagenet split was not found in {data_dir}, make sure the"
f" folder contains a subfolder named {split}"
)
def prepare_data(self) -> None:
# imagenet cannot be downloaded... must provide path to folder with the train/val splits
self._verify_splits(self.data_dir, "train")
self._verify_splits(self.data_dir, "val")
for split in ["train", "val"]:
files = os.listdir(os.path.join(self.data_dir, split))
if "meta.bin" not in files:
raise FileNotFoundError(
"""
no meta.bin present. Imagenet is no longer automatically downloaded by PyTorch.
To get imagenet:
1. download yourself from http://www.image-net.org/challenges/LSVRC/2012/downloads
2. download the devkit (ILSVRC2012_devkit_t12.tar.gz)
3. generate the meta.bin file using the devkit
4. copy the meta.bin file into both train and val split folders
To generate the meta.bin do the following:
from pl_bolts.datamodules.imagenet_dataset import UnlabeledImagenet
path = '/path/to/folder/with/ILSVRC2012_devkit_t12.tar.gz/'
UnlabeledImagenet.generate_meta_bins(path)
"""
)
def train_dataloader(self, num_images_per_class: int = -1, add_normalize: bool = False) -> DataLoader:
transforms = self._default_transforms() if self.train_transforms is None else self.train_transforms
dataset = UnlabeledImagenet(
self.data_dir,
num_imgs_per_class=num_images_per_class,
meta_dir=self.meta_dir,
split="train",
transform=transforms,
)
return DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
def val_dataloader(self, num_images_per_class: int = 50, add_normalize: bool = False) -> DataLoader:
transforms = self._default_transforms() if self.val_transforms is None else self.val_transforms
dataset = UnlabeledImagenet(
self.data_dir,
num_imgs_per_class_val_split=num_images_per_class,
meta_dir=self.meta_dir,
split="val",
transform=transforms,
)
return DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
    def test_dataloader(self, num_images_per_class: int = -1, add_normalize: bool = False) -> DataLoader:
transforms = self._default_transforms() if self.test_transforms is None else self.test_transforms
dataset = UnlabeledImagenet(
self.data_dir,
num_imgs_per_class=num_images_per_class,
meta_dir=self.meta_dir,
split="test",
transform=transforms,
)
return DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
def _default_transforms(self) -> Callable:
        imagenet_transforms = transform_lib.Compose([transform_lib.ToTensor(), imagenet_normalization()])
        return imagenet_transforms | multimodal-self-distillation-main | src/datamodules/imagenet_datamodule.py |
import os
from typing import Optional
from torch.utils.data import Dataset, DataLoader
from datasets import load_dataset
from pytorch_lightning import LightningDataModule
from transformers import PerceiverTokenizer
class WikipediaDataModule(LightningDataModule):
"""
A DataModule implements 5 key methods:
- prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
- setup (things to do on every accelerator in distributed mode)
- train_dataloader (the training dataloader)
- val_dataloader (the validation dataloader(s))
- test_dataloader (the test dataloader(s))
This allows you to share a full dataset without explaining how to download,
split, transform and process the data.
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
"""
def __init__(
self,
data_dir,
max_input_length,
train_batch_size,
val_batch_size,
test_batch_size,
pin_memory=True):
super().__init__()
# this line allows to access init params with 'self.hparams' attribute
self.save_hyperparameters()
if os.name == 'nt':
self.num_workers = 0
else:
self.num_workers = os.cpu_count()
self.wiki_train: Optional[Dataset] = None
self.wiki_val: Optional[Dataset] = None
self.wiki_test: Optional[Dataset] = None
self.tokenizer = PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
self.align_fuse = [['text'], ['text']]
def prepare_data(self):
"""Download data if needed. This method is called only from a single GPU.
Do not use it to assign state (self.x = y)."""
if os.path.isdir(self.hparams.data_dir):
print("Data directory already exists, skipping download.")
else:
load_dataset("wikipedia", "20220301.en", cache_dir=self.hparams.data_dir)
def setup(self, stage=None):
"""Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`.
This method is called by lightning twice for `trainer.fit()` and `trainer.test()`, so be careful if you do a random split!
The `stage` can be used to differentiate whether it's called before trainer.fit()` or `trainer.test()`."""
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
self.wiki_train = load_dataset("wikipedia", "20220301.en", cache_dir=self.hparams.data_dir)['train'] #TODO implement splitting
        if stage == 'validate' or stage is None:
            pass  # TODO: assign self.wiki_val once dataset splitting is implemented
        # Assign test dataset for use in dataloader(s)
        if stage == "test" or stage is None:
            pass  # TODO: assign self.wiki_test once dataset splitting is implemented
# No dataset split defined for predict stage
if stage == "predict" or stage is None:
raise Exception("""This DataModule is not designed to be used for prediction.
Please use the Spotify DataModule for prediction.""")
def train_dataloader(self):
return DataLoader(
self.wiki_train,
batch_size=self.hparams.train_batch_size,
shuffle=True,
collate_fn=self.collate_fn,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
def val_dataloader(self):
return DataLoader(
self.wiki_val,
batch_size=self.hparams.val_batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
def test_dataloader(self):
return DataLoader(
            self.wiki_test,
batch_size=self.hparams.test_batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_workers,
pin_memory=self.hparams.pin_memory
)
def collate_fn(self, batch):
tokens = self.tokenizer(
batch,
padding="longest",
truncation=True,
max_length=self.hparams.max_input_length,
return_tensors="pt",
)
return dict(text=tokens['input_ids'])
| multimodal-self-distillation-main | src/datamodules/wikipedia_datamodule.py |
import logging
import warnings
from typing import List, Sequence, Tuple
import torch
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from pytorch_lightning import Trainer
def exists(val):
return val is not None
def count_parameters(model):
    return {
        'requires_grad': sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e6,
        'does_not_require_grad': sum(p.numel() for p in model.parameters() if not p.requires_grad) / 1e6,
    }
# function that checks whether all parameters are on the same device and prints module names that are not
def recursive_custom_device_check(module, device):
try:
for modules in module.named_children():
if next(modules[1].parameters()).device != device:
print(modules[0])
print(next(modules[1].parameters()).device)
recursive_custom_device_check(modules[1], device)
except StopIteration:
pass
def freeze_module(module):
for param in module.parameters():
param.requires_grad = False
def get_parameter_dtype(parameter):
try:
return next(parameter.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, torch.Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = parameter._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
def get_logger(name=__name__) -> logging.Logger:
"""Initializes multi-GPU-friendly python command line logger."""
logger = logging.getLogger(name)
# this ensures all logging levels get marked with the rank zero decorator
# otherwise logs would get multiplied for each GPU process in multi-GPU setup
for level in (
"debug",
"info",
"warning",
"error",
"exception",
"fatal",
"critical",
):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
log = get_logger(__name__)
def extras(config: DictConfig) -> None:
"""Applies optional utilities, controlled by config flags.
Utilities:
- Ignoring python warnings
- Rich config printing
"""
# disable python warnings if <config.ignore_warnings=True>
if config.get("ignore_warnings"):
log.info("Disabling python warnings! <config.ignore_warnings=True>")
warnings.filterwarnings("ignore")
# pretty print config tree using Rich library if <config.print_config=True>
if config.get("print_config"):
log.info("Printing config tree with Rich! <config.print_config=True>")
print_config(config, resolve=True)
@rank_zero_only
def print_config(
config: DictConfig,
print_order: Sequence[str] = (
"datamodule",
"model",
"callbacks",
"logger",
"trainer",
),
resolve: bool = True,
) -> None:
"""Prints content of DictConfig using Rich library and its tree structure.
Args:
config (DictConfig): Configuration composed by Hydra.
print_order (Sequence[str], optional): Determines in what order config components are printed.
resolve (bool, optional): Whether to resolve reference fields of DictConfig.
"""
style = "dim"
tree = rich.tree.Tree("CONFIG", style=style, guide_style=style)
    queue = []
    for field in print_order:
        if field in config:
            queue.append(field)
        else:
            log.info(f"Field '{field}' not found in config")
    for field in config:
        if field not in queue:
            queue.append(field)
    for field in queue:
branch = tree.add(field, style=style, guide_style=style)
config_group = config[field]
if isinstance(config_group, DictConfig):
branch_content = OmegaConf.to_yaml(config_group, resolve=resolve)
else:
branch_content = str(config_group)
branch.add(rich.syntax.Syntax(branch_content, "yaml"))
rich.print(tree)
with open("config_tree.log", "w") as file:
rich.print(tree, file=file)
@rank_zero_only
def log_hyperparameters(
config: DictConfig,
model: pl.LightningModule,
datamodule: pl.LightningDataModule,
trainer: pl.Trainer,
callbacks: List[pl.Callback],
logger: List[pl.loggers.LightningLoggerBase],
) -> None:
"""Controls which config parts are saved by Lightning loggers.
Additionaly saves:
- number of model parameters
"""
if not trainer.logger:
return
hparams = {}
# choose which parts of hydra config will be saved to loggers
hparams["model"] = config["model"]
# save number of model parameters
hparams["model/params/total"] = sum(p.numel() for p in model.parameters())
hparams["model/params/trainable"] = sum(
p.numel() for p in model.parameters() if p.requires_grad
)
hparams["model/params/non_trainable"] = sum(
p.numel() for p in model.parameters() if not p.requires_grad
)
hparams["datamodule"] = config["datamodule"]
hparams["trainer"] = config["trainer"]
if "seed" in config:
hparams["seed"] = config["seed"]
if "callbacks" in config:
hparams["callbacks"] = config["callbacks"]
# send hparams to all loggers
trainer.logger.log_hyperparams(hparams)
def finish(
config: DictConfig,
model: pl.LightningModule,
datamodule: pl.LightningDataModule,
trainer: pl.Trainer,
callbacks: List[pl.Callback],
logger: List[pl.loggers.LightningLoggerBase],
) -> None:
"""Makes sure everything closed properly."""
# without this sweeps with wandb logger might crash!
for lg in logger:
if isinstance(lg, pl.loggers.wandb.WandbLogger):
import wandb
wandb.finish()
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
"""Safely get Weights&Biases logger from Trainer."""
if trainer.fast_dev_run:
raise Exception(
"Cannot use wandb callbacks since pytorch lightning disables loggers in `fast_dev_run=true` mode."
)
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exception(
"You are using wandb related callback, but WandbLogger was not found for some reason..."
) | multimodal-self-distillation-main | src/utils/__init__.py |
multimodal-self-distillation-main | src/models/__init__.py |
|
from typing import Any, Union, Tuple, Dict
import torch
import pytorch_lightning as pl
from src.models.components.ema import EMA
from src.models.components.dispatcher import dispatch_inputs
from src.models.components.outputs import DispatcherOutput, ModelOutput, ForwardPassOutput
from src.models.components.criterion import LatentPredictionLoss
from src.models.components.perceiver import PerceiverModel
from src.models.components.hip import HiPModel
from src.utils import exists
class LatentPredictionPretraining(pl.LightningModule):
"""
    LightningModule for latent-prediction pretraining via multimodal self-distillation.
A LightningModule organizes your PyTorch code into 5 sections:
- Computations (init).
- Train loop (training_step)
- Validation loop (validation_step)
- Test loop (test_step)
- Optimizers (configure_optimizers)
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html
"""
def __init__(
self,
model: Union[PerceiverModel, HiPModel],
optimizer: torch.optim.Optimizer,
scheduler: torch.optim.lr_scheduler,
criterion: LatentPredictionLoss,
ema_decay: float = 0.999,
ema_end_decay: float = 0.9999,
ema_anneal_end_step: int = 300000,
switch_student_teacher_per_epoch: bool = False,
scheduler_monitor: str = "train/total_loss",
scheduler_interval: str = "epoch",
):
super().__init__()
# this line allows to access init params with 'self.hparams' attribute
# it also ensures init params will be stored in ckpt
self.save_hyperparameters(logger=True) #, ignore=['criterion'])
# student and teacher models is instantiated by Hydra
self.student = model
self.teacher : EMA = EMA(model, ema_decay)
# set student status for each model in order for masking to be applied only to the student model
self.student.set_student_status(True)
self.teacher.model.set_student_status(False)
# EMA parameters
self.ema_decay = ema_decay
self.ema_end_decay = ema_end_decay
self.ema_anneal_end_step = ema_anneal_end_step
# optimizer and scheduler
self.optimizer = optimizer
self.scheduler = scheduler
# loss function
self.criterion = criterion
# whether to switch student and teacher model every epoch in multimodal training
self.switch_student_teacher_per_epoch = switch_student_teacher_per_epoch
def on_fit_start(self) -> None:
student_device = next(self.student.parameters()).device
self.teacher.model.to(student_device)
self.teacher.model.eval()
def ema_step(self):
"""
One EMA step for the offline/teacher model until the ending decay value is reached
"""
if self.ema_decay != self.ema_end_decay:
if self.teacher.num_updates >= self.ema_anneal_end_step:
decay = self.ema_end_decay
else:
decay = self.teacher.get_annealed_rate(
self.ema_decay,
self.ema_end_decay,
self.teacher.num_updates,
self.ema_anneal_end_step,
)
self.teacher.decay = decay
if self.teacher.decay < 1:
self.teacher.step(self.student)
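    # Sketch of the annealing assumed above (the exact formula lives in EMA.get_annealed_rate):
    # the decay is interpolated linearly from ema_decay to ema_end_decay over
    # ema_anneal_end_step updates, e.g.
    #   decay = ema_decay + (ema_end_decay - ema_decay) * num_updates / ema_anneal_end_step
    # and is then held constant at ema_end_decay.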
def forward(
self,
batch: Any,
) -> Tuple[ForwardPassOutput, DispatcherOutput]:
dispatched_inputs = dispatch_inputs(
batch,
self.current_epoch,
self.switch_student_teacher_per_epoch,
)
student_outputs: ModelOutput = self.student(
dispatched_inputs.student_input,
apply_mask=dispatched_inputs.apply_mask
)
outputs = ForwardPassOutput(
student_output=student_outputs,
align_fuse=dispatched_inputs.align_fuse,
labels=dispatched_inputs.labels,
output_modalities=dispatched_inputs.output_modalities,
metric=dispatched_inputs.metric
)
return outputs, dispatched_inputs
def step(
self,
batch: Any,
) -> ForwardPassOutput:
# forward pass student
outputs, dispatched_inputs = self.forward(batch)
# forward pass teacher
with torch.no_grad():
self.teacher.model.eval()
teacher_outputs: ModelOutput = self.teacher.model(
dispatched_inputs.teacher_inputs,
apply_mask=dispatched_inputs.apply_mask
)
outputs.set_attributes(**{"teacher_output": teacher_outputs})
# compute loss
criterion_output = self.criterion(outputs)
outputs.set_attributes(**{"criterion_output": criterion_output})
return outputs
def on_train_batch_end(self, outputs: Any, batch: Any, batch_idx: int) -> None:
if exists(self.teacher):
self.ema_step()
# in training/validation/test_step we can return dict with any tensors
# and then read it in some callback or in `training/validation/test_epoch_end()`` below
# remember to always return loss from `training_step()` or else backpropagation will fail!
def training_step(self, batch: Any, batch_idx: int):
outputs : ForwardPassOutput = self.step(batch)
self.log("train/total_loss", outputs.criterion_output.total_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
self.log("train/latent_loss", outputs.criterion_output.latent_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
self.log("train/align_loss", outputs.criterion_output.align_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
return {"loss": outputs.criterion_output.total_loss, "forward_pass_output": outputs}
def validation_step(self, batch: Any, batch_idx: int):
outputs : ForwardPassOutput = self.step(batch)
self.log("val/total_loss", outputs.criterion_output.total_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
self.log("val/latent_loss", outputs.criterion_output.latent_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
self.log("val/align_loss", outputs.criterion_output.align_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
return {"loss": outputs.criterion_output.total_loss, "forward_pass_output": outputs}
def test_step(self, batch: Any, batch_idx: int):
outputs : ForwardPassOutput = self.step(batch)
self.log("test/total_loss", outputs.criterion_output.total_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
self.log("test/latent_loss", outputs.criterion_output.latent_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
self.log("test/align_loss", outputs.criterion_output.align_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
return {"loss": outputs.criterion_output.total_loss, "forward_pass_output": outputs}
def configure_optimizers(self):
"""Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
Examples:
https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers
"""
optimizer = self.optimizer(params=self.parameters())
scheduler = self.scheduler(optimizer=optimizer)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": scheduler,
"monitor": self.hparams.scheduler_monitor,
"interval": self.hparams.scheduler_interval,
"frequency": 1,
},
}
class LatentPredictionFinetuning(pl.LightningModule):
def __init__(self, model: torch.nn.Module):
super().__init__()
#TODO use pl.callbacks.BaseFineTuningCallback when finetuning on a smaller dataset | multimodal-self-distillation-main | src/models/module.py |
from typing import List, Dict, Tuple
from src.models.components.outputs import DispatcherOutput
def dispatch_inputs(
batch: Dict,
epoch: int,
switch_teacher_student: bool = False
) -> DispatcherOutput:
"""
Returns the input dicts for student and teacher model.
Parameters
----------
batch : (dict)
batch of data, must contain the key 'align_fuse' which specifies
the alignment and fusion procedure and what to feed to the student
and teacher, examples:
- [['text'], ['video', 'audio']]
            - [['video', 'audio'], ['video', 'audio']]
- [['text'], ['audio']]
- [['image'], ['image']]
epoch : (int)
number of current epoch
    switch_teacher_student : (bool)
        whether to swap which modality goes to the student vs. the teacher every other epoch
        (only relevant for multimodal training)
Returns
-------
dispatcher_output : DispatcherOutput
"""
if 'align_fuse' in batch.keys():
align_fuse = batch['align_fuse']
else:
# for inference when only the input data is given and nothing else
align_fuse = [[key] for key in batch.keys()]
if 'labels' in batch.keys():
labels = batch['labels']
else:
labels = None
if 'metric' in batch.keys():
metric = batch['metric']
else:
metric = None
if 'num_classes' in batch.keys():
num_classes = batch['num_classes']
else:
num_classes = None
if align_fuse[0] == align_fuse[1]:
# unimodal case, e.g. [['text'], ['text']] or [['image'], ['image']]
apply_mask = True
student_index = 0
teacher_index = 1
elif len(align_fuse) == 1:
# inference is assumed here with align_fuse like [['image']] or [['video', 'audio']]
apply_mask = False
student_index = 0
teacher_index = 0
else:
# multimodal case, e.g. [['text'], ['video', 'audio']] or [['text'], ['audio']]
apply_mask = False
if switch_teacher_student:
if epoch % 2 == 0:
student_index = 0
teacher_index = 1
else:
student_index = 1
teacher_index = 0
else:
student_index = 0
teacher_index = 1
student_inputs = {}
teacher_inputs = {}
for k, v in batch.items():
if k in align_fuse[student_index]:
student_inputs[k] = v
elif k in align_fuse[teacher_index]:
teacher_inputs[k] = v
    output_modalities = {'student_output': list(student_inputs.keys()), 'teacher_output': list(teacher_inputs.keys())}
    dispatcher_output = DispatcherOutput(
student_input=student_inputs,
teacher_inputs=teacher_inputs,
align_fuse=align_fuse,
apply_mask=apply_mask,
labels=labels,
output_modalities=output_modalities,
metric=metric,
num_classes=num_classes,
)
    return dispatcher_output | multimodal-self-distillation-main | src/models/components/dispatcher.py |
import torch
def reciprocal_ranks(pairwise_similarity_results):
indexes = []
targets = []
for i, result in enumerate(pairwise_similarity_results):
for entry in result:
indexes.append(i)
if entry['corpus_id'] == i:
targets.append(1)
else:
targets.append(0)
preds = [0] * len(targets)
indexes_tensor = torch.LongTensor(indexes)
    targets_tensor = torch.Tensor(targets)
    preds_tensor = torch.Tensor(preds)
    return indexes_tensor, targets_tensor, preds_tensor
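# A hedged usage sketch: the tensors returned above are laid out for torchmetrics-style
# retrieval metrics, which group predictions by query via `indexes`, e.g.
#   from torchmetrics import RetrievalMRR
#   indexes, targets, preds = reciprocal_ranks(results)
#   score = RetrievalMRR()(preds, targets.long(), indexes=indexes)
# (real similarity scores would be needed in `preds`; here they are zero placeholders)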
class PretrainingMetric:
def __init__(self):
pass
# self.train
# self.val
# self.test
| multimodal-self-distillation-main | src/models/components/metrics.py |
import torch
import torch.distributed as dist
class GatherLayer(torch.autograd.Function):
"""
Gathers tensors from all process and supports backward propagation
for the gradients across processes.
"""
@staticmethod
def forward(ctx, x):
if dist.is_available() and dist.is_initialized():
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
else:
output = [x]
return tuple(output)
@staticmethod
def backward(ctx, *grads):
if dist.is_available() and dist.is_initialized():
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
grad_out = all_gradients[get_rank()]
else:
grad_out = grads[0]
return grad_out
def get_rank():
if dist.is_available() and dist.is_initialized():
return dist.get_rank()
return 0
def gather(X, dim=0):
"""Gathers tensors from all processes, supporting backward propagation."""
return torch.cat(GatherLayer.apply(X), dim=dim) | multimodal-self-distillation-main | src/models/components/gather.py |
import math
import torch
from torch import nn
from typing import Any, Callable, Mapping, Optional, Tuple, Union, Dict
from transformers.modeling_outputs import BaseModelOutputWithCrossAttentions
from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from src.models.components.masking import mask_hidden_states
from src.models.components.outputs import ModelOutput
from src.models.components.pooler import Pooler
from src.utils import get_logger, get_parameter_dtype
ModalitySizeType = Mapping[str, int]
PreprocessorOutputType = Tuple[torch.Tensor, Optional[torch.Tensor], torch.Tensor]
PreprocessorType = Callable[..., PreprocessorOutputType]
PredictionHeadType = Callable[..., Any]
PostprocessorType = Callable[..., Any]
logger = get_logger(__name__)
class PerceiverEmbeddings(nn.Module):
"""Construct the latent embeddings."""
def __init__(
self,
num_latents: int,
d_latents: int,
):
super().__init__()
self.latents = nn.Parameter(torch.randn(num_latents, d_latents))
def forward(self, batch_size: int):
return self.latents.expand(batch_size, -1, -1) # Thanks, Phil Wang
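    # Shape sketch: with num_latents=256 and d_latents=1280, forward(batch_size=2) returns a
    # (2, 256, 1280) tensor, i.e. the same learned latent array broadcast to every example.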
class PerceiverSelfAttention(nn.Module):
"""Multi-headed {cross, self}-attention. Can be used both in the encoder as well as in the decoder."""
def __init__(
self,
attention_probs_dropout_prob,
is_cross_attention,
qk_channels,
v_channels,
num_heads,
q_dim,
kv_dim
):
super().__init__()
self.num_heads = num_heads
# Q and K must have the same number of channels.
# Default to preserving Q's input's shape.
if qk_channels is None:
qk_channels = q_dim
# V's num_channels determines the shape of the output of QKV-attention.
# Default to the same number of channels used in the key-query operation.
if v_channels is None:
v_channels = qk_channels
if qk_channels % num_heads != 0:
raise ValueError(f"qk_channels ({qk_channels}) must be divisible by num_heads ({num_heads}).")
if v_channels % num_heads != 0:
raise ValueError(f"v_channels ({v_channels}) must be divisible by num_heads ({num_heads}).")
self.qk_channels = qk_channels
self.v_channels = v_channels
self.qk_channels_per_head = self.qk_channels // num_heads
self.v_channels_per_head = self.v_channels // num_heads
# Layer normalization
self.layernorm1 = nn.LayerNorm(q_dim)
self.layernorm2 = nn.LayerNorm(kv_dim) if is_cross_attention else nn.Identity()
# Projection matrices
self.query = nn.Linear(q_dim, qk_channels)
self.key = nn.Linear(kv_dim, qk_channels)
self.value = nn.Linear(kv_dim, v_channels)
self.dropout = nn.Dropout(attention_probs_dropout_prob)
def transpose_for_scores(self, x, channels_per_head):
new_x_shape = x.size()[:-1] + (self.num_heads, channels_per_head)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
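    # Shape sketch: a (batch, seq_len, channels) tensor with num_heads=8 comes back as
    # (batch, 8, seq_len, channels // 8), one sub-space per attention head.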
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs: Optional[torch.FloatTensor] = None,
inputs_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
hidden_states = self.layernorm1(hidden_states)
inputs = self.layernorm2(inputs)
# Project queries, keys and values to a common feature dimension. If this is instantiated as a cross-attention module,
# the keys and values come from the inputs; the attention mask needs to be such that the inputs's non-relevant tokens are not attended to.
is_cross_attention = inputs is not None
queries = self.query(hidden_states)
if is_cross_attention:
keys = self.key(inputs)
values = self.value(inputs)
attention_mask = inputs_mask
else:
keys = self.key(hidden_states)
values = self.value(hidden_states)
# Reshape channels for multi-head attention.
# We reshape from (batch_size, time, channels) to (batch_size, num_heads, time, channels per head)
queries = self.transpose_for_scores(queries, self.qk_channels_per_head)
keys = self.transpose_for_scores(keys, self.qk_channels_per_head)
values = self.transpose_for_scores(values, self.v_channels_per_head)
# Take the dot product between the queries and keys to get the raw attention scores.
attention_scores = torch.matmul(queries, keys.transpose(-1, -2))
batch_size, num_heads, seq_len, q_head_dim = queries.shape
_, _, _, v_head_dim = values.shape
hiddens = self.num_heads * v_head_dim
attention_scores = attention_scores / math.sqrt(q_head_dim)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in PerceiverModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, values)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (hiddens,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class PerceiverSelfOutput(nn.Module):
def __init__(self, input_channels, output_channels):
super().__init__()
self.dense = nn.Linear(input_channels, output_channels)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
return hidden_states
class PerceiverAttention(nn.Module):
"""Attention module, including a dense block."""
def __init__(
self,
attention_probs_dropout_prob=0.1,
cross_attention_shape_for_attention="kv",
is_cross_attention=False,
qk_channels=None,
v_channels=None,
num_heads=1,
q_dim=None,
kv_dim=None,
use_query_residual=True,
):
super().__init__()
# MultiHead attention
if is_cross_attention and qk_channels is None:
if cross_attention_shape_for_attention == "q":
qk_channels = q_dim
elif cross_attention_shape_for_attention == "kv":
qk_channels = kv_dim
else:
raise ValueError(
f"Unknown value {cross_attention_shape_for_attention} for "
"cross_attention_shape_for_attention."
)
else:
if qk_channels is None:
qk_channels = q_dim
if v_channels is None:
v_channels = qk_channels
self.self = PerceiverSelfAttention(
attention_probs_dropout_prob,
is_cross_attention=is_cross_attention,
qk_channels=qk_channels,
v_channels=v_channels,
num_heads=num_heads,
q_dim=q_dim,
kv_dim=kv_dim
)
# dense block
output_channels = None
if is_cross_attention:
output_channels = q_dim
else:
if output_channels is None:
output_channels = v_channels
self.output = PerceiverSelfOutput(input_channels=self.self.v_channels, output_channels=output_channels)
self.use_query_residual = use_query_residual
self.pruned_heads = set()
    def prune_heads(self, heads):
        # NOTE: carried over from the Hugging Face Perceiver implementation; it references
        # attributes such as num_attention_heads/attention_head_size that this module names
        # differently (num_heads/qk_channels_per_head), so head pruning is not expected to
        # work here without adapting those names.
        if len(heads) == 0:
            return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs: Optional[torch.FloatTensor] = None,
inputs_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
inputs,
inputs_mask,
output_attentions,
)
# Output projection
attention_output = self.output(self_outputs[0])
# Optionally include a residual to the original queries.
# Consider omitting the residual if the semantics of query and output
# are different, e.g. if queries are positions and outputs are pixels.
if self.use_query_residual:
attention_output = attention_output + hidden_states
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
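# --- Added illustration (not part of the original file): a minimal sketch of how the
# --- cross-attention variant of `PerceiverAttention` above is typically wired, with
# --- hypothetical sizes chosen purely for demonstration. Defining the function does not
# --- run it; call it manually to check the shapes.
def _cross_attention_shape_sketch():
    import torch
    attn = PerceiverAttention(
        is_cross_attention=True,  # queries come from the latents, keys/values from the inputs
        num_heads=1,
        q_dim=256,                # latent channel size (hypothetical)
        kv_dim=64,                # input channel size (hypothetical)
    )
    latents = torch.randn(2, 128, 256)    # (batch, num_latents, q_dim)
    inputs = torch.randn(2, 1024, 64)     # (batch, input_length, kv_dim)
    # the output keeps the latent shape, so it can be fed to further self-attention layers
    out = attn(latents, inputs=inputs)[0]
    return out.shape                       # expected: torch.Size([2, 128, 256])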
class PerceiverMLP(nn.Module):
"""A Transformer-style dense module to follow attention."""
def __init__(self, input_size, widening_factor, hidden_act='gelu'):
super().__init__()
self.dense1 = nn.Linear(input_size, widening_factor * input_size)
if isinstance(hidden_act, str):
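            # (added note, not in the original file) any string value falls back to GELU here;
            # only the default 'gelu' is therefore effectively supported as a string.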
self.intermediate_act_fn = nn.GELU()
else:
self.intermediate_act_fn = hidden_act
self.dense2 = nn.Linear(widening_factor * input_size, input_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense1(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.dense2(hidden_states)
return hidden_states
class PerceiverLayer(nn.Module):
def __init__(
self,
chunk_size_feed_forward=0, # from PretrainedConfig
attention_probs_dropout_prob=0.1,
cross_attention_shape_for_attention="kv",
is_cross_attention=False,
qk_channels=None,
v_channels=None,
num_heads=1,
q_dim=None,
kv_dim=None,
widening_factor=4,
use_query_residual=True,
):
super().__init__()
self.chunk_size_feed_forward = chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = PerceiverAttention(
attention_probs_dropout_prob=attention_probs_dropout_prob,
cross_attention_shape_for_attention=cross_attention_shape_for_attention,
is_cross_attention=is_cross_attention,
qk_channels=qk_channels,
v_channels=v_channels,
num_heads=num_heads,
q_dim=q_dim,
kv_dim=kv_dim,
use_query_residual=use_query_residual,
)
self.layernorm = nn.LayerNorm(q_dim)
self.mlp = PerceiverMLP(input_size=q_dim, widening_factor=widening_factor)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs: Optional[torch.FloatTensor] = None,
inputs_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
inputs,
inputs_mask,
output_attentions,
)
attention_output = attention_outputs[0]
outputs = attention_outputs[1:] # add attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
layer_output = layer_output + attention_output # residual connection
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
layer_output = self.layernorm(attention_output)
layer_output = self.mlp(layer_output)
return layer_output
class PerceiverEncoder(nn.Module):
"""The Perceiver Encoder: a scalable, fully attentional encoder."""
def __init__(
self,
d_latents,
num_blocks,
num_self_attention_heads,
num_self_attends_per_block,
num_cross_attention_heads,
qk_channels=None,
v_channels=None,
cross_attention_shape_for_attention="kv",
self_attention_widening_factor=1,
cross_attention_widening_factor=1,
attention_probs_dropout_prob=0.1,
chunk_size_feed_forward=0, # found in PretrainedConfig
kv_dim=None,
use_query_residual=True,
):
super().__init__()
self.num_blocks = num_blocks
# Check that we can use multihead-attention with these shapes.
        if d_latents % num_self_attention_heads != 0:
            raise ValueError(
                f"d_latents ({d_latents}) must be divisible by"
                f" num_self_attention_heads ({num_self_attention_heads})."
            )
        if d_latents % num_cross_attention_heads != 0:
            raise ValueError(
                f"d_latents ({d_latents}) must be divisible by"
                f" num_cross_attention_heads ({num_cross_attention_heads})."
            )
# Construct the cross attention layer.
self.cross_attention = PerceiverLayer(
chunk_size_feed_forward=chunk_size_feed_forward,
attention_probs_dropout_prob=attention_probs_dropout_prob,
cross_attention_shape_for_attention=cross_attention_shape_for_attention,
is_cross_attention=True,
qk_channels=qk_channels,
v_channels=v_channels,
num_heads=num_cross_attention_heads,
q_dim=d_latents,
kv_dim=kv_dim,
widening_factor=cross_attention_widening_factor,
use_query_residual=use_query_residual,
)
# Construct a single block of self-attention layers.
# We get deeper architectures by applying this block more than once.
self_attention_layers = []
for _ in range(num_self_attends_per_block):
layer = PerceiverLayer(
chunk_size_feed_forward=chunk_size_feed_forward,
attention_probs_dropout_prob=attention_probs_dropout_prob,
is_cross_attention=False,
qk_channels=qk_channels,
v_channels=v_channels,
num_heads=num_self_attention_heads,
q_dim=d_latents,
kv_dim=d_latents,
widening_factor=self_attention_widening_factor,
)
self_attention_layers.append(layer)
self.self_attends = nn.ModuleList(self_attention_layers)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs: Optional[torch.FloatTensor] = None,
inputs_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
# Apply the cross-attention between the latents (hidden_states) and inputs:
layer_outputs = self.cross_attention(
hidden_states,
attention_mask=attention_mask,
head_mask=None,
inputs=inputs,
inputs_mask=inputs_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_cross_attentions = all_cross_attentions + (layer_outputs[1],)
# Apply the block of self-attention layers more than once:
for _ in range(self.num_blocks):
for i, layer_module in enumerate(self.self_attends):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(
hidden_states,
attention_mask=attention_mask,
head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
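# --- Added illustration (not part of the original file): a rough sketch of the encoder's
# --- latent-bottleneck flow with hypothetical sizes. The latents cross-attend to the inputs
# --- once, then the same block of self-attention layers is re-applied `num_blocks` times
# --- (the layer weights are shared across repetitions).
def _encoder_flow_sketch():
    import torch
    encoder = PerceiverEncoder(
        d_latents=256,
        num_blocks=2,
        num_self_attention_heads=8,
        num_self_attends_per_block=4,
        num_cross_attention_heads=1,
        kv_dim=64,  # channel size of the (preprocessed) inputs
    )
    latents = torch.randn(2, 128, 256)   # (batch, num_latents, d_latents)
    inputs = torch.randn(2, 1024, 64)    # (batch, input_length, kv_dim)
    out = encoder(latents, inputs=inputs)
    return out.last_hidden_state.shape   # expected: torch.Size([2, 128, 256])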
class PerceiverModel(nn.Module):
def __init__(
self,
is_training=False,
is_student=False,
d_model=704,
num_latents=784,
d_latents=512,
num_blocks=1,
num_self_attention_heads=8,
num_self_attends_per_block=8,
num_cross_attention_heads=1,
qk_channels=None,
v_channels=None,
cross_attention_shape_for_attention="kv",
self_attention_widening_factor=1,
cross_attention_widening_factor=1,
attention_probs_dropout_prob=0.1,
chunk_size_feed_forward=0, # found in PretrainedConfig
kv_dim=None,
use_query_residual=True,
mask_time_prob=0.05,
mask_time_length=10,
use_projection_head=True,
use_simsiam_projector=False,
input_preprocessor: PreprocessorType = None,
):
"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
        Parameters:
            Unlike the Hugging Face implementation, this version takes no `PerceiverConfig`: all hyperparameters
            (e.g. `d_model`, `num_latents`, `d_latents`, the number of blocks and heads, and the masking settings)
            are passed directly as keyword arguments to the constructor.
input_preprocessor (*PreprocessorType*, *optional*):
Optional input preprocessor to use. Examples include
*transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor*,
*transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor*,
*transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor*,
*transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor*.
Note that you can define your own decoders, preprocessors and/or postprocessors to fit your use-case.
"""
super().__init__()
self.is_training = is_training
self.is_student = is_student
self.d_model = d_model
self.num_blocks = num_blocks
self.num_self_attends_per_block = num_self_attends_per_block
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
# initialized by Hydra
self.input_preprocessor = input_preprocessor
self.embeddings = PerceiverEmbeddings(num_latents, d_latents)
self.encoder = PerceiverEncoder(
d_latents=d_latents,
num_blocks=num_blocks,
num_self_attention_heads=num_self_attention_heads,
num_self_attends_per_block=num_self_attends_per_block,
num_cross_attention_heads=num_cross_attention_heads,
qk_channels=qk_channels,
v_channels=v_channels,
cross_attention_shape_for_attention=cross_attention_shape_for_attention,
self_attention_widening_factor=self_attention_widening_factor,
cross_attention_widening_factor=cross_attention_widening_factor,
attention_probs_dropout_prob=attention_probs_dropout_prob,
chunk_size_feed_forward=chunk_size_feed_forward, # found in PretrainedConfig
kv_dim=input_preprocessor.num_channels if input_preprocessor is not None else d_model,
use_query_residual=use_query_residual,
)
self.pooler = Pooler(
dim_in=d_latents,
projection_size=d_latents,
widening_factor=self_attention_widening_factor,
use_projection_head=use_projection_head,
use_simsiam_mlp=use_simsiam_projector
)
@property
def dtype(self) -> torch.dtype:
"""
`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
return get_parameter_dtype(self)
def set_student_status(self, is_student: bool):
self.is_student = is_student
def invert_attention_mask(self, encoder_attention_mask: torch.Tensor) -> torch.Tensor:
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (`torch.Tensor`): An attention mask.
Returns:
`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
if self.dtype == torch.float16:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
elif self.dtype in [torch.bfloat16, torch.float32]:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
else:
raise ValueError(
f"{self.dtype} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`"
)
# if device is on GPU, convert to CUDA tensor:
encoder_extended_attention_mask = encoder_extended_attention_mask.to(next(self.encoder.parameters()).device)
return encoder_extended_attention_mask
def get_head_mask(
self, head_mask: Optional[torch.Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
) -> torch.Tensor:
"""
Prepare the head mask if needed.
Args:
head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
num_hidden_layers (`int`):
The number of hidden layers in the model.
is_attention_chunked: (`bool`, *optional*, defaults to `False`):
Whether or not the attentions scores are computed by chunks or not.
Returns:
`torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with
`[None]` for each layer.
"""
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
if is_attention_chunked is True:
head_mask = head_mask.unsqueeze(-1)
else:
head_mask = [None] * num_hidden_layers
return head_mask
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
"""-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility
return head_mask
def forward(
self,
inputs: torch.FloatTensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
apply_mask: Optional[bool] = True,
output_attentions: Optional[bool] = True,
output_hidden_states: Optional[bool] = True,
return_dict: Optional[bool] = True,
) -> ModelOutput:
r"""
Args:
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
            attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
if self.input_preprocessor is not None:
inputs, _, _ = self.input_preprocessor(inputs)
else:
if inputs.size()[-1] != self.d_model:
raise ValueError(
f"Last dimension of the inputs: {inputs.size()[-1]} doesn't correspond to d_model: {self.d_model}. "
"Make sure to set d_model appropriately."
)
if self.is_student and apply_mask:
inputs = mask_hidden_states(
hidden_states=inputs,
attention_mask=attention_mask,
mask_time_prob=self.mask_time_prob,
mask_time_length=self.mask_time_length,
training=self.is_training
)
batch_size, seq_length, _ = inputs.size()
# If no attention mask is provided, make them all ones
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length), device=inputs.device)
# Make the attention mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
extended_attention_mask = self.invert_attention_mask(attention_mask)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_blocks x num_heads]
# and head_mask is converted to shape [num_blocks x batch x num_heads x N x N]
head_mask = self.get_head_mask(head_mask, self.num_blocks * self.num_self_attends_per_block)
embedding_output = self.embeddings(batch_size=batch_size)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=None,
head_mask=head_mask,
inputs=inputs,
inputs_mask=extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooler_output = self.pooler(sequence_output)
return ModelOutput(
pooler_output=pooler_output,
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
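# --- Added illustration (not part of the original file): a minimal end-to-end sketch of the model
# --- above, relying on the default hyperparameters and on the helper classes imported at the top of
# --- this file (PerceiverEmbeddings, Pooler). Without an input preprocessor, the inputs must already
# --- have `d_model` channels in the last dimension.
def _perceiver_model_sketch():
    import torch
    model = PerceiverModel()              # defaults: d_model=704, 784 latents of width 512
    x = torch.randn(2, 100, 704)          # (batch, sequence_length, d_model)
    out = model(x, apply_mask=False)
    # pooled projection used by the alignment loss, plus the final latent array
    return out.pooler_output.shape, out.last_hidden_state.shape  # latents expected as (2, 784, 512)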
| multimodal-self-distillation-main | src/models/components/perceiver.py |
import copy
import torch.nn as nn
class EMA:
"""
Modified version of class fairseq.models.ema.EMAModule.
Args:
model (nn.Module):
cfg (DictConfig):
device (str):
skip_keys (list): The keys to skip assigning averaged weights to.
"""
def __init__(
self,
model: nn.Module,
ema_decay: float = 0.999,
skip_keys=None
):
self.model = copy.deepcopy(model)
self.model.requires_grad_(False)
self.decay = ema_decay
self.skip_keys = skip_keys or set()
self.num_updates = 0
def step(self, new_model: nn.Module):
"""
One EMA step
Args:
new_model (nn.Module): Online model to fetch new weights from
"""
ema_state_dict = {}
ema_params = self.model.state_dict()
for key, param in new_model.state_dict().items():
ema_param = ema_params[key].float()
if key in self.skip_keys:
ema_param = param.to(dtype=ema_param.dtype).clone()
else:
ema_param.mul_(self.decay)
ema_param.add_(param.to(dtype=ema_param.dtype), alpha=1 - self.decay)
ema_state_dict[key] = ema_param
self.model.load_state_dict(ema_state_dict, strict=False)
self.num_updates += 1
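        # (added note, not in the original file) each call applies the usual EMA update per tensor:
        #     ema_param <- decay * ema_param + (1 - decay) * param
        # e.g. with decay = 0.999, an EMA weight of 1.0 and a new online weight of 0.0 moves to 0.999.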
def restore(self, model: nn.Module):
"""
Reassign weights from another model
Args:
model (nn.Module): model to load weights from.
Returns:
model with new weights
"""
d = self.model.state_dict()
model.load_state_dict(d, strict=False)
return model
def state_dict(self):
return self.model.state_dict()
@staticmethod
def get_annealed_rate(start, end, curr_step, total_steps):
"""
Calculate EMA annealing rate
"""
r = end - start
pct_remaining = 1 - curr_step / total_steps
return end - r * pct_remaining | multimodal-self-distillation-main | src/models/components/ema.py |
multimodal-self-distillation-main | src/models/components/__init__.py |
|
from typing import Optional, Mapping, Callable, Tuple
from functools import reduce
from operator import __add__
import math
import numpy as np
import torch
from torch import nn
from transformers.models.perceiver.modeling_perceiver import build_position_encoding, space_to_depth
PreprocessorOutputType = Tuple[torch.Tensor, Optional[torch.Tensor], torch.Tensor]
PreprocessorType = Callable[..., PreprocessorOutputType]
class Conv2dSamePadding(nn.Conv2d):
"""
Conv2d layer with padding="same" support. Source:
https://gist.github.com/sumanmichael/4de9dee93f972d47c80c4ade8e149ea6
"""
def __init__(self, *args, **kwargs):
super(Conv2dSamePadding, self).__init__(*args, **kwargs)
self.zero_pad_2d = nn.ZeroPad2d(
reduce(__add__, [(k // 2 + (k - 2 * (k // 2)) - 1, k // 2) for k in self.kernel_size[::-1]])
)
def forward(self, input):
return self._conv_forward(self.zero_pad_2d(input), self.weight, self.bias)
class Conv2DDownsample(nn.Module):
"""Downsamples 4x by applying a 2D convolution and doing max pooling."""
def __init__(
self,
num_layers: int = 1,
in_channels: int = 3,
out_channels: int = 64,
use_batchnorm: bool = True,
):
"""
Constructs a Conv2DDownsample model.
        Args:
            num_layers (`int`, *optional*, defaults to 1):
                Number of downsampling stages (the current implementation always builds a single conv + max-pool stage).
            in_channels (`int`, *optional*, defaults to 3):
                The number of input channels.
out_channels (`int`, *optional*, defaults to 64):
The number of conv output channels.
use_batchnorm (`bool`, *optional*, defaults to `True`):
Whether to use batchnorm.
"""
super().__init__()
self.conv = Conv2dSamePadding(
in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False
)
self.batchnorm = nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity()
self.relu = nn.ReLU()
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
out = self.conv(inputs)
out = self.batchnorm(out)
out = self.relu(out)
out = self.max_pool(out)
return out
class AbstractPreprocessor(nn.Module):
@property
def num_channels(self) -> int:
"""Returns size of preprocessor output."""
raise NotImplementedError()
class PerceiverTextPreprocessor(AbstractPreprocessor):
"""
Text preprocessing for Perceiver Encoder. Can be used to embed `inputs` and add positional encodings.
    The dimensionality of the embeddings is set by the `d_model` argument.
    Args:
        d_model (`int`):
            Dimensionality of the token and position embeddings.
        vocab_size (`int`):
            Size of the vocabulary.
        max_position_embeddings (`int`):
            Maximum sequence length supported by the learned position embeddings.
"""
def __init__(
self,
d_model: int,
vocab_size: int,
max_position_embeddings: int
) -> None:
super().__init__()
self.d_model = d_model
self.embeddings = nn.Embedding(num_embeddings=vocab_size, embedding_dim=d_model)
self.position_embeddings = nn.Embedding(max_position_embeddings, d_model)
@property
def num_channels(self) -> int:
return self.d_model
def forward(self, inputs: torch.LongTensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True) -> torch.FloatTensor:
device = inputs.device
self.embeddings.to(device)
self.position_embeddings.to(device)
embeddings = self.embeddings(inputs)
seq_length = inputs.shape[1]
position_ids = torch.arange(0, seq_length, device=inputs.device)
embeddings = embeddings + self.position_embeddings(position_ids)
return embeddings, None, None
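# --- Added illustration (not part of the original file): a small, hypothetical example of the text
# --- preprocessor turning token ids into embeddings with learned position encodings.
def _text_preprocessor_sketch():
    import torch
    prep = PerceiverTextPreprocessor(d_model=256, vocab_size=1000, max_position_embeddings=512)
    token_ids = torch.randint(0, 1000, (2, 64))   # (batch, sequence_length)
    embeddings, _, _ = prep(token_ids)            # token embeddings + position embeddings
    return embeddings.shape                        # expected: torch.Size([2, 64, 256])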
class PerceiverImagePreprocessor(AbstractPreprocessor):
"""
Image preprocessing for Perceiver Encoder.
Note: the *out_channels* argument refers to the output channels of a convolutional layer, if *prep_type* is set to
"conv1x1" or "conv". If one adds absolute position embeddings, one must make sure the *num_channels* of the
position encoding kwargs are set equal to the *out_channels*.
Args:
        image_size (`int`, *optional*, defaults to 56):
            Spatial size of the input frames; used to set the Fourier position encoding resolution for video inputs.
prep_type (`str`, *optional*, defaults to `"conv"`):
Preprocessing type. Can be "conv1x1", "conv", "patches", "pixels".
spatial_downsample (`int`, *optional*, defaults to 4):
Spatial downsampling factor.
temporal_downsample (`int`, *optional*, defaults to 1):
Temporal downsampling factor (only relevant in case a time dimension is present).
position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
Position encoding type. Can be "fourier" or "trainable".
in_channels (`int`, *optional*, defaults to 3):
Number of channels in the input.
out_channels (`int`, *optional*, defaults to 64):
Number of channels in the output.
conv_after_patching (`bool`, *optional*, defaults to `False`):
Whether to apply a convolutional layer after patching.
conv_after_patching_in_channels (`int`, *optional*, defaults to 54):
Number of channels in the input of the convolutional layer after patching.
conv2d_use_batchnorm (`bool`, *optional*, defaults to `True`):
Whether to use batch normalization in the convolutional layer.
concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
How to concatenate the position encoding to the input. Can be "concat" or "add".
project_pos_dim (`int`, *optional*, defaults to -1):
Dimension of the position encoding to project to. If -1, no projection is applied.
num_frames: the number of frames in the input video, if num_frames > 1, then the input
is assumed to be a video
**position_encoding_kwargs (`Dict`, *optional*):
Keyword arguments for the position encoding.
"""
def __init__(
self,
prep_type: str = "conv",
spatial_downsample: int = 4,
temporal_downsample: int = 1,
position_encoding_type: str = "fourier",
in_channels: int = 3,
out_channels: int = 64,
conv_after_patching: bool = False,
conv_after_patching_in_channels: int = 54, # only relevant when conv_after_patching = True
conv2d_use_batchnorm: bool = True,
concat_or_add_pos: str = "concat",
project_pos_dim: int = -1,
num_frames: int = 16,
image_size: int = 56,
):
super().__init__()
if prep_type not in ("conv", "patches", "pixels", "conv1x1"):
raise ValueError(f"Prep_type {prep_type} is invalid")
if concat_or_add_pos not in ["concat", "add"]:
raise ValueError(f"Invalid value {concat_or_add_pos} for concat_or_add_pos.")
self.in_channels = in_channels
self.prep_type = prep_type
self.spatial_downsample = spatial_downsample
self.temporal_downsample = temporal_downsample
self.position_encoding_type = position_encoding_type
self.concat_or_add_pos = concat_or_add_pos
self.conv_after_patching = conv_after_patching
self.out_channels = out_channels
if self.prep_type == "conv":
# Downsampling with conv is currently restricted
convnet_num_layers = math.log(spatial_downsample, 4)
convnet_num_layers_is_int = convnet_num_layers == np.round(convnet_num_layers)
if not convnet_num_layers_is_int or temporal_downsample != 1:
raise ValueError(
"Only powers of 4 expected for spatial and 1 expected for temporal downsampling with conv."
)
self.convnet = Conv2DDownsample(
in_channels=in_channels,
num_layers=int(convnet_num_layers),
out_channels=out_channels,
use_batchnorm=conv2d_use_batchnorm,
)
elif self.prep_type == "conv1x1":
if temporal_downsample != 1:
raise ValueError("Conv1x1 does not downsample in time.")
self.convnet_1x1 = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(1, 1),
# spatial_downsample is unconstrained for 1x1 convolutions.
stride=(spatial_downsample, spatial_downsample),
)
# set num_bands differently depending on whether we are dealing with video or single image
if num_frames > 1:
num_bands = 32
else:
num_bands = 64
# set max_resolution differently depending on whether we are dealing with video or single image
if num_frames > 1:
max_resolution = (num_frames, image_size, image_size)
else:
max_resolution = (224, 224)
# Position embeddings
position_encoding_kwargs = dict(
num_bands=num_bands,
max_resolution=max_resolution,
sine_only=False,
concat_pos=True,
)
self.project_pos_dim = project_pos_dim
self.position_embeddings, self.positions_projection = build_position_encoding(
position_encoding_type=position_encoding_type,
out_channels=out_channels,
project_pos_dim=project_pos_dim,
fourier_position_encoding_kwargs=position_encoding_kwargs,
)
# Optional convolutional layer after patches.
self.conv_after_patches = (
nn.Linear(conv_after_patching_in_channels, self.out_channels) if conv_after_patching else nn.Identity()
)
@property
def num_channels(self) -> int:
# Let's assume that the number of resolutions (in the context of image preprocessing)
# of the input data is 2 or 3 depending on whether we are processing image or video respectively.
# In this case, for convenience, we will declare is_temporal variable,
# which will show whether the data has a temporal dimension or not.
is_temporal = self.position_embeddings.num_dimensions > 2
# position embedding
if self.project_pos_dim > 0:
pos_dim = self.project_pos_dim
else:
pos_dim = self.position_embeddings.output_size()
if self.concat_or_add_pos == "add":
return pos_dim
# inputs
if self.conv_after_patching or self.prep_type in ("conv1x1", "conv"):
inp_dim = self.out_channels
elif self.prep_type == "pixels":
inp_dim = self.in_channels
if not is_temporal:
inp_dim = math.ceil(inp_dim / self.spatial_downsample)
elif self.prep_type == "patches":
if self.conv_after_patching:
inp_dim = self.out_channels
else:
inp_dim = self.in_channels * self.spatial_downsample**2
if is_temporal:
inp_dim *= self.temporal_downsample
return inp_dim + pos_dim
def _build_network_inputs(self, inputs: torch.Tensor, pos: torch.Tensor, network_input_is_1d: bool = True):
"""
Construct the final input, including position encoding.
This method expects the inputs to always have channels as last dimension.
"""
batch_size = inputs.shape[0]
index_dims = inputs.shape[1:-1]
indices = np.prod(index_dims)
# Flatten input features to a 1D index dimension if necessary.
if len(inputs.shape) > 3 and network_input_is_1d:
inputs = torch.reshape(inputs, [batch_size, indices, -1])
# Construct the position encoding.
if self.position_encoding_type == "trainable":
pos_enc = self.position_embeddings(batch_size)
elif self.position_encoding_type == "fourier":
pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device)
# Optionally project them to a target dimension.
pos_enc = self.positions_projection(pos_enc)
if not network_input_is_1d:
# Reshape pos to match the input feature shape
# if the network takes non-1D inputs
sh = inputs.shape
pos_enc = torch.reshape(pos_enc, list(sh)[:-1] + [-1])
if self.concat_or_add_pos == "concat":
inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
elif self.concat_or_add_pos == "add":
inputs_with_pos = inputs + pos_enc
return inputs_with_pos, inputs
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True):
if self.prep_type == "conv":
# Convnet image featurization.
# Downsamples spatially by a factor of 4
inputs = self.convnet(inputs)
elif self.prep_type == "conv1x1":
# map inputs to self.out_channels
inputs = self.convnet_1x1(inputs)
elif self.prep_type == "pixels":
# if requested, downsamples in the crudest way
if inputs.ndim == 4:
inputs = inputs[:: self.spatial_downsample, :: self.spatial_downsample]
elif inputs.ndim == 5:
inputs = inputs[
:, :: self.temporal_downsample, :, :: self.spatial_downsample, :: self.spatial_downsample
]
else:
raise ValueError("Unsupported data format for pixels.")
elif self.prep_type == "patches":
# Space2depth featurization.
# Video: B x T x C x H x W
inputs = space_to_depth(
inputs, temporal_block_size=self.temporal_downsample, spatial_block_size=self.spatial_downsample
)
if inputs.ndim == 5 and inputs.shape[1] == 1:
# for flow
inputs = inputs.squeeze(dim=1)
# Optionally apply conv layer.
inputs = self.conv_after_patches(inputs)
if self.prep_type != "patches":
# move channels to last dimension, as the _build_network_inputs method below expects this
if inputs.ndim == 4:
inputs = torch.moveaxis(inputs, 1, -1)
elif inputs.ndim == 5:
inputs = torch.moveaxis(inputs, 2, -1)
else:
raise ValueError("Unsupported data format for conv1x1.")
inputs, inputs_without_pos = self._build_network_inputs(inputs, pos, network_input_is_1d)
modality_sizes = None # Size for each modality, only needed for multimodal
return inputs, modality_sizes, inputs_without_pos
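# --- Added illustration (not part of the original file): a hypothetical use of the image preprocessor
# --- with the 'conv1x1' prep type. Pixels are flattened to a 1D sequence of positions, each carrying
# --- the conv features concatenated with a 2D Fourier position encoding.
def _image_preprocessor_sketch():
    import torch
    prep = PerceiverImagePreprocessor(
        prep_type="conv1x1",
        spatial_downsample=1,
        in_channels=3,
        out_channels=64,
        num_frames=1,   # single images, not video
    )
    images = torch.randn(2, 3, 32, 32)   # (batch, channels, height, width), sizes are illustrative
    inputs, _, _ = prep(images)
    return inputs.shape                   # expected: (2, 32 * 32, prep.num_channels)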
class PerceiverOneHotPreprocessor(AbstractPreprocessor):
"""
One-hot preprocessor for Perceiver Encoder. Can be used to add a dummy index dimension to the input.
Args:
        num_labels (`int`):
            Number of classes; inputs are expected to be (one-hot or soft) label vectors of this size.
"""
def __init__(
self,
num_labels: int
) -> None:
super().__init__()
self.num_labels = num_labels
@property
def num_channels(self) -> int:
return self.num_labels
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True):
# Add a dummy index dimension.
inputs = inputs[:, None, :]
# No position encodings, so the 1st (input) and 3rd (inputs_without_pos)
# outputs are identical.
return inputs, None, inputs
class PerceiverAudioPreprocessor(AbstractPreprocessor):
"""
Audio preprocessing for Perceiver Encoder.
Args:
        num_frames (`int`, *optional*, defaults to 1):
            Number of frames the audio is aligned with.
        audio_samples_per_frame (`int`, *optional*, defaults to 1920):
            Raw audio samples per frame; together with `num_frames` this sets the Fourier position encoding resolution.
prep_type (`str`, *optional*, defaults to `"patches"`):
Preprocessor type to use. Only "patches" is supported.
samples_per_patch (`int`, *optional*, defaults to 96):
Number of samples per patch.
position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
Type of position encoding to use. Can be "trainable" or "fourier".
concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
How to concatenate the position encoding to the input. Can be "concat" or "add".
out_channels (`int`, *optional*, defaults to 64):
Number of channels in the output.
project_pos_dim (`int`, *optional*, defaults to -1):
Dimension of the position encoding to project to. If -1, no projection is applied.
**position_encoding_kwargs (`Dict`, *optional*):
Keyword arguments for the position encoding.
"""
def __init__(
self,
prep_type: str = "patches",
samples_per_patch: int = 96,
position_encoding_type: str = "fourier",
concat_or_add_pos: str = "concat",
out_channels=64,
project_pos_dim=-1,
num_frames=1,
audio_samples_per_frame=1920,
):
super().__init__()
if prep_type not in ("patches",):
raise ValueError(f"Prep_type {prep_type} is invalid, can only be 'patches'.")
if concat_or_add_pos not in ["concat", "add"]:
raise ValueError(f"Concat_or_pos {concat_or_add_pos} is invalid, can only be 'concat' or 'add'.")
self.samples_per_patch = samples_per_patch
self.position_encoding_type = position_encoding_type
self.concat_or_add_pos = concat_or_add_pos
self.project_pos_dim = project_pos_dim
self.n_audio_samples = num_frames * audio_samples_per_frame
# Position embeddings
position_encoding_kwargs = dict(
num_bands=192,
max_resolution=(self.n_audio_samples,),
sine_only=False,
concat_pos=True,
)
self.position_embeddings, self.positions_projection = build_position_encoding(
position_encoding_type=position_encoding_type,
out_channels=out_channels,
project_pos_dim=project_pos_dim,
fourier_position_encoding_kwargs=position_encoding_kwargs,
)
@property
def num_channels(self) -> int:
# position embedding
if self.project_pos_dim > 0:
pos_dim = self.project_pos_dim
else:
pos_dim = self.position_embeddings.output_size()
if self.concat_or_add_pos == "add":
return pos_dim
return self.samples_per_patch + pos_dim
def _build_network_inputs(self, inputs, pos):
"""Construct the final input, including position encoding."""
batch_size = inputs.shape[0]
index_dims = inputs.shape[1:-1]
# Construct the position encoding.
if self.position_encoding_type == "trainable":
pos_enc = self.position_embeddings(batch_size)
elif self.position_encoding_type == "fourier":
pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device)
# Optionally project them to a target dimension.
pos_enc = self.positions_projection(pos_enc)
if self.concat_or_add_pos == "concat":
inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
elif self.concat_or_add_pos == "add":
inputs_with_pos = inputs + pos_enc
return inputs_with_pos, inputs
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True):
inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch])
inputs, inputs_without_pos = self._build_network_inputs(inputs, pos)
modality_sizes = None # Size for each modality, only needed for multimodal
return inputs, modality_sizes, inputs_without_pos
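# --- Added illustration (not part of the original file): a hypothetical call showing how raw audio is
# --- folded into patches of `samples_per_patch` samples and concatenated with Fourier position features.
def _audio_preprocessor_sketch():
    import torch
    prep = PerceiverAudioPreprocessor(samples_per_patch=96, num_frames=1, audio_samples_per_frame=1920)
    audio = torch.randn(2, 1920, 1)   # (batch, raw audio samples, 1)
    inputs, _, _ = prep(audio)
    # 1920 samples / 96 samples per patch = 20 patches per example
    return inputs.shape                # expected: (2, 20, prep.num_channels)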
class PerceiverMultimodalPreprocessor(AbstractPreprocessor):
"""
Multimodal preprocessing for Perceiver Encoder.
Inputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number
of channels.
Args:
modalities (`Dict[str, PreprocessorType]`):
Dict mapping modality name to preprocessor.
mask_probs (`Dict[str, float]`):
Dict mapping modality name to masking probability of that modality.
min_padding_size (`int`, *optional*, defaults to 2):
The minimum padding size for all modalities. The final output will have num_channels equal to the maximum
channels across all modalities plus min_padding_size.
"""
def __init__(
self,
modalities: Mapping[str, PreprocessorType], #TODO should this be a nn.ModuleDict? Might be the reason why proper device is not set
mask_probs: Optional[Mapping[str, float]] = None,
min_padding_size: int = 2,
):
super().__init__()
self.modalities = modalities
self.min_padding_size = min_padding_size
self.mask_probs = mask_probs if mask_probs is not None else dict()
self.padding = nn.ParameterDict(
{
modality: nn.Parameter(torch.randn(1, self.num_channels - preprocessor.num_channels))
for modality, preprocessor in modalities.items()
}
)
self.mask = nn.ParameterDict(
{modality: nn.Parameter(torch.randn(1, self.num_channels)) for modality, _ in self.mask_probs.items()}
)
@property
def num_channels(self) -> int:
        max_channel_size = max(processor.num_channels for _, processor in self.modalities.items())  # TODO this breaks HiP inputs because it takes the max channel size over all modality-specific preprocessors (even if that modality is not present in the input)
common_channel_size = max_channel_size + self.min_padding_size
return common_channel_size
def forward(
self,
inputs: Mapping[str, torch.Tensor],
pos: Optional[torch.Tensor] = None,
network_input_is_1d: bool = True
) -> PreprocessorOutputType:
padded = {}
modality_sizes = {}
inputs_without_pos = {}
for modality, preprocessor in self.modalities.items():
# preprocess each modality using the respective preprocessor.
if modality in inputs:
output, _, inputs_without_pos[modality] = preprocessor(
inputs[modality], pos=pos, network_input_is_1d=network_input_is_1d
)
# pad to the same common_channel_size.
batch_size, num_samples, num_channels = output.shape
pos_enc = self.padding[modality].expand(batch_size, -1, -1)
padding = torch.broadcast_to(
pos_enc,
[batch_size, num_samples, self.num_channels - num_channels],
)
padding = padding.to(output.device)
output_padded = torch.cat([output, padding], dim=2)
# mask if required
if modality in self.mask_probs:
mask_token = self.mask[modality].expand(batch_size, -1, -1)
mask_prob = self.mask_probs[modality]
mask = torch.bernoulli(torch.full([batch_size, num_samples], mask_prob))
mask = torch.unsqueeze(mask, dim=2).to(mask_token.device)
output_padded = (1 - mask) * output_padded + mask * mask_token
padded[modality] = output_padded
modality_sizes[modality] = output_padded.shape[1]
# Apply a predictable ordering to the modalities
padded_ls = [padded[k] for k in sorted(padded.keys())]
# Finally, concatenate along the time dimension
final_inputs = torch.cat(padded_ls, dim=1)
return final_inputs, modality_sizes, inputs_without_pos | multimodal-self-distillation-main | src/models/components/preprocessor.py |
from typing import Tuple, Optional
import torch
import numpy as np
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
    # add offsets to the starting indices so that the indices now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
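# --- Added illustration (not part of the original file): a tiny, hypothetical call showing the shape
# --- and meaning of the SpecAugment-style mask computed above.
def _mask_indices_sketch():
    mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.1, mask_length=5)
    # `mask` is a (2, 100) boolean numpy array; True marks time steps inside a masked span.
    # With mask_prob=0.1 and mask_length=5, roughly 0.1 * 100 / 5 = 2 spans are drawn per sequence.
    return mask.sum(axis=-1)   # number of masked time steps per sequence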
def mask_hidden_states(
hidden_states: torch.FloatTensor,
mask_time_indices: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
mask_time_prob: float = 0.05,
mask_time_length: int = 10,
mask_feature_prob: float = 0.0,
mask_feature_length: int = 10,
min_masks: int = 0,
training: bool = True
) -> torch.FloatTensor:
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://arxiv.org/abs/1904.08779).
"""
_, _, hidden_size = hidden_states.size()
if mask_time_prob > 0.0 or mask_feature_prob > 0.0:
masked_spec_embed = torch.nn.Parameter(torch.FloatTensor(hidden_size).uniform_())
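        # (added note, not in the original file) this embedding is re-drawn at random on every call; unlike
        # wav2vec2's learned `masked_spec_embed` parameter it is not registered on a module, so it is not trained.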
masked_spec_embed = masked_spec_embed.to(hidden_states.device)
# generate indices & apply SpecAugment along time axis
batch_size, sequence_length, hidden_size = hidden_states.size()
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states[mask_time_indices] = masked_spec_embed.to(hidden_states.dtype)
elif mask_time_prob > 0 and training:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=mask_time_prob,
mask_length=mask_time_length,
attention_mask=attention_mask,
min_masks=min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
hidden_states[mask_time_indices] = masked_spec_embed.to(hidden_states.dtype)
if mask_feature_prob > 0 and training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=mask_feature_prob,
mask_length=mask_feature_length,
min_masks=min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
hidden_states[mask_feature_indices] = 0
return hidden_states | multimodal-self-distillation-main | src/models/components/masking.py |
import math
import torch
from torch import nn
import torch.nn.functional as F
from src.models.components.gather import gather
from src.models.components.outputs import ForwardPassOutput, CriterionOutput
class LatentPredictionLoss(nn.Module):
def __init__(
self,
num_hidden_layers_to_predict: int,
use_latent_loss: bool = True,
reduction: str = "none",
aggregation: str = "mean",
beta: float = 1.0,
latent_loss_scale: float = 1.0,
batch_norm_target_layer:bool = True,
instance_norm_target_layer: bool = True,
layer_norm_target_layer: bool = True,
layer_norm_targets: bool = True,
instance_norm_targets: bool = True,
use_align_loss: bool = True,
sim_loss_weight: float = 25.0,
var_loss_weight: float = 25.0,
cov_loss_weight: float = 1.0,
) -> None:
super().__init__()
self.use_latent_loss = use_latent_loss
self.has_faiss_format = False
self.batch_norm_target_layer = batch_norm_target_layer
self.instance_norm_target_layer = instance_norm_target_layer
self.layer_norm_target_layer = layer_norm_target_layer
self.layer_norm_targets = layer_norm_targets
self.instance_norm_targets = instance_norm_targets
self.reduction = reduction
self.aggregation = aggregation
self.beta = beta
self.latent_loss_scale = latent_loss_scale
self.use_align_loss = use_align_loss
self.align_loss_fn = VICRegLoss(sim_loss_weight, var_loss_weight, cov_loss_weight)
self.k = num_hidden_layers_to_predict
def forward(
self,
fwd_output: ForwardPassOutput,
) -> CriterionOutput:
# take the last transformer layers from the student: (batch size, sequence length, hidden size)
x = fwd_output.student_output.hidden_states[-1:][0]
#TODO optionally: x = regression_head(x)
with torch.no_grad():
# (batch_size, sequence_length, hidden_size) * attention_layers
y = fwd_output.teacher_output.hidden_states[-self.k:]
# B: batch size, T: sequence length, C: hidden size
if not self.has_faiss_format:
y = [tl.permute(1, 0, 2) for tl in y] # BTC -> TBC
permuted = False
if self.batch_norm_target_layer or self.instance_norm_target_layer:
y = [tl.permute(1, 2, 0) for tl in y] # TBC -> BCT
permuted = True
if self.batch_norm_target_layer:
y = [
F.batch_norm(
tl.float(), running_mean=None, running_var=None, training=True
)
for tl in y
]
if self.instance_norm_target_layer:
y = [F.instance_norm(tl.float()) for tl in y]
if permuted:
y = [tl.transpose(1, 2) for tl in y] # BCT -> BTC
if self.layer_norm_target_layer:
y = [F.layer_norm(tl.float(), tl.shape[-1:]) for tl in y]
y = sum(y) / len(y)
if not permuted:
y = y.transpose(0, 1)
if self.layer_norm_targets:
y = F.layer_norm(y.float(), y.shape[-1:])
if self.instance_norm_targets:
y = F.instance_norm(y.transpose(1, 2)).transpose(1, 2)
sz = x.size(-1)
latent_loss = F.smooth_l1_loss(
x.float(), y.float(), reduction=self.reduction, beta=self.beta
).sum(dim=-1)
if self.use_latent_loss:
if self.aggregation == 'mean':
latent_loss = latent_loss.mean() / math.sqrt(sz) if self.latent_loss_scale <= 0 else latent_loss.mean() * self.latent_loss_scale
elif self.aggregation == 'sum':
latent_loss = latent_loss.sum() / math.sqrt(sz) if self.latent_loss_scale <= 0 else latent_loss.sum() * self.latent_loss_scale
else:
latent_loss = 0
if self.use_align_loss:
# align loss (batch size, hidden size)
x_pooler = fwd_output.student_output.pooler_output
y_pooler = fwd_output.teacher_output.pooler_output
align_loss = self.align_loss_fn(x_pooler, y_pooler)
else:
align_loss = 0
total_loss = latent_loss + align_loss
criterion_output = CriterionOutput(total_loss=total_loss, latent_loss=latent_loss, align_loss=align_loss)
return criterion_output
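# --- Added illustration (not part of the original file): the core of the latent regression above,
# --- restated on random tensors under simplified assumptions (instance-norm each of the teacher's
# --- top-k layers, average them, and regress the student's last layer onto the result with smooth L1).
def _latent_regression_sketch():
    import torch
    import torch.nn.functional as F
    k, B, T, C = 4, 2, 16, 32   # hypothetical sizes: layers to average, batch, sequence, hidden
    teacher_layers = [torch.randn(B, T, C) for _ in range(k)]
    target = sum(F.instance_norm(t.transpose(1, 2)).transpose(1, 2) for t in teacher_layers) / k
    student_last = torch.randn(B, T, C)
    loss = F.smooth_l1_loss(student_last, target, reduction="none", beta=1.0).sum(dim=-1).mean()
    return loss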
class VICRegLoss(nn.Module):
# https://github.com/vturrisi/solo-learn/blob/main/solo/losses/vicreg.py
def __init__(
self,
sim_loss_weight: float = 25.0,
var_loss_weight: float = 25.0,
cov_loss_weight: float = 1.0,
) -> None:
"""_summary_
Args:
sim_loss_weight (float, optional): _description_. Defaults to 25.0.
var_loss_weight (float, optional): _description_. Defaults to 25.0.
cov_loss_weight (float, optional): _description_. Defaults to 1.0.
"""
super().__init__()
self.sim_loss_weight = sim_loss_weight
self.var_loss_weight = var_loss_weight
self.cov_loss_weight = cov_loss_weight
def invariance_loss(self, z1: torch.Tensor, z2: torch.Tensor) -> torch.Tensor:
"""Computes mse loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
Returns:
torch.Tensor: invariance loss (mean squared error).
"""
return F.mse_loss(z1, z2)
def variance_loss(self, z1: torch.Tensor, z2: torch.Tensor) -> torch.Tensor:
"""Computes variance loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
Returns:
torch.Tensor: variance regularization loss.
"""
eps = 1e-4
std_z1 = torch.sqrt(z1.var(dim=0) + eps)
std_z2 = torch.sqrt(z2.var(dim=0) + eps)
std_loss = torch.mean(F.relu(1 - std_z1)) + torch.mean(F.relu(1 - std_z2))
return std_loss
def covariance_loss(self, z1: torch.Tensor, z2: torch.Tensor) -> torch.Tensor:
"""Computes covariance loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
Returns:
torch.Tensor: covariance regularization loss.
"""
N, D = z1.size()
z1 = z1 - z1.mean(dim=0)
z2 = z2 - z2.mean(dim=0)
cov_z1 = (z1.T @ z1) / (N - 1)
cov_z2 = (z2.T @ z2) / (N - 1)
diag = torch.eye(D, device=z1.device)
cov_loss = cov_z1[~diag.bool()].pow_(2).sum() / D + cov_z2[~diag.bool()].pow_(2).sum() / D
return cov_loss
def forward(
self,
z1: torch.Tensor,
z2: torch.Tensor
) -> torch.Tensor:
"""Computes VICReg's loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
Returns:
torch.Tensor: VICReg loss.
"""
sim_loss = self.invariance_loss(z1, z2)
# vicreg's official code gathers the tensors here
# https://github.com/facebookresearch/vicreg/blob/main/main_vicreg.py
z1, z2 = gather(z1), gather(z2)
var_loss = self.variance_loss(z1, z2)
cov_loss = self.covariance_loss(z1, z2)
loss = self.sim_loss_weight * sim_loss + self.var_loss_weight * var_loss + self.cov_loss_weight * cov_loss
return loss
| multimodal-self-distillation-main | src/models/components/criterion.py |