| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| algorandfoundation/puya | src/puyapy/_vendor/mypy/dmypy/__init__.py | Python | NOASSERTION | 0 |
from __future__ import annotations
from mypy.dmypy.client import console_entry
if __name__ == "__main__":
console_entry()
| algorandfoundation/puya | src/puyapy/_vendor/mypy/dmypy/__main__.py | Python | NOASSERTION | 128 |
"""Client for mypy daemon mode.
This manages a daemon process which keeps useful state in memory
rather than having to read it back from disk on each run.
"""
from __future__ import annotations
import argparse
import base64
import json
import os
import pickle
import sys
import time
import traceback
from typing import Any, Callable, Mapping, NoReturn
from mypy.dmypy_os import alive, kill
from mypy.dmypy_util import DEFAULT_STATUS_FILE, receive, send
from mypy.ipc import IPCClient, IPCException
from mypy.util import check_python_version, get_terminal_width, should_force_color
from mypy.version import __version__
# Argument parser. Subparsers are tied to action functions by the
# @action(subparse) decorator.
class AugmentedHelpFormatter(argparse.RawDescriptionHelpFormatter):
def __init__(self, prog: str) -> None:
super().__init__(prog=prog, max_help_position=30)
parser = argparse.ArgumentParser(
prog="dmypy", description="Client for mypy daemon mode", fromfile_prefix_chars="@"
)
parser.set_defaults(action=None)
parser.add_argument(
"--status-file", default=DEFAULT_STATUS_FILE, help="status file to retrieve daemon details"
)
parser.add_argument(
"-V",
"--version",
action="version",
version="%(prog)s " + __version__,
help="Show program's version number and exit",
)
subparsers = parser.add_subparsers()
start_parser = p = subparsers.add_parser("start", help="Start daemon")
p.add_argument("--log-file", metavar="FILE", type=str, help="Direct daemon stdout/stderr to FILE")
p.add_argument(
"--timeout", metavar="TIMEOUT", type=int, help="Server shutdown timeout (in seconds)"
)
p.add_argument(
"flags", metavar="FLAG", nargs="*", type=str, help="Regular mypy flags (precede with --)"
)
restart_parser = p = subparsers.add_parser(
"restart", help="Restart daemon (stop or kill followed by start)"
)
p.add_argument("--log-file", metavar="FILE", type=str, help="Direct daemon stdout/stderr to FILE")
p.add_argument(
"--timeout", metavar="TIMEOUT", type=int, help="Server shutdown timeout (in seconds)"
)
p.add_argument(
"flags", metavar="FLAG", nargs="*", type=str, help="Regular mypy flags (precede with --)"
)
status_parser = p = subparsers.add_parser("status", help="Show daemon status")
p.add_argument("-v", "--verbose", action="store_true", help="Print detailed status")
p.add_argument("--fswatcher-dump-file", help="Collect information about the current file state")
stop_parser = p = subparsers.add_parser("stop", help="Stop daemon (asks it politely to go away)")
kill_parser = p = subparsers.add_parser("kill", help="Kill daemon (kills the process)")
check_parser = p = subparsers.add_parser(
"check", formatter_class=AugmentedHelpFormatter, help="Check some files (requires daemon)"
)
p.add_argument("-v", "--verbose", action="store_true", help="Print detailed status")
p.add_argument("-q", "--quiet", action="store_true", help=argparse.SUPPRESS) # Deprecated
p.add_argument("--junit-xml", help="Write junit.xml to the given file")
p.add_argument("--perf-stats-file", help="write performance information to the given file")
p.add_argument("files", metavar="FILE", nargs="+", help="File (or directory) to check")
p.add_argument(
"--export-types",
action="store_true",
help="Store types of all expressions in a shared location (useful for inspections)",
)
run_parser = p = subparsers.add_parser(
"run",
formatter_class=AugmentedHelpFormatter,
help="Check some files, [re]starting daemon if necessary",
)
p.add_argument("-v", "--verbose", action="store_true", help="Print detailed status")
p.add_argument("--junit-xml", help="Write junit.xml to the given file")
p.add_argument("--perf-stats-file", help="write performance information to the given file")
p.add_argument(
"--timeout", metavar="TIMEOUT", type=int, help="Server shutdown timeout (in seconds)"
)
p.add_argument("--log-file", metavar="FILE", type=str, help="Direct daemon stdout/stderr to FILE")
p.add_argument(
"--export-types",
action="store_true",
help="Store types of all expressions in a shared location (useful for inspections)",
)
p.add_argument(
"flags",
metavar="ARG",
nargs="*",
type=str,
help="Regular mypy flags and files (precede with --)",
)
recheck_parser = p = subparsers.add_parser(
"recheck",
formatter_class=AugmentedHelpFormatter,
help="Re-check the previous list of files, with optional modifications (requires daemon)",
)
p.add_argument("-v", "--verbose", action="store_true", help="Print detailed status")
p.add_argument("-q", "--quiet", action="store_true", help=argparse.SUPPRESS) # Deprecated
p.add_argument("--junit-xml", help="Write junit.xml to the given file")
p.add_argument("--perf-stats-file", help="write performance information to the given file")
p.add_argument(
"--export-types",
action="store_true",
help="Store types of all expressions in a shared location (useful for inspections)",
)
p.add_argument(
"--update",
metavar="FILE",
nargs="*",
help="Files in the run to add or check again (default: all from previous run)",
)
p.add_argument("--remove", metavar="FILE", nargs="*", help="Files to remove from the run")
suggest_parser = p = subparsers.add_parser(
"suggest", help="Suggest a signature or show call sites for a specific function"
)
p.add_argument(
"function",
metavar="FUNCTION",
type=str,
help="Function specified as '[package.]module.[class.]function'",
)
p.add_argument(
"--json",
action="store_true",
help="Produce json that pyannotate can use to apply a suggestion",
)
p.add_argument(
"--no-errors", action="store_true", help="Only produce suggestions that cause no errors"
)
p.add_argument(
"--no-any", action="store_true", help="Only produce suggestions that don't contain Any"
)
p.add_argument(
"--flex-any",
type=float,
help="Allow anys in types if they go above a certain score (scores are from 0-1)",
)
p.add_argument(
"--callsites", action="store_true", help="Find callsites instead of suggesting a type"
)
p.add_argument(
"--use-fixme",
metavar="NAME",
type=str,
help="A dummy name to use instead of Any for types that can't be inferred",
)
p.add_argument(
"--max-guesses",
type=int,
help="Set the maximum number of types to try for a function (default 64)",
)
inspect_parser = p = subparsers.add_parser(
"inspect", help="Locate and statically inspect expression(s)"
)
p.add_argument(
"location",
metavar="LOCATION",
type=str,
help="Location specified as path/to/file.py:line:column[:end_line:end_column]."
" If position is given (i.e. only line and column), this will return all"
" enclosing expressions",
)
p.add_argument(
"--show",
metavar="INSPECTION",
type=str,
default="type",
choices=["type", "attrs", "definition"],
help="What kind of inspection to run",
)
p.add_argument(
"--verbose",
"-v",
action="count",
default=0,
help="Increase verbosity of the type string representation (can be repeated)",
)
p.add_argument(
"--limit",
metavar="NUM",
type=int,
default=0,
help="Return at most NUM innermost expressions (if position is given); 0 means no limit",
)
p.add_argument(
"--include-span",
action="store_true",
help="Prepend each inspection result with the span of corresponding expression"
' (e.g. 1:2:3:4:"int")',
)
p.add_argument(
"--include-kind",
action="store_true",
help="Prepend each inspection result with the kind of corresponding expression"
' (e.g. NameExpr:"int")',
)
p.add_argument(
"--include-object-attrs",
action="store_true",
help='Include attributes of "object" in "attrs" inspection',
)
p.add_argument(
"--union-attrs",
action="store_true",
help="Include attributes valid for some of possible expression types"
" (by default an intersection is returned)",
)
p.add_argument(
"--force-reload",
action="store_true",
help="Re-parse and re-type-check file before inspection (may be slow)",
)
hang_parser = p = subparsers.add_parser("hang", help="Hang for 100 seconds")
daemon_parser = p = subparsers.add_parser("daemon", help="Run daemon in foreground")
p.add_argument(
"--timeout", metavar="TIMEOUT", type=int, help="Server shutdown timeout (in seconds)"
)
p.add_argument("--log-file", metavar="FILE", type=str, help="Direct daemon stdout/stderr to FILE")
p.add_argument(
"flags", metavar="FLAG", nargs="*", type=str, help="Regular mypy flags (precede with --)"
)
p.add_argument("--options-data", help=argparse.SUPPRESS)
help_parser = p = subparsers.add_parser("help")
del p
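# Illustrative usage (not part of mypy): typical invocations of the parser built
# above, mirroring the examples given in the do_start()/do_run() docstrings below.
# The "--" separator is only needed when extra mypy flags are passed through.
#
#   dmypy start -- --strict
#   dmypy run -- --strict some_module.py
#   dmypy check some_module.py
#   dmypy status --verbose
#   dmypy stop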
class BadStatus(Exception):
"""Exception raised when there is something wrong with the status file.
For example:
- No status file found
- Status file malformed
- Process whose pid is in the status file does not exist
"""
def main(argv: list[str]) -> None:
"""The code is top-down."""
check_python_version("dmypy")
args = parser.parse_args(argv)
if not args.action:
parser.print_usage()
else:
try:
args.action(args)
except BadStatus as err:
fail(err.args[0])
except Exception:
# We do this explicitly to avoid exceptions percolating up
# through mypy.api invocations
traceback.print_exc()
sys.exit(2)
def fail(msg: str) -> NoReturn:
print(msg, file=sys.stderr)
sys.exit(2)
ActionFunction = Callable[[argparse.Namespace], None]
def action(subparser: argparse.ArgumentParser) -> Callable[[ActionFunction], ActionFunction]:
"""Decorator to tie an action function to a subparser."""
def register(func: ActionFunction) -> ActionFunction:
subparser.set_defaults(action=func)
return func
return register
# Action functions (run in client from command line).
@action(start_parser)
def do_start(args: argparse.Namespace) -> None:
"""Start daemon (it must not already be running).
This is where mypy flags are set from the command line.
Setting flags is a bit awkward; you have to use e.g.:
dmypy start -- --strict
since we don't want to duplicate mypy's huge list of flags.
"""
try:
get_status(args.status_file)
except BadStatus:
# Bad or missing status file or dead process; good to start.
pass
else:
fail("Daemon is still alive")
start_server(args)
@action(restart_parser)
def do_restart(args: argparse.Namespace) -> None:
"""Restart daemon (it may or may not be running; but not hanging).
We first try to stop it politely if it's running. This also sets
mypy flags from the command line (see do_start()).
"""
restart_server(args)
def restart_server(args: argparse.Namespace, allow_sources: bool = False) -> None:
"""Restart daemon (it may or may not be running; but not hanging)."""
try:
do_stop(args)
except BadStatus:
# Bad or missing status file or dead process; good to start.
pass
start_server(args, allow_sources)
def start_server(args: argparse.Namespace, allow_sources: bool = False) -> None:
"""Start the server from command arguments and wait for it."""
# Lazy import so this import doesn't slow down other commands.
from mypy.dmypy_server import daemonize, process_start_options
start_options = process_start_options(args.flags, allow_sources)
if daemonize(start_options, args.status_file, timeout=args.timeout, log_file=args.log_file):
sys.exit(2)
wait_for_server(args.status_file)
def wait_for_server(status_file: str, timeout: float = 5.0) -> None:
"""Wait until the server is up.
Exit if it doesn't happen within the timeout.
"""
endtime = time.time() + timeout
while time.time() < endtime:
try:
data = read_status(status_file)
except BadStatus:
# If the file isn't there yet, retry later.
time.sleep(0.1)
continue
# If the file's content is bogus or the process is dead, fail.
check_status(data)
print("Daemon started")
return
fail("Timed out waiting for daemon to start")
@action(run_parser)
def do_run(args: argparse.Namespace) -> None:
"""Do a check, starting (or restarting) the daemon as necessary
Restarts the daemon if the running daemon reports that it is
required (due to a configuration change, for example).
Setting flags is a bit awkward; you have to use e.g.:
dmypy run -- --strict a.py b.py ...
since we don't want to duplicate mypy's huge list of flags.
(The -- is only necessary if flags are specified.)
"""
if not is_running(args.status_file):
# Bad or missing status file or dead process; good to start.
start_server(args, allow_sources=True)
t0 = time.time()
response = request(
args.status_file,
"run",
version=__version__,
args=args.flags,
export_types=args.export_types,
)
# If the daemon signals that a restart is necessary, do it
if "restart" in response:
print(f"Restarting: {response['restart']}")
restart_server(args, allow_sources=True)
response = request(
args.status_file,
"run",
version=__version__,
args=args.flags,
export_types=args.export_types,
)
t1 = time.time()
response["roundtrip_time"] = t1 - t0
check_output(response, args.verbose, args.junit_xml, args.perf_stats_file)
@action(status_parser)
def do_status(args: argparse.Namespace) -> None:
"""Print daemon status.
This verifies that it is responsive to requests.
"""
status = read_status(args.status_file)
if args.verbose:
show_stats(status)
# Both check_status() and request() may raise BadStatus,
# which will be handled by main().
check_status(status)
response = request(
args.status_file, "status", fswatcher_dump_file=args.fswatcher_dump_file, timeout=5
)
if args.verbose or "error" in response:
show_stats(response)
if "error" in response:
fail(f"Daemon is stuck; consider {sys.argv[0]} kill")
print("Daemon is up and running")
@action(stop_parser)
def do_stop(args: argparse.Namespace) -> None:
"""Stop daemon via a 'stop' request."""
# May raise BadStatus, which will be handled by main().
response = request(args.status_file, "stop", timeout=5)
if "error" in response:
show_stats(response)
fail(f"Daemon is stuck; consider {sys.argv[0]} kill")
else:
print("Daemon stopped")
@action(kill_parser)
def do_kill(args: argparse.Namespace) -> None:
"""Kill daemon process with SIGKILL."""
pid, _ = get_status(args.status_file)
try:
kill(pid)
except OSError as err:
fail(str(err))
else:
print("Daemon killed")
@action(check_parser)
def do_check(args: argparse.Namespace) -> None:
"""Ask the daemon to check a list of files."""
t0 = time.time()
response = request(args.status_file, "check", files=args.files, export_types=args.export_types)
t1 = time.time()
response["roundtrip_time"] = t1 - t0
check_output(response, args.verbose, args.junit_xml, args.perf_stats_file)
@action(recheck_parser)
def do_recheck(args: argparse.Namespace) -> None:
"""Ask the daemon to recheck the previous list of files, with optional modifications.
If at least one of --remove or --update is given, the server will
update the list of files to check accordingly and assume that any other files
are unchanged. If none of these flags are given, the server will call stat()
on each file last checked to determine its status.
Files given in --update ought to exist. Files given in --remove need not exist;
if they don't they will be ignored.
The lists may be empty but oughtn't contain duplicates or overlap.
NOTE: The list of files is lost when the daemon is restarted.
"""
t0 = time.time()
if args.remove is not None or args.update is not None:
response = request(
args.status_file,
"recheck",
export_types=args.export_types,
remove=args.remove,
update=args.update,
)
else:
response = request(args.status_file, "recheck", export_types=args.export_types)
t1 = time.time()
response["roundtrip_time"] = t1 - t0
check_output(response, args.verbose, args.junit_xml, args.perf_stats_file)
@action(suggest_parser)
def do_suggest(args: argparse.Namespace) -> None:
"""Ask the daemon for a suggested signature.
This just prints whatever the daemon reports as output.
For now it may be closer to a list of call sites.
"""
response = request(
args.status_file,
"suggest",
function=args.function,
json=args.json,
callsites=args.callsites,
no_errors=args.no_errors,
no_any=args.no_any,
flex_any=args.flex_any,
use_fixme=args.use_fixme,
max_guesses=args.max_guesses,
)
check_output(response, verbose=False, junit_xml=None, perf_stats_file=None)
@action(inspect_parser)
def do_inspect(args: argparse.Namespace) -> None:
"""Ask daemon to print the type of an expression."""
response = request(
args.status_file,
"inspect",
show=args.show,
location=args.location,
verbosity=args.verbose,
limit=args.limit,
include_span=args.include_span,
include_kind=args.include_kind,
include_object_attrs=args.include_object_attrs,
union_attrs=args.union_attrs,
force_reload=args.force_reload,
)
check_output(response, verbose=False, junit_xml=None, perf_stats_file=None)
def check_output(
response: dict[str, Any], verbose: bool, junit_xml: str | None, perf_stats_file: str | None
) -> None:
"""Print the output from a check or recheck command.
Call sys.exit() unless the status code is zero.
"""
if "error" in response:
fail(response["error"])
try:
out, err, status_code = response["out"], response["err"], response["status"]
except KeyError:
fail(f"Response: {str(response)}")
sys.stdout.write(out)
sys.stdout.flush()
sys.stderr.write(err)
sys.stderr.flush()
if verbose:
show_stats(response)
if junit_xml:
# Lazy import so this import doesn't slow things down when not writing junit
from mypy.util import write_junit_xml
messages = (out + err).splitlines()
write_junit_xml(
response["roundtrip_time"],
bool(err),
{None: messages} if messages else {},
junit_xml,
response["python_version"],
response["platform"],
)
if perf_stats_file:
telemetry = response.get("stats", {})
with open(perf_stats_file, "w") as f:
json.dump(telemetry, f)
if status_code:
sys.exit(status_code)
def show_stats(response: Mapping[str, object]) -> None:
for key, value in sorted(response.items()):
if key in ("out", "err", "stdout", "stderr"):
# Special case text output to display just 40 characters of text
value = repr(value)[1:-1]
if len(value) > 50:
value = f"{value[:40]} ... {len(value)-40} more characters"
print("%-24s: %s" % (key, value))
continue
print("%-24s: %10s" % (key, "%.3f" % value if isinstance(value, float) else value))
@action(hang_parser)
def do_hang(args: argparse.Namespace) -> None:
"""Hang for 100 seconds, as a debug hack."""
print(request(args.status_file, "hang", timeout=1))
@action(daemon_parser)
def do_daemon(args: argparse.Namespace) -> None:
"""Serve requests in the foreground."""
# Lazy import so this import doesn't slow down other commands.
from mypy.dmypy_server import Server, process_start_options
if args.log_file:
sys.stdout = sys.stderr = open(args.log_file, "a", buffering=1)
fd = sys.stdout.fileno()
os.dup2(fd, 2)
os.dup2(fd, 1)
if args.options_data:
from mypy.options import Options
options_dict = pickle.loads(base64.b64decode(args.options_data))
options_obj = Options()
options = options_obj.apply_changes(options_dict)
else:
options = process_start_options(args.flags, allow_sources=False)
Server(options, args.status_file, timeout=args.timeout).serve()
@action(help_parser)
def do_help(args: argparse.Namespace) -> None:
"""Print full help (same as dmypy --help)."""
parser.print_help()
# Client-side infrastructure.
def request(
status_file: str, command: str, *, timeout: int | None = None, **kwds: object
) -> dict[str, Any]:
"""Send a request to the daemon.
Return the JSON dict with the response.
Raise BadStatus if there is something wrong with the status file
or if the process whose pid is in the status file has died.
Return {'error': <message>} if an IPC operation or receive()
raised OSError. This covers cases such as connection refused or
closed prematurely as well as invalid JSON received.
"""
response: dict[str, str] = {}
args = dict(kwds)
args["command"] = command
# Tell the server whether this request was initiated from a human-facing terminal,
# so that it can format the type checking output accordingly.
args["is_tty"] = sys.stdout.isatty() or should_force_color()
args["terminal_width"] = get_terminal_width()
_, name = get_status(status_file)
try:
with IPCClient(name, timeout) as client:
send(client, args)
final = False
while not final:
response = receive(client)
final = bool(response.pop("final", False))
# Display debugging output written to stdout/stderr in the server process for convenience.
# This should not be confused with "out" and "err" fields in the response.
# Those fields hold the output of the "check" command, and are handled in check_output().
stdout = response.pop("stdout", None)
if stdout:
sys.stdout.write(stdout)
stderr = response.pop("stderr", None)
if stderr:
sys.stderr.write(stderr)
except (OSError, IPCException) as err:
return {"error": str(err)}
# TODO: Other errors, e.g. ValueError, UnicodeError
return response
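# Illustrative sketch (not part of mypy): calling request() directly, assuming a
# daemon has already been started and uses the default status file. Extra keyword
# arguments become fields of the JSON request; "command", "is_tty" and
# "terminal_width" are filled in by request() itself. The helper name is hypothetical.
def _example_status_request(status_file: str = DEFAULT_STATUS_FILE) -> dict[str, Any]:
    # Raises BadStatus (via get_status) if the daemon is not running.
    return request(status_file, "status", timeout=5)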
def get_status(status_file: str) -> tuple[int, str]:
"""Read status file and check if the process is alive.
Return (pid, connection_name) on success.
Raise BadStatus if something's wrong.
"""
data = read_status(status_file)
return check_status(data)
def check_status(data: dict[str, Any]) -> tuple[int, str]:
"""Check if the process is alive.
Return (pid, connection_name) on success.
Raise BadStatus if something's wrong.
"""
if "pid" not in data:
raise BadStatus("Invalid status file (no pid field)")
pid = data["pid"]
if not isinstance(pid, int):
raise BadStatus("pid field is not an int")
if not alive(pid):
raise BadStatus("Daemon has died")
if "connection_name" not in data:
raise BadStatus("Invalid status file (no connection_name field)")
connection_name = data["connection_name"]
if not isinstance(connection_name, str):
raise BadStatus("connection_name field is not a string")
return pid, connection_name
def read_status(status_file: str) -> dict[str, object]:
"""Read status file.
Raise BadStatus if the status file doesn't exist or contains
invalid JSON or the JSON is not a dict.
"""
if not os.path.isfile(status_file):
raise BadStatus("No status file found")
with open(status_file) as f:
try:
data = json.load(f)
except Exception as e:
raise BadStatus("Malformed status file (not JSON)") from e
if not isinstance(data, dict):
raise BadStatus("Invalid status file (not a dict)")
return data
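# Illustrative sketch (not part of mypy): the daemon writes the status file as a
# single JSON object with "pid" and "connection_name" fields (see Server.serve()
# in dmypy_server.py), which is exactly what check_status() validates. A
# hypothetical helper producing such a file could look like this:
def _example_write_status_file(path: str, pid: int, connection_name: str) -> None:
    with open(path, "w") as f:
        json.dump({"pid": pid, "connection_name": connection_name}, f)
        f.write("\n")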
def is_running(status_file: str) -> bool:
"""Check if the server is running cleanly"""
try:
get_status(status_file)
except BadStatus:
return False
return True
# Run main().
def console_entry() -> None:
main(sys.argv[1:])
| algorandfoundation/puya | src/puyapy/_vendor/mypy/dmypy/client.py | Python | NOASSERTION | 24,616 |
from __future__ import annotations
import sys
from typing import Any, Callable
if sys.platform == "win32":
import ctypes
import subprocess
from ctypes.wintypes import DWORD, HANDLE
PROCESS_QUERY_LIMITED_INFORMATION = ctypes.c_ulong(0x1000)
kernel32 = ctypes.windll.kernel32
OpenProcess: Callable[[DWORD, int, int], HANDLE] = kernel32.OpenProcess
GetExitCodeProcess: Callable[[HANDLE, Any], int] = kernel32.GetExitCodeProcess
else:
import os
import signal
def alive(pid: int) -> bool:
"""Is the process alive?"""
if sys.platform == "win32":
# why can't anything be easy...
status = DWORD()
handle = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, 0, pid)
GetExitCodeProcess(handle, ctypes.byref(status))
return status.value == 259 # STILL_ACTIVE
else:
try:
os.kill(pid, 0)
except OSError:
return False
return True
def kill(pid: int) -> None:
"""Kill the process."""
if sys.platform == "win32":
subprocess.check_output(f"taskkill /pid {pid} /f /t")
else:
os.kill(pid, signal.SIGKILL)
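# Illustrative sketch (not part of mypy): how the dmypy client combines these
# helpers, given a pid taken from the status file (see check_status() in
# mypy/dmypy/client.py). The helper name is hypothetical.
def _example_terminate_if_alive(pid: int) -> None:
    if alive(pid):
        kill(pid)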
| algorandfoundation/puya | src/puyapy/_vendor/mypy/dmypy_os.py | Python | NOASSERTION | 1,154 |
"""Server for mypy daemon mode.
This implements a daemon process which keeps useful state in memory
to enable fine-grained incremental reprocessing of changes.
"""
from __future__ import annotations
import argparse
import base64
import io
import json
import os
import pickle
import subprocess
import sys
import time
import traceback
from contextlib import redirect_stderr, redirect_stdout
from typing import AbstractSet, Any, Callable, Final, List, Sequence, Tuple
from typing_extensions import TypeAlias as _TypeAlias
import mypy.build
import mypy.errors
import mypy.main
from mypy.dmypy_util import WriteToConn, receive, send
from mypy.find_sources import InvalidSourceList, create_source_list
from mypy.fscache import FileSystemCache
from mypy.fswatcher import FileData, FileSystemWatcher
from mypy.inspections import InspectionEngine
from mypy.ipc import IPCServer
from mypy.modulefinder import BuildSource, FindModuleCache, SearchPaths, compute_search_paths
from mypy.options import Options
from mypy.server.update import FineGrainedBuildManager, refresh_suppressed_submodules
from mypy.suggestions import SuggestionEngine, SuggestionFailure
from mypy.typestate import reset_global_state
from mypy.util import FancyFormatter, count_stats
from mypy.version import __version__
MEM_PROFILE: Final = False # If True, dump memory profile after initialization
if sys.platform == "win32":
from subprocess import STARTUPINFO
def daemonize(
options: Options, status_file: str, timeout: int | None = None, log_file: str | None = None
) -> int:
"""Create the daemon process via "dmypy daemon" and pass options via command line
When creating the daemon grandchild, we create it in a new console, which is
started hidden. We cannot use DETACHED_PROCESS since it will cause console windows
to pop up when starting. See
https://github.com/python/cpython/pull/4150#issuecomment-340215696
for more on why we can't have nice things.
It also pickles the options to be unpickled by mypy.
"""
command = [sys.executable, "-m", "mypy.dmypy", "--status-file", status_file, "daemon"]
pickled_options = pickle.dumps(options.snapshot())
command.append(f'--options-data="{base64.b64encode(pickled_options).decode()}"')
if timeout:
command.append(f"--timeout={timeout}")
if log_file:
command.append(f"--log-file={log_file}")
info = STARTUPINFO()
info.dwFlags = 0x1 # STARTF_USESHOWWINDOW aka use wShowWindow's value
info.wShowWindow = 0 # SW_HIDE aka make the window invisible
try:
subprocess.Popen(command, creationflags=0x10, startupinfo=info) # CREATE_NEW_CONSOLE
return 0
except subprocess.CalledProcessError as e:
return e.returncode
else:
def _daemonize_cb(func: Callable[[], None], log_file: str | None = None) -> int:
"""Arrange to call func() in a grandchild of the current process.
Return 0 for success, exit status for failure, negative if
subprocess killed by signal.
"""
# See https://stackoverflow.com/questions/473620/how-do-you-create-a-daemon-in-python
sys.stdout.flush()
sys.stderr.flush()
pid = os.fork()
if pid:
# Parent process: wait for child in case things go bad there.
npid, sts = os.waitpid(pid, 0)
sig = sts & 0xFF
if sig:
print("Child killed by signal", sig)
return -sig
sts = sts >> 8
if sts:
print("Child exit status", sts)
return sts
# Child process: do a bunch of UNIX stuff and then fork a grandchild.
try:
os.setsid() # Detach controlling terminal
os.umask(0o27)
devnull = os.open("/dev/null", os.O_RDWR)
os.dup2(devnull, 0)
os.dup2(devnull, 1)
os.dup2(devnull, 2)
os.close(devnull)
pid = os.fork()
if pid:
# Child is done, exit to parent.
os._exit(0)
# Grandchild: run the server.
if log_file:
sys.stdout = sys.stderr = open(log_file, "a", buffering=1)
fd = sys.stdout.fileno()
os.dup2(fd, 2)
os.dup2(fd, 1)
func()
finally:
# Make sure we never get back into the caller.
os._exit(1)
def daemonize(
options: Options, status_file: str, timeout: int | None = None, log_file: str | None = None
) -> int:
"""Run the mypy daemon in a grandchild of the current process
Return 0 for success, exit status for failure, negative if
subprocess killed by signal.
"""
return _daemonize_cb(Server(options, status_file, timeout).serve, log_file)
# Server code.
CONNECTION_NAME: Final = "dmypy"
def process_start_options(flags: list[str], allow_sources: bool) -> Options:
_, options = mypy.main.process_options(
["-i"] + flags, require_targets=False, server_options=True
)
if options.report_dirs:
print("dmypy: Ignoring report generation settings. Start/restart cannot generate reports.")
if options.junit_xml:
print(
"dmypy: Ignoring report generation settings. "
"Start/restart does not support --junit-xml. Pass it to check/recheck instead"
)
options.junit_xml = None
if not options.incremental:
sys.exit("dmypy: start/restart should not disable incremental mode")
if options.follow_imports not in ("skip", "error", "normal"):
sys.exit("dmypy: follow-imports=silent not supported")
return options
def ignore_suppressed_imports(module: str) -> bool:
"""Can we skip looking for newly unsuppressed imports to module?"""
# Various submodules of 'encodings' can be suppressed, since it
# uses module-level '__getattr__'. Skip them since there are many
# of them, and following imports to them is kind of pointless.
return module.startswith("encodings.")
ModulePathPair: _TypeAlias = Tuple[str, str]
ModulePathPairs: _TypeAlias = List[ModulePathPair]
ChangesAndRemovals: _TypeAlias = Tuple[ModulePathPairs, ModulePathPairs]
class Server:
# NOTE: the instance is constructed in the parent process but
# serve() is called in the grandchild (by daemonize()).
def __init__(self, options: Options, status_file: str, timeout: int | None = None) -> None:
"""Initialize the server with the desired mypy flags."""
self.options = options
# Snapshot the options info before we muck with it, to detect changes
self.options_snapshot = options.snapshot()
self.timeout = timeout
self.fine_grained_manager: FineGrainedBuildManager | None = None
if os.path.isfile(status_file):
os.unlink(status_file)
self.fscache = FileSystemCache()
options.raise_exceptions = True
options.incremental = True
options.fine_grained_incremental = True
options.show_traceback = True
if options.use_fine_grained_cache:
# Using fine_grained_cache implies generating and caring
# about the fine grained cache
options.cache_fine_grained = True
else:
options.cache_dir = os.devnull
# Fine-grained incremental doesn't support general partial types
# (details in https://github.com/python/mypy/issues/4492)
options.local_partial_types = True
self.status_file = status_file
# Since the object is created in the parent process we can check
# the output terminal options here.
self.formatter = FancyFormatter(sys.stdout, sys.stderr, options.hide_error_codes)
def _response_metadata(self) -> dict[str, str]:
py_version = f"{self.options.python_version[0]}_{self.options.python_version[1]}"
return {"platform": self.options.platform, "python_version": py_version}
def serve(self) -> None:
"""Serve requests, synchronously (no thread or fork)."""
command = None
server = IPCServer(CONNECTION_NAME, self.timeout)
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
with open(self.status_file, "w") as f:
json.dump({"pid": os.getpid(), "connection_name": server.connection_name}, f)
f.write("\n") # I like my JSON with a trailing newline
while True:
with server:
data = receive(server)
sys.stdout = WriteToConn(server, "stdout", sys.stdout.isatty())
sys.stderr = WriteToConn(server, "stderr", sys.stderr.isatty())
resp: dict[str, Any] = {}
if "command" not in data:
resp = {"error": "No command found in request"}
else:
command = data["command"]
if not isinstance(command, str):
resp = {"error": "Command is not a string"}
else:
command = data.pop("command")
try:
resp = self.run_command(command, data)
except Exception:
# If we are crashing, report the crash to the client
tb = traceback.format_exception(*sys.exc_info())
resp = {"error": "Daemon crashed!\n" + "".join(tb)}
resp.update(self._response_metadata())
resp["final"] = True
send(server, resp)
raise
resp["final"] = True
try:
resp.update(self._response_metadata())
send(server, resp)
except OSError:
pass # Maybe the client hung up
if command == "stop":
reset_global_state()
sys.exit(0)
finally:
# Revert stdout/stderr so we can see any errors.
sys.stdout = orig_stdout
sys.stderr = orig_stderr
# If the final command is something other than a clean
# stop, remove the status file. (We can't just
# simplify the logic and always remove the file, since
# that could cause us to remove a future server's
# status file.)
if command != "stop":
os.unlink(self.status_file)
try:
server.cleanup() # try to remove the socket dir on Linux
except OSError:
pass
exc_info = sys.exc_info()
if exc_info[0] and exc_info[0] is not SystemExit:
traceback.print_exception(*exc_info)
def run_command(self, command: str, data: dict[str, object]) -> dict[str, object]:
"""Run a specific command from the registry."""
key = "cmd_" + command
method = getattr(self.__class__, key, None)
if method is None:
return {"error": f"Unrecognized command '{command}'"}
else:
if command not in {"check", "recheck", "run"}:
# Only the above commands use some error formatting.
del data["is_tty"]
del data["terminal_width"]
ret = method(self, **data)
assert isinstance(ret, dict)
return ret
# Command functions (run in the server via RPC).
def cmd_status(self, fswatcher_dump_file: str | None = None) -> dict[str, object]:
"""Return daemon status."""
res: dict[str, object] = {}
res.update(get_meminfo())
if fswatcher_dump_file:
data = self.fswatcher.dump_file_data() if hasattr(self, "fswatcher") else {}
# Using .dumps and then writing was noticeably faster than using dump
s = json.dumps(data)
with open(fswatcher_dump_file, "w") as f:
f.write(s)
return res
def cmd_stop(self) -> dict[str, object]:
"""Stop daemon."""
# We need to remove the status file *before* we complete the
# RPC. Otherwise a race condition exists where a subsequent
# command can see a status file from a dying server and think
# it is a live one.
os.unlink(self.status_file)
return {}
def cmd_run(
self,
version: str,
args: Sequence[str],
export_types: bool,
is_tty: bool,
terminal_width: int,
) -> dict[str, object]:
"""Check a list of files, triggering a restart if needed."""
stderr = io.StringIO()
stdout = io.StringIO()
try:
# Process options can exit on improper arguments, so we need to catch that and
# capture stderr so the client can report it
with redirect_stderr(stderr):
with redirect_stdout(stdout):
sources, options = mypy.main.process_options(
["-i"] + list(args),
require_targets=True,
server_options=True,
fscache=self.fscache,
program="mypy-daemon",
header=argparse.SUPPRESS,
)
# Signal that we need to restart if the options have changed
if not options.compare_stable(self.options_snapshot):
return {"restart": "configuration changed"}
if __version__ != version:
return {"restart": "mypy version changed"}
if self.fine_grained_manager:
manager = self.fine_grained_manager.manager
start_plugins_snapshot = manager.plugins_snapshot
_, current_plugins_snapshot = mypy.build.load_plugins(
options, manager.errors, sys.stdout, extra_plugins=()
)
if current_plugins_snapshot != start_plugins_snapshot:
return {"restart": "plugins changed"}
except InvalidSourceList as err:
return {"out": "", "err": str(err), "status": 2}
except SystemExit as e:
return {"out": stdout.getvalue(), "err": stderr.getvalue(), "status": e.code}
return self.check(sources, export_types, is_tty, terminal_width)
def cmd_check(
self, files: Sequence[str], export_types: bool, is_tty: bool, terminal_width: int
) -> dict[str, object]:
"""Check a list of files."""
try:
sources = create_source_list(files, self.options, self.fscache)
except InvalidSourceList as err:
return {"out": "", "err": str(err), "status": 2}
return self.check(sources, export_types, is_tty, terminal_width)
def cmd_recheck(
self,
is_tty: bool,
terminal_width: int,
export_types: bool,
remove: list[str] | None = None,
update: list[str] | None = None,
) -> dict[str, object]:
"""Check the same list of files we checked most recently.
If remove/update is given, they modify the previous list;
if all are None, stat() is called for each file in the previous list.
"""
t0 = time.time()
if not self.fine_grained_manager:
return {"error": "Command 'recheck' is only valid after a 'check' command"}
sources = self.previous_sources
if remove:
removals = set(remove)
sources = [s for s in sources if s.path and s.path not in removals]
if update:
# Sort list of file updates by extension, so *.pyi files are first.
update.sort(key=lambda f: os.path.splitext(f)[1], reverse=True)
known = {s.path for s in sources if s.path}
added = [p for p in update if p not in known]
try:
added_sources = create_source_list(added, self.options, self.fscache)
except InvalidSourceList as err:
return {"out": "", "err": str(err), "status": 2}
sources = sources + added_sources # Make a copy!
t1 = time.time()
manager = self.fine_grained_manager.manager
manager.log(f"fine-grained increment: cmd_recheck: {t1 - t0:.3f}s")
old_export_types = self.options.export_types
self.options.export_types = self.options.export_types or export_types
if not self.following_imports():
messages = self.fine_grained_increment(
sources, remove, update, explicit_export_types=export_types
)
else:
assert remove is None and update is None
messages = self.fine_grained_increment_follow_imports(
sources, explicit_export_types=export_types
)
res = self.increment_output(messages, sources, is_tty, terminal_width)
self.flush_caches()
self.update_stats(res)
self.options.export_types = old_export_types
return res
def check(
self, sources: list[BuildSource], export_types: bool, is_tty: bool, terminal_width: int
) -> dict[str, Any]:
"""Check using fine-grained incremental mode.
If is_tty is True format the output nicely with colors and summary line
(unless disabled in self.options). Also pass the terminal_width to formatter.
"""
old_export_types = self.options.export_types
self.options.export_types = self.options.export_types or export_types
if not self.fine_grained_manager:
res = self.initialize_fine_grained(sources, is_tty, terminal_width)
else:
if not self.following_imports():
messages = self.fine_grained_increment(sources, explicit_export_types=export_types)
else:
messages = self.fine_grained_increment_follow_imports(
sources, explicit_export_types=export_types
)
res = self.increment_output(messages, sources, is_tty, terminal_width)
self.flush_caches()
self.update_stats(res)
self.options.export_types = old_export_types
return res
def flush_caches(self) -> None:
self.fscache.flush()
if self.fine_grained_manager:
self.fine_grained_manager.flush_cache()
def update_stats(self, res: dict[str, Any]) -> None:
if self.fine_grained_manager:
manager = self.fine_grained_manager.manager
manager.dump_stats()
res["stats"] = manager.stats
manager.stats = {}
def following_imports(self) -> bool:
"""Are we following imports?"""
# TODO: What about silent?
return self.options.follow_imports == "normal"
def initialize_fine_grained(
self, sources: list[BuildSource], is_tty: bool, terminal_width: int
) -> dict[str, Any]:
self.fswatcher = FileSystemWatcher(self.fscache)
t0 = time.time()
self.update_sources(sources)
t1 = time.time()
try:
result = mypy.build.build(sources=sources, options=self.options, fscache=self.fscache)
except mypy.errors.CompileError as e:
output = "".join(s + "\n" for s in e.messages)
if e.use_stdout:
out, err = output, ""
else:
out, err = "", output
return {"out": out, "err": err, "status": 2}
messages = result.errors
self.fine_grained_manager = FineGrainedBuildManager(result)
original_sources_len = len(sources)
if self.following_imports():
sources = find_all_sources_in_build(self.fine_grained_manager.graph, sources)
self.update_sources(sources)
self.previous_sources = sources
# If we are using the fine-grained cache, build hasn't actually done
# the typechecking on the updated files yet.
# Run a fine-grained update starting from the cached data
if result.used_cache:
t2 = time.time()
# Pull times and hashes out of the saved_cache and stick them into
# the fswatcher, so we pick up the changes.
for state in self.fine_grained_manager.graph.values():
meta = state.meta
if meta is None:
continue
assert state.path is not None
self.fswatcher.set_file_data(
state.path,
FileData(st_mtime=float(meta.mtime), st_size=meta.size, hash=meta.hash),
)
changed, removed = self.find_changed(sources)
changed += self.find_added_suppressed(
self.fine_grained_manager.graph,
set(),
self.fine_grained_manager.manager.search_paths,
)
# Find anything that has had its dependency list change
for state in self.fine_grained_manager.graph.values():
if not state.is_fresh():
assert state.path is not None
changed.append((state.id, state.path))
t3 = time.time()
# Run an update
messages = self.fine_grained_manager.update(changed, removed)
if self.following_imports():
# We need to do another update to any new files found by following imports.
messages = self.fine_grained_increment_follow_imports(sources)
t4 = time.time()
self.fine_grained_manager.manager.add_stats(
update_sources_time=t1 - t0,
build_time=t2 - t1,
find_changes_time=t3 - t2,
fg_update_time=t4 - t3,
files_changed=len(removed) + len(changed),
)
else:
# Stores the initial state of sources as a side effect.
self.fswatcher.find_changed()
if MEM_PROFILE:
from mypy.memprofile import print_memory_profile
print_memory_profile(run_gc=False)
__, n_notes, __ = count_stats(messages)
status = 1 if messages and n_notes < len(messages) else 0
# We use explicit sources length to match the logic in non-incremental mode.
messages = self.pretty_messages(messages, original_sources_len, is_tty, terminal_width)
return {"out": "".join(s + "\n" for s in messages), "err": "", "status": status}
def fine_grained_increment(
self,
sources: list[BuildSource],
remove: list[str] | None = None,
update: list[str] | None = None,
explicit_export_types: bool = False,
) -> list[str]:
"""Perform a fine-grained type checking increment.
If remove and update are None, determine changed paths by using
fswatcher. Otherwise, assume that only these files have changes.
Args:
sources: sources passed on the command line
remove: paths of files that have been removed
update: paths of files that have been changed or created
explicit_export_types: --export-types was passed in a check command
(as opposed to being set in dmypy start)
"""
assert self.fine_grained_manager is not None
manager = self.fine_grained_manager.manager
t0 = time.time()
if remove is None and update is None:
# Use the fswatcher to determine which files were changed
# (updated or added) or removed.
self.update_sources(sources)
changed, removed = self.find_changed(sources)
else:
# Use the remove/update lists to update fswatcher.
# This avoids calling stat() for unchanged files.
changed, removed = self.update_changed(sources, remove or [], update or [])
if explicit_export_types:
# If --export-types is given, we need to force full re-checking of all
# explicitly passed files, since we need to visit each expression.
add_all_sources_to_changed(sources, changed)
changed += self.find_added_suppressed(
self.fine_grained_manager.graph, set(), manager.search_paths
)
manager.search_paths = compute_search_paths(sources, manager.options, manager.data_dir)
t1 = time.time()
manager.log(f"fine-grained increment: find_changed: {t1 - t0:.3f}s")
messages = self.fine_grained_manager.update(changed, removed)
t2 = time.time()
manager.log(f"fine-grained increment: update: {t2 - t1:.3f}s")
manager.add_stats(
find_changes_time=t1 - t0,
fg_update_time=t2 - t1,
files_changed=len(removed) + len(changed),
)
self.previous_sources = sources
return messages
def fine_grained_increment_follow_imports(
self, sources: list[BuildSource], explicit_export_types: bool = False
) -> list[str]:
"""Like fine_grained_increment, but follow imports."""
t0 = time.time()
# TODO: Support file events
assert self.fine_grained_manager is not None
fine_grained_manager = self.fine_grained_manager
graph = fine_grained_manager.graph
manager = fine_grained_manager.manager
orig_modules = list(graph.keys())
self.update_sources(sources)
changed_paths = self.fswatcher.find_changed()
manager.search_paths = compute_search_paths(sources, manager.options, manager.data_dir)
t1 = time.time()
manager.log(f"fine-grained increment: find_changed: {t1 - t0:.3f}s")
seen = {source.module for source in sources}
# Find changed modules reachable from roots (or in roots) already in graph.
changed, new_files = self.find_reachable_changed_modules(
sources, graph, seen, changed_paths
)
if explicit_export_types:
# Same as in fine_grained_increment().
add_all_sources_to_changed(sources, changed)
sources.extend(new_files)
# Process changes directly reachable from roots.
messages = fine_grained_manager.update(changed, [], followed=True)
# Follow deps from changed modules (still within graph).
worklist = changed.copy()
while worklist:
module = worklist.pop()
if module[0] not in graph:
continue
sources2 = self.direct_imports(module, graph)
# Filter anything already seen before. This prevents
# infinite looping if there are any self edges. (Self
# edges are maybe a bug, but...)
sources2 = [source for source in sources2 if source.module not in seen]
changed, new_files = self.find_reachable_changed_modules(
sources2, graph, seen, changed_paths
)
self.update_sources(new_files)
messages = fine_grained_manager.update(changed, [], followed=True)
worklist.extend(changed)
t2 = time.time()
def refresh_file(module: str, path: str) -> list[str]:
return fine_grained_manager.update([(module, path)], [], followed=True)
for module_id, state in list(graph.items()):
new_messages = refresh_suppressed_submodules(
module_id, state.path, fine_grained_manager.deps, graph, self.fscache, refresh_file
)
if new_messages is not None:
messages = new_messages
t3 = time.time()
# There may be new files that became available, currently treated as
# suppressed imports. Process them.
while True:
new_unsuppressed = self.find_added_suppressed(graph, seen, manager.search_paths)
if not new_unsuppressed:
break
new_files = [BuildSource(mod[1], mod[0], followed=True) for mod in new_unsuppressed]
sources.extend(new_files)
self.update_sources(new_files)
messages = fine_grained_manager.update(new_unsuppressed, [], followed=True)
for module_id, path in new_unsuppressed:
new_messages = refresh_suppressed_submodules(
module_id, path, fine_grained_manager.deps, graph, self.fscache, refresh_file
)
if new_messages is not None:
messages = new_messages
t4 = time.time()
# Find all original modules in graph that were not reached -- they are deleted.
to_delete = []
for module_id in orig_modules:
if module_id not in graph:
continue
if module_id not in seen:
module_path = graph[module_id].path
assert module_path is not None
to_delete.append((module_id, module_path))
if to_delete:
messages = fine_grained_manager.update([], to_delete)
fix_module_deps(graph)
self.previous_sources = find_all_sources_in_build(graph)
self.update_sources(self.previous_sources)
# Store current file state as side effect
self.fswatcher.find_changed()
t5 = time.time()
manager.log(f"fine-grained increment: update: {t5 - t1:.3f}s")
manager.add_stats(
find_changes_time=t1 - t0,
fg_update_time=t2 - t1,
refresh_suppressed_time=t3 - t2,
find_added_supressed_time=t4 - t3,
cleanup_time=t5 - t4,
)
return messages
def find_reachable_changed_modules(
self,
roots: list[BuildSource],
graph: mypy.build.Graph,
seen: set[str],
changed_paths: AbstractSet[str],
) -> tuple[list[tuple[str, str]], list[BuildSource]]:
"""Follow imports within graph from given sources until hitting changed modules.
If we find a changed module, we can't continue following imports as the imports
may have changed.
Args:
roots: modules where to start search from
graph: module graph to use for the search
seen: modules we've seen before that won't be visited (mutated here!!)
changed_paths: which paths have changed (stop search here and return any found)
Return (encountered reachable changed modules,
unchanged files not in sources_set traversed).
"""
changed = []
new_files = []
worklist = roots.copy()
seen.update(source.module for source in worklist)
while worklist:
nxt = worklist.pop()
if nxt.module not in seen:
seen.add(nxt.module)
new_files.append(nxt)
if nxt.path in changed_paths:
assert nxt.path is not None # TODO
changed.append((nxt.module, nxt.path))
elif nxt.module in graph:
state = graph[nxt.module]
for dep in state.dependencies:
if dep not in seen:
seen.add(dep)
worklist.append(BuildSource(graph[dep].path, graph[dep].id, followed=True))
return changed, new_files
def direct_imports(
self, module: tuple[str, str], graph: mypy.build.Graph
) -> list[BuildSource]:
"""Return the direct imports of module not included in seen."""
state = graph[module[0]]
return [BuildSource(graph[dep].path, dep, followed=True) for dep in state.dependencies]
def find_added_suppressed(
self, graph: mypy.build.Graph, seen: set[str], search_paths: SearchPaths
) -> list[tuple[str, str]]:
"""Find suppressed modules that have been added (and not included in seen).
Args:
seen: reachable modules we've seen before (mutated here!!)
Return suppressed, added modules.
"""
all_suppressed = set()
for state in graph.values():
all_suppressed |= state.suppressed_set
# Filter out things that shouldn't actually be considered suppressed.
#
# TODO: Figure out why these are treated as suppressed
all_suppressed = {
module
for module in all_suppressed
if module not in graph and not ignore_suppressed_imports(module)
}
# Optimization: skip top-level packages that are obviously not
# there, to avoid calling the relatively slow find_module()
# below too many times.
packages = {module.split(".", 1)[0] for module in all_suppressed}
packages = filter_out_missing_top_level_packages(packages, search_paths, self.fscache)
# TODO: Namespace packages
finder = FindModuleCache(search_paths, self.fscache, self.options)
found = []
for module in all_suppressed:
top_level_pkg = module.split(".", 1)[0]
if top_level_pkg not in packages:
# Fast path: non-existent top-level package
continue
result = finder.find_module(module, fast_path=True)
if isinstance(result, str) and module not in seen:
# When not following imports, we only follow imports to .pyi files.
if not self.following_imports() and not result.endswith(".pyi"):
continue
found.append((module, result))
seen.add(module)
return found
def increment_output(
self, messages: list[str], sources: list[BuildSource], is_tty: bool, terminal_width: int
) -> dict[str, Any]:
status = 1 if messages else 0
messages = self.pretty_messages(messages, len(sources), is_tty, terminal_width)
return {"out": "".join(s + "\n" for s in messages), "err": "", "status": status}
def pretty_messages(
self,
messages: list[str],
n_sources: int,
is_tty: bool = False,
terminal_width: int | None = None,
) -> list[str]:
use_color = self.options.color_output and is_tty
fit_width = self.options.pretty and is_tty
if fit_width:
messages = self.formatter.fit_in_terminal(
messages, fixed_terminal_width=terminal_width
)
if self.options.error_summary:
summary: str | None = None
n_errors, n_notes, n_files = count_stats(messages)
if n_errors:
summary = self.formatter.format_error(
n_errors, n_files, n_sources, use_color=use_color
)
elif not messages or n_notes == len(messages):
summary = self.formatter.format_success(n_sources, use_color)
if summary:
# Create new list to avoid appending multiple summaries on successive runs.
messages = messages + [summary]
if use_color:
messages = [self.formatter.colorize(m) for m in messages]
return messages
def update_sources(self, sources: list[BuildSource]) -> None:
paths = [source.path for source in sources if source.path is not None]
if self.following_imports():
# Filter out directories (used for namespace packages).
paths = [path for path in paths if self.fscache.isfile(path)]
self.fswatcher.add_watched_paths(paths)
def update_changed(
self, sources: list[BuildSource], remove: list[str], update: list[str]
) -> ChangesAndRemovals:
changed_paths = self.fswatcher.update_changed(remove, update)
return self._find_changed(sources, changed_paths)
def find_changed(self, sources: list[BuildSource]) -> ChangesAndRemovals:
changed_paths = self.fswatcher.find_changed()
return self._find_changed(sources, changed_paths)
def _find_changed(
self, sources: list[BuildSource], changed_paths: AbstractSet[str]
) -> ChangesAndRemovals:
# Find anything that has been added or modified
changed = [
(source.module, source.path)
for source in sources
if source.path and source.path in changed_paths
]
# Now find anything that has been removed from the build
modules = {source.module for source in sources}
omitted = [source for source in self.previous_sources if source.module not in modules]
removed = []
for source in omitted:
path = source.path
assert path
removed.append((source.module, path))
# Always add modules that were (re-)added, since they may be detected as not changed by
# fswatcher (if they were actually not changed), but they may still need to be checked
# in case they had errors before they were deleted from sources on previous runs.
previous_modules = {source.module for source in self.previous_sources}
changed_set = set(changed)
changed.extend(
[
(source.module, source.path)
for source in sources
if source.path
and source.module not in previous_modules
and (source.module, source.path) not in changed_set
]
)
# Find anything that has had its module path change because of added or removed __init__s
last = {s.path: s.module for s in self.previous_sources}
for s in sources:
assert s.path
if s.path in last and last[s.path] != s.module:
# Mark it as removed from its old name and changed at its new name
removed.append((last[s.path], s.path))
changed.append((s.module, s.path))
return changed, removed
def cmd_inspect(
self,
show: str,
location: str,
verbosity: int = 0,
limit: int = 0,
include_span: bool = False,
include_kind: bool = False,
include_object_attrs: bool = False,
union_attrs: bool = False,
force_reload: bool = False,
) -> dict[str, object]:
"""Locate and inspect expression(s)."""
if not self.fine_grained_manager:
return {
"error": 'Command "inspect" is only valid after a "check" command'
" (that produces no parse errors)"
}
engine = InspectionEngine(
self.fine_grained_manager,
verbosity=verbosity,
limit=limit,
include_span=include_span,
include_kind=include_kind,
include_object_attrs=include_object_attrs,
union_attrs=union_attrs,
force_reload=force_reload,
)
old_inspections = self.options.inspections
self.options.inspections = True
try:
if show == "type":
result = engine.get_type(location)
elif show == "attrs":
result = engine.get_attrs(location)
elif show == "definition":
result = engine.get_definition(location)
else:
assert False, "Unknown inspection kind"
finally:
self.options.inspections = old_inspections
if "out" in result:
assert isinstance(result["out"], str)
result["out"] += "\n"
return result
def cmd_suggest(self, function: str, callsites: bool, **kwargs: Any) -> dict[str, object]:
"""Suggest a signature for a function."""
if not self.fine_grained_manager:
return {
"error": "Command 'suggest' is only valid after a 'check' command"
" (that produces no parse errors)"
}
engine = SuggestionEngine(self.fine_grained_manager, **kwargs)
try:
if callsites:
out = engine.suggest_callsites(function)
else:
out = engine.suggest(function)
except SuggestionFailure as err:
return {"error": str(err)}
else:
if not out:
out = "No suggestions\n"
elif not out.endswith("\n"):
out += "\n"
return {"out": out, "err": "", "status": 0}
finally:
self.flush_caches()
def cmd_hang(self) -> dict[str, object]:
"""Hang for 100 seconds, as a debug hack."""
time.sleep(100)
return {}
# Misc utilities.
MiB: Final = 2**20
def get_meminfo() -> dict[str, Any]:
res: dict[str, Any] = {}
try:
import psutil
except ImportError:
res["memory_psutil_missing"] = (
"psutil not found, run pip install mypy[dmypy] "
"to install the needed components for dmypy"
)
else:
process = psutil.Process()
meminfo = process.memory_info()
res["memory_rss_mib"] = meminfo.rss / MiB
res["memory_vms_mib"] = meminfo.vms / MiB
if sys.platform == "win32":
res["memory_maxrss_mib"] = meminfo.peak_wset / MiB
else:
# See https://stackoverflow.com/questions/938733/total-memory-used-by-python-process
import resource # Since it doesn't exist on Windows.
rusage = resource.getrusage(resource.RUSAGE_SELF)
if sys.platform == "darwin":
factor = 1
else:
factor = 1024 # Linux
res["memory_maxrss_mib"] = rusage.ru_maxrss * factor / MiB
return res
def find_all_sources_in_build(
graph: mypy.build.Graph, extra: Sequence[BuildSource] = ()
) -> list[BuildSource]:
result = list(extra)
seen = {source.module for source in result}
for module, state in graph.items():
if module not in seen:
result.append(BuildSource(state.path, module))
return result
def add_all_sources_to_changed(sources: list[BuildSource], changed: list[tuple[str, str]]) -> None:
"""Add all (explicit) sources to the list changed files in place.
Use this when re-processing of unchanged files is needed (e.g. for
the purpose of exporting types for inspections).
"""
changed_set = set(changed)
changed.extend(
[
(bs.module, bs.path)
for bs in sources
if bs.path and (bs.module, bs.path) not in changed_set
]
)
def fix_module_deps(graph: mypy.build.Graph) -> None:
"""After an incremental update, update module dependencies to reflect the new state.
This can make some suppressed dependencies non-suppressed, and vice versa (if modules
have been added to or removed from the build).
"""
for state in graph.values():
new_suppressed = []
new_dependencies = []
for dep in state.dependencies + state.suppressed:
if dep in graph:
new_dependencies.append(dep)
else:
new_suppressed.append(dep)
state.dependencies = new_dependencies
state.dependencies_set = set(new_dependencies)
state.suppressed = new_suppressed
state.suppressed_set = set(new_suppressed)
def filter_out_missing_top_level_packages(
packages: set[str], search_paths: SearchPaths, fscache: FileSystemCache
) -> set[str]:
"""Quickly filter out obviously missing top-level packages.
    Return the given packages, with entries that can't be found removed.
This is approximate: some packages that aren't actually valid may be
included. However, all potentially valid packages must be returned.
"""
    # Start with an empty set and add all potential top-level packages.
found = set()
paths = (
search_paths.python_path
+ search_paths.mypy_path
+ search_paths.package_path
+ search_paths.typeshed_path
)
for p in paths:
try:
entries = fscache.listdir(p)
except Exception:
entries = []
for entry in entries:
# The code is hand-optimized for mypyc since this may be somewhat
# performance-critical.
if entry.endswith(".py"):
entry = entry[:-3]
elif entry.endswith(".pyi"):
entry = entry[:-4]
elif entry.endswith("-stubs"):
# Possible PEP 561 stub package
entry = entry[:-6]
if entry in packages:
found.add(entry)
return found
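# Illustrative sketch, not part of upstream mypy: calling the filter above only
# requires a SearchPaths tuple and a FileSystemCache. The directory and candidate
# names passed in are arbitrary; local imports are used so the example stays
# self-contained.
def _example_filter_packages(candidates: set[str], directory: str) -> set[str]:
    from mypy.fscache import FileSystemCache as _FileSystemCache
    from mypy.modulefinder import SearchPaths as _SearchPaths
    search_paths = _SearchPaths(
        python_path=(directory,), mypy_path=(), package_path=(), typeshed_path=()
    )
    return filter_out_missing_top_level_packages(
        candidates, search_paths, _FileSystemCache()
    )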
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/dmypy_server.py
|
Python
|
NOASSERTION
| 44,745 |
"""Shared code between dmypy.py and dmypy_server.py.
This should be pretty lightweight and not depend on other mypy code (other than ipc).
"""
from __future__ import annotations
import io
import json
from types import TracebackType
from typing import Any, Final, Iterable, Iterator, TextIO
from mypy.ipc import IPCBase
DEFAULT_STATUS_FILE: Final = ".dmypy.json"
def receive(connection: IPCBase) -> Any:
"""Receive single JSON data frame from a connection.
Raise OSError if the data received is not valid JSON or if it is
not a dict.
"""
bdata = connection.read()
if not bdata:
raise OSError("No data received")
try:
data = json.loads(bdata)
except Exception as e:
raise OSError("Data received is not valid JSON") from e
if not isinstance(data, dict):
raise OSError(f"Data received is not a dict ({type(data)})")
return data
def send(connection: IPCBase, data: Any) -> None:
"""Send data to a connection encoded and framed.
The data must be JSON-serializable. We assume that a single send call is a
    single frame to be sent on the connection.
"""
connection.write(json.dumps(data))
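# Illustrative sketch, not part of upstream mypy: a minimal in-memory stand-in
# for a connection, used to show the framing contract of send()/receive() -- a
# single write() holds one JSON document, and receive() rejects anything that is
# not a JSON dict. Real callers use an IPCBase connection instead.
class _FakeConnection:
    def __init__(self) -> None:
        self._frame = ""
    def write(self, data: str) -> None:
        self._frame = data
    def read(self) -> str:
        return self._frame
def _example_send_receive_round_trip() -> Any:
    conn: Any = _FakeConnection()  # stand-in for an IPCBase connection
    send(conn, {"command": "status", "ok": True})
    return receive(conn)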
class WriteToConn(TextIO):
"""Helper class to write to a connection instead of standard output."""
def __init__(self, server: IPCBase, output_key: str, isatty: bool) -> None:
self.server = server
self.output_key = output_key
self._isatty = isatty
def __enter__(self) -> TextIO:
return self
def __exit__(
self,
t: type[BaseException] | None,
value: BaseException | None,
traceback: TracebackType | None,
) -> None:
pass
def __iter__(self) -> Iterator[str]:
raise io.UnsupportedOperation
def __next__(self) -> str:
raise io.UnsupportedOperation
def close(self) -> None:
pass
def fileno(self) -> int:
raise OSError
def flush(self) -> None:
pass
def isatty(self) -> bool:
return self._isatty
def read(self, n: int = 0) -> str:
raise io.UnsupportedOperation
def readable(self) -> bool:
return False
def readline(self, limit: int = 0) -> str:
raise io.UnsupportedOperation
def readlines(self, hint: int = 0) -> list[str]:
raise io.UnsupportedOperation
def seek(self, offset: int, whence: int = 0) -> int:
raise io.UnsupportedOperation
def seekable(self) -> bool:
return False
def tell(self) -> int:
raise io.UnsupportedOperation
def truncate(self, size: int | None = 0) -> int:
raise io.UnsupportedOperation
def write(self, output: str) -> int:
resp: dict[str, Any] = {}
resp[self.output_key] = output
send(self.server, resp)
return len(output)
def writable(self) -> bool:
return True
def writelines(self, lines: Iterable[str]) -> None:
for s in lines:
self.write(s)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/dmypy_util.py
|
Python
|
NOASSERTION
| 2,995 |
from __future__ import annotations
from typing import Callable, Container, cast
from mypy.nodes import ARG_STAR, ARG_STAR2
from mypy.types import (
AnyType,
CallableType,
DeletedType,
ErasedType,
Instance,
LiteralType,
NoneType,
Overloaded,
Parameters,
ParamSpecType,
PartialType,
ProperType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeTranslator,
TypeType,
TypeVarId,
TypeVarTupleType,
TypeVarType,
TypeVisitor,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
get_proper_type,
get_proper_types,
)
from mypy.typevartuples import erased_vars
def erase_type(typ: Type) -> ProperType:
"""Erase any type variables from a type.
Also replace tuple types with the corresponding concrete types.
Examples:
A -> A
B[X] -> B[Any]
Tuple[A, B] -> tuple
Callable[[A1, A2, ...], R] -> Callable[..., Any]
Type[X] -> Type[Any]
"""
typ = get_proper_type(typ)
return typ.accept(EraseTypeVisitor())
class EraseTypeVisitor(TypeVisitor[ProperType]):
def visit_unbound_type(self, t: UnboundType) -> ProperType:
# TODO: replace with an assert after UnboundType can't leak from semantic analysis.
return AnyType(TypeOfAny.from_error)
def visit_any(self, t: AnyType) -> ProperType:
return t
def visit_none_type(self, t: NoneType) -> ProperType:
return t
def visit_uninhabited_type(self, t: UninhabitedType) -> ProperType:
return t
def visit_erased_type(self, t: ErasedType) -> ProperType:
return t
def visit_partial_type(self, t: PartialType) -> ProperType:
# Should not get here.
raise RuntimeError("Cannot erase partial types")
def visit_deleted_type(self, t: DeletedType) -> ProperType:
return t
def visit_instance(self, t: Instance) -> ProperType:
args = erased_vars(t.type.defn.type_vars, TypeOfAny.special_form)
return Instance(t.type, args, t.line)
def visit_type_var(self, t: TypeVarType) -> ProperType:
return AnyType(TypeOfAny.special_form)
def visit_param_spec(self, t: ParamSpecType) -> ProperType:
return AnyType(TypeOfAny.special_form)
def visit_parameters(self, t: Parameters) -> ProperType:
raise RuntimeError("Parameters should have been bound to a class")
def visit_type_var_tuple(self, t: TypeVarTupleType) -> ProperType:
        # Likely, we can never get here because of aggressive erasure of types that
        # can contain this, but it is better to still return a valid replacement.
return t.tuple_fallback.copy_modified(args=[AnyType(TypeOfAny.special_form)])
def visit_unpack_type(self, t: UnpackType) -> ProperType:
return AnyType(TypeOfAny.special_form)
def visit_callable_type(self, t: CallableType) -> ProperType:
# We must preserve the fallback type for overload resolution to work.
any_type = AnyType(TypeOfAny.special_form)
return CallableType(
arg_types=[any_type, any_type],
arg_kinds=[ARG_STAR, ARG_STAR2],
arg_names=[None, None],
ret_type=any_type,
fallback=t.fallback,
is_ellipsis_args=True,
implicit=True,
)
def visit_overloaded(self, t: Overloaded) -> ProperType:
return t.fallback.accept(self)
def visit_tuple_type(self, t: TupleType) -> ProperType:
return t.partial_fallback.accept(self)
def visit_typeddict_type(self, t: TypedDictType) -> ProperType:
return t.fallback.accept(self)
def visit_literal_type(self, t: LiteralType) -> ProperType:
# The fallback for literal types should always be either
# something like int or str, or an enum class -- types that
# don't contain any TypeVars. So there's no need to visit it.
return t
def visit_union_type(self, t: UnionType) -> ProperType:
erased_items = [erase_type(item) for item in t.items]
from mypy.typeops import make_simplified_union
return make_simplified_union(erased_items)
def visit_type_type(self, t: TypeType) -> ProperType:
return TypeType.make_normalized(t.item.accept(self), line=t.line)
def visit_type_alias_type(self, t: TypeAliasType) -> ProperType:
raise RuntimeError("Type aliases should be expanded before accepting this visitor")
def erase_typevars(t: Type, ids_to_erase: Container[TypeVarId] | None = None) -> Type:
"""Replace all type variables in a type with any,
or just the ones in the provided collection.
"""
def erase_id(id: TypeVarId) -> bool:
if ids_to_erase is None:
return True
return id in ids_to_erase
return t.accept(TypeVarEraser(erase_id, AnyType(TypeOfAny.special_form)))
def replace_meta_vars(t: Type, target_type: Type) -> Type:
"""Replace unification variables in a type with the target type."""
return t.accept(TypeVarEraser(lambda id: id.is_meta_var(), target_type))
class TypeVarEraser(TypeTranslator):
"""Implementation of type erasure"""
def __init__(self, erase_id: Callable[[TypeVarId], bool], replacement: Type) -> None:
super().__init__()
self.erase_id = erase_id
self.replacement = replacement
def visit_type_var(self, t: TypeVarType) -> Type:
if self.erase_id(t.id):
return self.replacement
return t
# TODO: below two methods duplicate some logic with expand_type().
# In fact, we may want to refactor this whole visitor to use expand_type().
def visit_instance(self, t: Instance) -> Type:
result = super().visit_instance(t)
assert isinstance(result, ProperType) and isinstance(result, Instance)
if t.type.fullname == "builtins.tuple":
# Normalize Tuple[*Tuple[X, ...], ...] -> Tuple[X, ...]
arg = result.args[0]
if isinstance(arg, UnpackType):
unpacked = get_proper_type(arg.type)
if isinstance(unpacked, Instance):
assert unpacked.type.fullname == "builtins.tuple"
return unpacked
return result
def visit_tuple_type(self, t: TupleType) -> Type:
result = super().visit_tuple_type(t)
assert isinstance(result, ProperType) and isinstance(result, TupleType)
if len(result.items) == 1:
# Normalize Tuple[*Tuple[X, ...]] -> Tuple[X, ...]
item = result.items[0]
if isinstance(item, UnpackType):
unpacked = get_proper_type(item.type)
if isinstance(unpacked, Instance):
assert unpacked.type.fullname == "builtins.tuple"
if result.partial_fallback.type.fullname != "builtins.tuple":
# If it is a subtype (like named tuple) we need to preserve it,
# this essentially mimics the logic in tuple_fallback().
return result.partial_fallback.accept(self)
return unpacked
return result
def visit_type_var_tuple(self, t: TypeVarTupleType) -> Type:
if self.erase_id(t.id):
return t.tuple_fallback.copy_modified(args=[self.replacement])
return t
def visit_param_spec(self, t: ParamSpecType) -> Type:
if self.erase_id(t.id):
return self.replacement
return t
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
# Type alias target can't contain bound type variables (not bound by the type
# alias itself), so it is safe to just erase the arguments.
return t.copy_modified(args=[a.accept(self) for a in t.args])
def remove_instance_last_known_values(t: Type) -> Type:
return t.accept(LastKnownValueEraser())
class LastKnownValueEraser(TypeTranslator):
"""Removes the Literal[...] type that may be associated with any
Instance types."""
def visit_instance(self, t: Instance) -> Type:
if not t.last_known_value and not t.args:
return t
return t.copy_modified(args=[a.accept(self) for a in t.args], last_known_value=None)
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
# Type aliases can't contain literal values, because they are
# always constructed as explicit types.
return t
def visit_union_type(self, t: UnionType) -> Type:
new = cast(UnionType, super().visit_union_type(t))
# Erasure can result in many duplicate items; merge them.
# Call make_simplified_union only on lists of instance types
# that all have the same fullname, to avoid simplifying too
# much.
instances = [item for item in new.items if isinstance(get_proper_type(item), Instance)]
# Avoid merge in simple cases such as optional types.
if len(instances) > 1:
instances_by_name: dict[str, list[Instance]] = {}
p_new_items = get_proper_types(new.items)
for p_item in p_new_items:
if isinstance(p_item, Instance) and not p_item.args:
instances_by_name.setdefault(p_item.type.fullname, []).append(p_item)
merged: list[Type] = []
for item in new.items:
orig_item = item
item = get_proper_type(item)
if isinstance(item, Instance) and not item.args:
types = instances_by_name.get(item.type.fullname)
if types is not None:
if len(types) == 1:
merged.append(item)
else:
from mypy.typeops import make_simplified_union
merged.append(make_simplified_union(types))
del instances_by_name[item.type.fullname]
else:
merged.append(orig_item)
return UnionType.make_union(merged)
return new
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/erasetype.py
|
Python
|
NOASSERTION
| 10,059 |
"""Defines the different custom formats in which mypy can output."""
import json
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from mypy.errors import MypyError
class ErrorFormatter(ABC):
"""Base class to define how errors are formatted before being printed."""
@abstractmethod
def report_error(self, error: "MypyError") -> str:
raise NotImplementedError
class JSONFormatter(ErrorFormatter):
"""Formatter for basic JSON output format."""
def report_error(self, error: "MypyError") -> str:
"""Prints out the errors as simple, static JSON lines."""
return json.dumps(
{
"file": error.file_path,
"line": error.line,
"column": error.column,
"message": error.message,
"hint": None if len(error.hints) == 0 else "\n".join(error.hints),
"code": None if error.errorcode is None else error.errorcode.code,
"severity": error.severity,
}
)
OUTPUT_CHOICES = {"json": JSONFormatter()}
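# Illustrative sketch, not part of upstream mypy: selecting the "json" formatter
# from OUTPUT_CHOICES and rendering one error. The MypyError constructor
# arguments mirror its definition in mypy.errors; the concrete values are made up.
def _example_render_json_error() -> str:
    from mypy.errors import MypyError  # runtime import; see TYPE_CHECKING above
    error = MypyError(
        "example.py", 3, 4, "Unsupported operand types", None, severity="error"
    )
    return OUTPUT_CHOICES["json"].report_error(error)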
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/error_formatter.py
|
Python
|
NOASSERTION
| 1,115 |
"""Classification of possible errors mypy can detect.
These can be used for filtering specific errors.
"""
from __future__ import annotations
from collections import defaultdict
from typing import Final
from mypy_extensions import mypyc_attr
error_codes: dict[str, ErrorCode] = {}
sub_code_map: dict[str, set[str]] = defaultdict(set)
@mypyc_attr(allow_interpreted_subclasses=True)
class ErrorCode:
def __init__(
self,
code: str,
description: str,
category: str,
default_enabled: bool = True,
sub_code_of: ErrorCode | None = None,
) -> None:
self.code = code
self.description = description
self.category = category
self.default_enabled = default_enabled
self.sub_code_of = sub_code_of
if sub_code_of is not None:
assert sub_code_of.sub_code_of is None, "Nested subcategories are not supported"
sub_code_map[sub_code_of.code].add(code)
error_codes[code] = self
def __str__(self) -> str:
return f"<ErrorCode {self.code}>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, ErrorCode):
return False
return self.code == other.code
def __hash__(self) -> int:
return hash((self.code,))
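# Illustrative sketch, not part of upstream mypy: constructing an ErrorCode
# registers it in `error_codes` as a side effect, and a `sub_code_of` link is
# recorded in `sub_code_map`. The parent passed in must not itself be a sub-code,
# and the code name used here is hypothetical.
def _example_register_plugin_code(parent: ErrorCode) -> ErrorCode:
    custom = ErrorCode(
        "example-plugin-check",
        "Hypothetical check registered by a plugin",
        "General",
        default_enabled=False,
        sub_code_of=parent,
    )
    assert custom.code in error_codes
    assert custom.code in sub_code_map[parent.code]
    return custom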
ATTR_DEFINED: Final = ErrorCode("attr-defined", "Check that attribute exists", "General")
NAME_DEFINED: Final = ErrorCode("name-defined", "Check that name is defined", "General")
CALL_ARG: Final[ErrorCode] = ErrorCode(
"call-arg", "Check number, names and kinds of arguments in calls", "General"
)
ARG_TYPE: Final = ErrorCode("arg-type", "Check argument types in calls", "General")
CALL_OVERLOAD: Final = ErrorCode(
"call-overload", "Check that an overload variant matches arguments", "General"
)
VALID_TYPE: Final[ErrorCode] = ErrorCode(
"valid-type", "Check that type (annotation) is valid", "General"
)
VAR_ANNOTATED: Final = ErrorCode(
"var-annotated", "Require variable annotation if type can't be inferred", "General"
)
OVERRIDE: Final = ErrorCode(
"override", "Check that method override is compatible with base class", "General"
)
RETURN: Final[ErrorCode] = ErrorCode(
"return", "Check that function always returns a value", "General"
)
RETURN_VALUE: Final[ErrorCode] = ErrorCode(
"return-value", "Check that return value is compatible with signature", "General"
)
ASSIGNMENT: Final[ErrorCode] = ErrorCode(
"assignment", "Check that assigned value is compatible with target", "General"
)
METHOD_ASSIGN: Final[ErrorCode] = ErrorCode(
"method-assign",
"Check that assignment target is not a method",
"General",
sub_code_of=ASSIGNMENT,
)
TYPE_ARG: Final = ErrorCode("type-arg", "Check that generic type arguments are present", "General")
TYPE_VAR: Final = ErrorCode("type-var", "Check that type variable values are valid", "General")
UNION_ATTR: Final = ErrorCode(
"union-attr", "Check that attribute exists in each item of a union", "General"
)
INDEX: Final = ErrorCode("index", "Check indexing operations", "General")
OPERATOR: Final = ErrorCode("operator", "Check that operator is valid for operands", "General")
LIST_ITEM: Final = ErrorCode(
"list-item", "Check list items in a list expression [item, ...]", "General"
)
DICT_ITEM: Final = ErrorCode(
"dict-item", "Check dict items in a dict expression {key: value, ...}", "General"
)
TYPEDDICT_ITEM: Final = ErrorCode(
"typeddict-item", "Check items when constructing TypedDict", "General"
)
TYPEDDICT_UNKNOWN_KEY: Final = ErrorCode(
"typeddict-unknown-key",
"Check unknown keys when constructing TypedDict",
"General",
sub_code_of=TYPEDDICT_ITEM,
)
HAS_TYPE: Final = ErrorCode(
"has-type", "Check that type of reference can be determined", "General"
)
IMPORT: Final = ErrorCode(
"import", "Require that imported module can be found or has stubs", "General"
)
IMPORT_NOT_FOUND: Final = ErrorCode(
"import-not-found", "Require that imported module can be found", "General", sub_code_of=IMPORT
)
IMPORT_UNTYPED: Final = ErrorCode(
"import-untyped", "Require that imported module has stubs", "General", sub_code_of=IMPORT
)
NO_REDEF: Final = ErrorCode("no-redef", "Check that each name is defined once", "General")
FUNC_RETURNS_VALUE: Final = ErrorCode(
"func-returns-value", "Check that called function returns a value in value context", "General"
)
ABSTRACT: Final = ErrorCode(
"abstract", "Prevent instantiation of classes with abstract attributes", "General"
)
TYPE_ABSTRACT: Final = ErrorCode(
"type-abstract", "Require only concrete classes where Type[...] is expected", "General"
)
VALID_NEWTYPE: Final = ErrorCode(
"valid-newtype", "Check that argument 2 to NewType is valid", "General"
)
STRING_FORMATTING: Final = ErrorCode(
"str-format", "Check that string formatting/interpolation is type-safe", "General"
)
STR_BYTES_PY3: Final = ErrorCode(
"str-bytes-safe", "Warn about implicit coercions related to bytes and string types", "General"
)
EXIT_RETURN: Final = ErrorCode(
"exit-return", "Warn about too general return type for '__exit__'", "General"
)
LITERAL_REQ: Final = ErrorCode("literal-required", "Check that value is a literal", "General")
UNUSED_COROUTINE: Final = ErrorCode(
"unused-coroutine", "Ensure that all coroutines are used", "General"
)
# TODO: why do we need the explicit type here? Without it mypyc CI builds fail with
# mypy/message_registry.py:37: error: Cannot determine type of "EMPTY_BODY" [has-type]
EMPTY_BODY: Final[ErrorCode] = ErrorCode(
"empty-body",
"A dedicated error code to opt out return errors for empty/trivial bodies",
"General",
)
SAFE_SUPER: Final = ErrorCode(
"safe-super", "Warn about calls to abstract methods with empty/trivial bodies", "General"
)
TOP_LEVEL_AWAIT: Final = ErrorCode(
"top-level-await", "Warn about top level await expressions", "General"
)
AWAIT_NOT_ASYNC: Final = ErrorCode(
"await-not-async", 'Warn about "await" outside coroutine ("async def")', "General"
)
# These error codes aren't enabled by default.
NO_UNTYPED_DEF: Final[ErrorCode] = ErrorCode(
"no-untyped-def", "Check that every function has an annotation", "General"
)
NO_UNTYPED_CALL: Final = ErrorCode(
"no-untyped-call",
"Disallow calling functions without type annotations from annotated functions",
"General",
)
REDUNDANT_CAST: Final = ErrorCode(
"redundant-cast", "Check that cast changes type of expression", "General"
)
ASSERT_TYPE: Final = ErrorCode("assert-type", "Check that assert_type() call succeeds", "General")
COMPARISON_OVERLAP: Final = ErrorCode(
"comparison-overlap", "Check that types in comparisons and 'in' expressions overlap", "General"
)
NO_ANY_UNIMPORTED: Final = ErrorCode(
"no-any-unimported", 'Reject "Any" types from unfollowed imports', "General"
)
NO_ANY_RETURN: Final = ErrorCode(
"no-any-return",
'Reject returning value with "Any" type if return type is not "Any"',
"General",
)
UNREACHABLE: Final = ErrorCode(
"unreachable", "Warn about unreachable statements or expressions", "General"
)
ANNOTATION_UNCHECKED = ErrorCode(
"annotation-unchecked", "Notify about type annotations in unchecked functions", "General"
)
TYPEDDICT_READONLY_MUTATED = ErrorCode(
"typeddict-readonly-mutated", "TypedDict's ReadOnly key is mutated", "General"
)
POSSIBLY_UNDEFINED: Final[ErrorCode] = ErrorCode(
"possibly-undefined",
"Warn about variables that are defined only in some execution paths",
"General",
default_enabled=False,
)
REDUNDANT_EXPR: Final = ErrorCode(
"redundant-expr", "Warn about redundant expressions", "General", default_enabled=False
)
TRUTHY_BOOL: Final[ErrorCode] = ErrorCode(
"truthy-bool",
"Warn about expressions that could always evaluate to true in boolean contexts",
"General",
default_enabled=False,
)
TRUTHY_FUNCTION: Final[ErrorCode] = ErrorCode(
"truthy-function",
"Warn about function that always evaluate to true in boolean contexts",
"General",
)
TRUTHY_ITERABLE: Final[ErrorCode] = ErrorCode(
"truthy-iterable",
"Warn about Iterable expressions that could always evaluate to true in boolean contexts",
"General",
default_enabled=False,
)
NAME_MATCH: Final = ErrorCode(
"name-match", "Check that type definition has consistent naming", "General"
)
NO_OVERLOAD_IMPL: Final = ErrorCode(
"no-overload-impl",
"Check that overloaded functions outside stub files have an implementation",
"General",
)
IGNORE_WITHOUT_CODE: Final = ErrorCode(
"ignore-without-code",
"Warn about '# type: ignore' comments which do not have error codes",
"General",
default_enabled=False,
)
UNUSED_AWAITABLE: Final = ErrorCode(
"unused-awaitable",
"Ensure that all awaitable values are used",
"General",
default_enabled=False,
)
REDUNDANT_SELF_TYPE = ErrorCode(
"redundant-self",
"Warn about redundant Self type annotations on method first argument",
"General",
default_enabled=False,
)
USED_BEFORE_DEF: Final[ErrorCode] = ErrorCode(
"used-before-def", "Warn about variables that are used before they are defined", "General"
)
UNUSED_IGNORE: Final = ErrorCode(
"unused-ignore", "Ensure that all type ignores are used", "General", default_enabled=False
)
EXPLICIT_OVERRIDE_REQUIRED: Final = ErrorCode(
"explicit-override",
"Require @override decorator if method is overriding a base class method",
"General",
default_enabled=False,
)
UNIMPORTED_REVEAL: Final = ErrorCode(
"unimported-reveal",
"Require explicit import from typing or typing_extensions for reveal_type",
"General",
default_enabled=False,
)
MUTABLE_OVERRIDE: Final[ErrorCode] = ErrorCode(
"mutable-override",
"Reject covariant overrides for mutable attributes",
"General",
default_enabled=False,
)
# Syntax errors are often blocking.
SYNTAX: Final[ErrorCode] = ErrorCode("syntax", "Report syntax errors", "General")
# This is an internal marker code for a whole-file ignore. It is not intended to
# be user-visible.
FILE: Final = ErrorCode("file", "Internal marker for a whole file being ignored", "General")
del error_codes[FILE.code]
# This is a catch-all for remaining uncategorized errors.
MISC: Final[ErrorCode] = ErrorCode("misc", "Miscellaneous other checks", "General")
OVERLOAD_CANNOT_MATCH: Final[ErrorCode] = ErrorCode(
"overload-cannot-match",
"Warn if an @overload signature can never be matched",
"General",
sub_code_of=MISC,
)
OVERLOAD_OVERLAP: Final[ErrorCode] = ErrorCode(
"overload-overlap",
"Warn if multiple @overload variants overlap in unsafe ways",
"General",
sub_code_of=MISC,
)
PROPERTY_DECORATOR = ErrorCode(
"prop-decorator",
"Decorators on top of @property are not supported",
"General",
sub_code_of=MISC,
)
NARROWED_TYPE_NOT_SUBTYPE: Final[ErrorCode] = ErrorCode(
"narrowed-type-not-subtype",
"Warn if a TypeIs function's narrowed type is not a subtype of the original type",
"General",
)
# This copy will not include any error codes defined later in the plugins.
mypy_error_codes = error_codes.copy()
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/errorcodes.py
|
Python
|
NOASSERTION
| 11,169 |
from __future__ import annotations
import os.path
import sys
import traceback
from collections import defaultdict
from typing import Callable, Final, Iterable, NoReturn, Optional, TextIO, Tuple, TypeVar
from typing_extensions import Literal, TypeAlias as _TypeAlias
from mypy import errorcodes as codes
from mypy.error_formatter import ErrorFormatter
from mypy.errorcodes import IMPORT, IMPORT_NOT_FOUND, IMPORT_UNTYPED, ErrorCode, mypy_error_codes
from mypy.message_registry import ErrorMessage
from mypy.options import Options
from mypy.scope import Scope
from mypy.util import DEFAULT_SOURCE_OFFSET, is_typeshed_file
from mypy.version import __version__ as mypy_version
T = TypeVar("T")
# Show error codes for some note-level messages (these usually appear alone
# and not as a comment for a previous error-level message).
SHOW_NOTE_CODES: Final = {codes.ANNOTATION_UNCHECKED}
# Do not add notes with links to error code docs to errors with these codes.
# We can tweak this set as we get more experience about what is helpful and what is not.
HIDE_LINK_CODES: Final = {
# This is a generic error code, so it has no useful docs
codes.MISC,
# These are trivial and have some custom notes (e.g. for list being invariant)
codes.ASSIGNMENT,
codes.ARG_TYPE,
codes.RETURN_VALUE,
# Undefined name/attribute errors are self-explanatory
codes.ATTR_DEFINED,
codes.NAME_DEFINED,
# Overrides have a custom link to docs
codes.OVERRIDE,
}
allowed_duplicates: Final = ["@overload", "Got:", "Expected:"]
BASE_RTD_URL: Final = "https://mypy.rtfd.io/en/stable/_refs.html#code"
# Keep track of the original error code when the error code of a message is changed.
# This is used to give notes about out-of-date "type: ignore" comments.
original_error_codes: Final = {codes.LITERAL_REQ: codes.MISC, codes.TYPE_ABSTRACT: codes.MISC}
class ErrorInfo:
"""Representation of a single error message."""
# Description of a sequence of imports that refer to the source file
# related to this error. Each item is a (path, line number) tuple.
import_ctx: list[tuple[str, int]]
# The path to source file that was the source of this error.
file = ""
# The fully-qualified id of the source module for this error.
module: str | None = None
    # The name of the type in which this error is located.
type: str | None = "" # Unqualified, may be None
    # The name of the function or member in which this error is located.
function_or_member: str | None = "" # Unqualified, may be None
# The line number related to this error within file.
line = 0 # -1 if unknown
    # The column number related to this error within file.
column = 0 # -1 if unknown
# The end line number related to this error within file.
end_line = 0 # -1 if unknown
    # The end column number related to this error within file.
end_column = 0 # -1 if unknown
# Either 'error' or 'note'
severity = ""
# The error message.
message = ""
# The error code.
code: ErrorCode | None = None
# If True, we should halt build after the file that generated this error.
blocker = False
    # Only report this particular message once per program.
only_once = False
# Do not remove duplicate copies of this message (ignored if only_once is True).
allow_dups = False
# Actual origin of the error message as tuple (path, line number, end line number)
# If end line number is unknown, use line number.
origin: tuple[str, Iterable[int]]
# Fine-grained incremental target where this was reported
target: str | None = None
# If True, don't show this message in output, but still record the error (needed
# by mypy daemon)
hidden = False
def __init__(
self,
import_ctx: list[tuple[str, int]],
*,
file: str,
module: str | None,
typ: str | None,
function_or_member: str | None,
line: int,
column: int,
end_line: int,
end_column: int,
severity: str,
message: str,
code: ErrorCode | None,
blocker: bool,
only_once: bool,
allow_dups: bool,
origin: tuple[str, Iterable[int]] | None = None,
target: str | None = None,
priority: int = 0,
) -> None:
self.import_ctx = import_ctx
self.file = file
self.module = module
self.type = typ
self.function_or_member = function_or_member
self.line = line
self.column = column
self.end_line = end_line
self.end_column = end_column
self.severity = severity
self.message = message
self.code = code
self.blocker = blocker
self.only_once = only_once
self.allow_dups = allow_dups
self.origin = origin or (file, [line])
self.target = target
self.priority = priority
# Type used internally to represent errors:
# (path, line, column, end_line, end_column, severity, message, allow_dups, code)
ErrorTuple: _TypeAlias = Tuple[
Optional[str], int, int, int, int, str, str, bool, Optional[ErrorCode]
]
class ErrorWatcher:
"""Context manager that can be used to keep track of new errors recorded
around a given operation.
    Errors maintain a stack of such watchers. The on_error handler is called
    starting at the top of the stack, and the error is propagated down the stack
    unless filtered out by one of the ErrorWatcher instances.
"""
def __init__(
self,
errors: Errors,
*,
filter_errors: bool | Callable[[str, ErrorInfo], bool] = False,
save_filtered_errors: bool = False,
) -> None:
self.errors = errors
self._has_new_errors = False
self._filter = filter_errors
self._filtered: list[ErrorInfo] | None = [] if save_filtered_errors else None
def __enter__(self) -> ErrorWatcher:
self.errors._watchers.append(self)
return self
def __exit__(self, exc_type: object, exc_val: object, exc_tb: object) -> Literal[False]:
last = self.errors._watchers.pop()
assert last == self
return False
def on_error(self, file: str, info: ErrorInfo) -> bool:
"""Handler called when a new error is recorded.
        The default implementation just sets the has_new_errors flag.
        Return True to filter out the error, preventing it from being seen by other
        ErrorWatchers further down the stack and from being recorded by Errors.
"""
self._has_new_errors = True
if isinstance(self._filter, bool):
should_filter = self._filter
elif callable(self._filter):
should_filter = self._filter(file, info)
else:
raise AssertionError(f"invalid error filter: {type(self._filter)}")
if should_filter and self._filtered is not None:
self._filtered.append(info)
return should_filter
def has_new_errors(self) -> bool:
return self._has_new_errors
def filtered_errors(self) -> list[ErrorInfo]:
assert self._filtered is not None
return self._filtered
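# Illustrative sketch, not part of upstream mypy: an ErrorWatcher pushed onto an
# Errors instance can swallow everything reported inside the `with` block and
# hand the suppressed ErrorInfo objects back via filtered_errors(). Default
# Options() are used here and the reported message is made up.
def _example_collect_filtered_errors() -> list[ErrorInfo]:
    errors = Errors(Options())
    with ErrorWatcher(errors, filter_errors=True, save_filtered_errors=True) as watcher:
        errors.report(1, None, "hypothetical error message")
    return watcher.filtered_errors()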
class Errors:
"""Container for compile errors.
    This class generates and keeps track of compile errors and the
current error context (nested imports).
"""
# Map from files to generated error messages. Is an OrderedDict so
# that it can be used to order messages based on the order the
# files were processed.
error_info_map: dict[str, list[ErrorInfo]]
# optimization for legacy codebases with many files with errors
has_blockers: set[str]
# Files that we have reported the errors for
flushed_files: set[str]
# Current error context: nested import context/stack, as a list of (path, line) pairs.
import_ctx: list[tuple[str, int]]
# Path name prefix that is removed from all paths, if set.
ignore_prefix: str | None = None
# Path to current file.
file: str = ""
# Ignore some errors on these lines of each file
# (path -> line -> error-codes)
ignored_lines: dict[str, dict[int, list[str]]]
# Lines that were skipped during semantic analysis e.g. due to ALWAYS_FALSE, MYPY_FALSE,
# or platform/version checks. Those lines would not be type-checked.
skipped_lines: dict[str, set[int]]
# Lines on which an error was actually ignored.
used_ignored_lines: dict[str, dict[int, list[str]]]
# Files where all errors should be ignored.
ignored_files: set[str]
# Collection of reported only_once messages.
only_once_messages: set[str]
# Set to True to show "In function "foo":" messages.
show_error_context: bool = False
# Set to True to show column numbers in error messages.
show_column_numbers: bool = False
# Set to True to show end line and end column in error messages.
    # This implies `show_column_numbers`.
show_error_end: bool = False
# Set to True to show absolute file paths in error messages.
show_absolute_path: bool = False
# State for keeping track of the current fine-grained incremental mode target.
# (See mypy.server.update for more about targets.)
# Current module id.
target_module: str | None = None
scope: Scope | None = None
# Have we seen an import-related error so far? If yes, we filter out other messages
# in some cases to avoid reporting huge numbers of errors.
seen_import_error = False
_watchers: list[ErrorWatcher] = []
def __init__(
self,
options: Options,
*,
read_source: Callable[[str], list[str] | None] | None = None,
hide_error_codes: bool | None = None,
) -> None:
self.options = options
self.hide_error_codes = (
hide_error_codes if hide_error_codes is not None else options.hide_error_codes
)
# We use fscache to read source code when showing snippets.
self.read_source = read_source
self.initialize()
def initialize(self) -> None:
self.error_info_map = {}
self.flushed_files = set()
self.import_ctx = []
self.function_or_member = [None]
self.ignored_lines = {}
self.skipped_lines = {}
self.used_ignored_lines = defaultdict(lambda: defaultdict(list))
self.ignored_files = set()
self.only_once_messages = set()
self.has_blockers = set()
self.scope = None
self.target_module = None
self.seen_import_error = False
def reset(self) -> None:
self.initialize()
def set_ignore_prefix(self, prefix: str) -> None:
"""Set path prefix that will be removed from all paths."""
prefix = os.path.normpath(prefix)
# Add separator to the end, if not given.
if os.path.basename(prefix) != "":
prefix += os.sep
self.ignore_prefix = prefix
def simplify_path(self, file: str) -> str:
if self.options.show_absolute_path:
return os.path.abspath(file)
else:
file = os.path.normpath(file)
return remove_path_prefix(file, self.ignore_prefix)
def set_file(
self, file: str, module: str | None, options: Options, scope: Scope | None = None
) -> None:
"""Set the path and module id of the current file."""
# The path will be simplified later, in render_messages. That way
# * 'file' is always a key that uniquely identifies a source file
# that mypy read (simplified paths might not be unique); and
# * we only have to simplify in one place, while still supporting
# reporting errors for files other than the one currently being
# processed.
self.file = file
self.target_module = module
self.scope = scope
self.options = options
def set_file_ignored_lines(
self, file: str, ignored_lines: dict[int, list[str]], ignore_all: bool = False
) -> None:
self.ignored_lines[file] = ignored_lines
if ignore_all:
self.ignored_files.add(file)
def set_skipped_lines(self, file: str, skipped_lines: set[int]) -> None:
self.skipped_lines[file] = skipped_lines
def current_target(self) -> str | None:
"""Retrieves the current target from the associated scope.
If there is no associated scope, use the target module."""
if self.scope is not None:
return self.scope.current_target()
return self.target_module
def current_module(self) -> str | None:
return self.target_module
def import_context(self) -> list[tuple[str, int]]:
"""Return a copy of the import context."""
return self.import_ctx.copy()
def set_import_context(self, ctx: list[tuple[str, int]]) -> None:
"""Replace the entire import context with a new value."""
self.import_ctx = ctx.copy()
def report(
self,
line: int,
column: int | None,
message: str,
code: ErrorCode | None = None,
*,
blocker: bool = False,
severity: str = "error",
file: str | None = None,
only_once: bool = False,
allow_dups: bool = False,
origin_span: Iterable[int] | None = None,
offset: int = 0,
end_line: int | None = None,
end_column: int | None = None,
) -> None:
"""Report message at the given line using the current error context.
Args:
line: line number of error
column: column number of error
message: message to report
code: error code (defaults to 'misc'; not shown for notes)
blocker: if True, don't continue analysis after this error
severity: 'error' or 'note'
file: if non-None, override current file as context
only_once: if True, only report this exact message once per build
allow_dups: if True, allow duplicate copies of this message (ignored if only_once)
origin_span: if non-None, override current context as origin
(type: ignores have effect here)
end_line: if non-None, override current context as end
"""
if self.scope:
type = self.scope.current_type_name()
if self.scope.ignored > 0:
type = None # Omit type context if nested function
function = self.scope.current_function_name()
else:
type = None
function = None
if column is None:
column = -1
if end_column is None:
if column == -1:
end_column = -1
else:
end_column = column + 1
if file is None:
file = self.file
if offset:
message = " " * offset + message
if origin_span is None:
origin_span = [line]
if end_line is None:
end_line = line
code = code or (codes.MISC if not blocker else None)
info = ErrorInfo(
import_ctx=self.import_context(),
file=file,
module=self.current_module(),
typ=type,
function_or_member=function,
line=line,
column=column,
end_line=end_line,
end_column=end_column,
severity=severity,
message=message,
code=code,
blocker=blocker,
only_once=only_once,
allow_dups=allow_dups,
origin=(self.file, origin_span),
target=self.current_target(),
)
self.add_error_info(info)
def _add_error_info(self, file: str, info: ErrorInfo) -> None:
assert file not in self.flushed_files
# process the stack of ErrorWatchers before modifying any internal state
# in case we need to filter out the error entirely
if self._filter_error(file, info):
return
if file not in self.error_info_map:
self.error_info_map[file] = []
self.error_info_map[file].append(info)
if info.blocker:
self.has_blockers.add(file)
if info.code in (IMPORT, IMPORT_UNTYPED, IMPORT_NOT_FOUND):
self.seen_import_error = True
def _filter_error(self, file: str, info: ErrorInfo) -> bool:
"""
process ErrorWatcher stack from top to bottom,
stopping early if error needs to be filtered out
"""
i = len(self._watchers)
while i > 0:
i -= 1
w = self._watchers[i]
if w.on_error(file, info):
return True
return False
def add_error_info(self, info: ErrorInfo) -> None:
file, lines = info.origin
# process the stack of ErrorWatchers before modifying any internal state
# in case we need to filter out the error entirely
# NB: we need to do this both here and in _add_error_info, otherwise we
# might incorrectly update the sets of ignored or only_once messages
if self._filter_error(file, info):
return
if not info.blocker: # Blockers cannot be ignored
if file in self.ignored_lines:
# Check each line in this context for "type: ignore" comments.
# line == end_line for most nodes, so we only loop once.
for scope_line in lines:
if self.is_ignored_error(scope_line, info, self.ignored_lines[file]):
# Annotation requests us to ignore all errors on this line.
self.used_ignored_lines[file][scope_line].append(
(info.code or codes.MISC).code
)
return
if file in self.ignored_files:
return
if info.only_once:
if info.message in self.only_once_messages:
return
self.only_once_messages.add(info.message)
if (
self.seen_import_error
and info.code not in (IMPORT, IMPORT_UNTYPED, IMPORT_NOT_FOUND)
and self.has_many_errors()
):
# Missing stubs can easily cause thousands of errors about
# Any types, especially when upgrading to mypy 0.900,
# which no longer bundles third-party library stubs. Avoid
# showing too many errors to make it easier to see
# import-related errors.
info.hidden = True
self.report_hidden_errors(info)
self._add_error_info(file, info)
ignored_codes = self.ignored_lines.get(file, {}).get(info.line, [])
if ignored_codes and info.code:
# Something is ignored on the line, but not this error, so maybe the error
# code is incorrect.
msg = f'Error code "{info.code.code}" not covered by "type: ignore" comment'
if info.code in original_error_codes:
# If there seems to be a "type: ignore" with a stale error
# code, report a more specific note.
old_code = original_error_codes[info.code].code
if old_code in ignored_codes:
msg = (
f'Error code changed to {info.code.code}; "type: ignore" comment '
+ "may be out of date"
)
note = ErrorInfo(
import_ctx=info.import_ctx,
file=info.file,
module=info.module,
typ=info.type,
function_or_member=info.function_or_member,
line=info.line,
column=info.column,
end_line=info.end_line,
end_column=info.end_column,
severity="note",
message=msg,
code=None,
blocker=False,
only_once=False,
allow_dups=False,
)
self._add_error_info(file, note)
if (
self.options.show_error_code_links
and not self.options.hide_error_codes
and info.code is not None
and info.code not in HIDE_LINK_CODES
and info.code.code in mypy_error_codes
):
message = f"See {BASE_RTD_URL}-{info.code.code} for more info"
if message in self.only_once_messages:
return
self.only_once_messages.add(message)
info = ErrorInfo(
import_ctx=info.import_ctx,
file=info.file,
module=info.module,
typ=info.type,
function_or_member=info.function_or_member,
line=info.line,
column=info.column,
end_line=info.end_line,
end_column=info.end_column,
severity="note",
message=message,
code=info.code,
blocker=False,
only_once=True,
allow_dups=False,
priority=20,
)
self._add_error_info(file, info)
def has_many_errors(self) -> bool:
if self.options.many_errors_threshold < 0:
return False
if len(self.error_info_map) >= self.options.many_errors_threshold:
return True
if (
sum(len(errors) for errors in self.error_info_map.values())
>= self.options.many_errors_threshold
):
return True
return False
def report_hidden_errors(self, info: ErrorInfo) -> None:
message = (
"(Skipping most remaining errors due to unresolved imports or missing stubs; "
+ "fix these first)"
)
if message in self.only_once_messages:
return
self.only_once_messages.add(message)
new_info = ErrorInfo(
import_ctx=info.import_ctx,
file=info.file,
module=info.module,
typ=None,
function_or_member=None,
line=info.line,
column=info.column,
end_line=info.end_line,
end_column=info.end_column,
severity="note",
message=message,
code=None,
blocker=False,
only_once=True,
allow_dups=False,
origin=info.origin,
target=info.target,
)
self._add_error_info(info.origin[0], new_info)
def is_ignored_error(self, line: int, info: ErrorInfo, ignores: dict[int, list[str]]) -> bool:
if info.blocker:
# Blocking errors can never be ignored
return False
if info.code and not self.is_error_code_enabled(info.code):
return True
if line not in ignores:
return False
if not ignores[line]:
# Empty list means that we ignore all errors
return True
if info.code and self.is_error_code_enabled(info.code):
return (
info.code.code in ignores[line]
or info.code.sub_code_of is not None
and info.code.sub_code_of.code in ignores[line]
)
return False
def is_error_code_enabled(self, error_code: ErrorCode) -> bool:
if self.options:
current_mod_disabled = self.options.disabled_error_codes
current_mod_enabled = self.options.enabled_error_codes
else:
current_mod_disabled = set()
current_mod_enabled = set()
if error_code in current_mod_disabled:
return False
elif error_code in current_mod_enabled:
return True
elif error_code.sub_code_of is not None and error_code.sub_code_of in current_mod_disabled:
return False
else:
return error_code.default_enabled
def clear_errors_in_targets(self, path: str, targets: set[str]) -> None:
"""Remove errors in specific fine-grained targets within a file."""
if path in self.error_info_map:
new_errors = []
has_blocker = False
for info in self.error_info_map[path]:
if info.target not in targets:
new_errors.append(info)
has_blocker |= info.blocker
elif info.only_once:
self.only_once_messages.remove(info.message)
self.error_info_map[path] = new_errors
if not has_blocker and path in self.has_blockers:
self.has_blockers.remove(path)
def generate_unused_ignore_errors(self, file: str) -> None:
if (
is_typeshed_file(self.options.abs_custom_typeshed_dir if self.options else None, file)
or file in self.ignored_files
):
return
ignored_lines = self.ignored_lines[file]
used_ignored_lines = self.used_ignored_lines[file]
for line, ignored_codes in ignored_lines.items():
if line in self.skipped_lines[file]:
continue
if codes.UNUSED_IGNORE.code in ignored_codes:
continue
used_ignored_codes = used_ignored_lines[line]
unused_ignored_codes = set(ignored_codes) - set(used_ignored_codes)
# `ignore` is used
if not ignored_codes and used_ignored_codes:
continue
# All codes appearing in `ignore[...]` are used
if ignored_codes and not unused_ignored_codes:
continue
# Display detail only when `ignore[...]` specifies more than one error code
unused_codes_message = ""
if len(ignored_codes) > 1 and unused_ignored_codes:
unused_codes_message = f"[{', '.join(sorted(unused_ignored_codes))}]"
message = f'Unused "type: ignore{unused_codes_message}" comment'
for unused in unused_ignored_codes:
narrower = set(used_ignored_codes) & codes.sub_code_map[unused]
if narrower:
message += f", use narrower [{', '.join(narrower)}] instead of [{unused}] code"
# Don't use report since add_error_info will ignore the error!
info = ErrorInfo(
import_ctx=self.import_context(),
file=file,
module=self.current_module(),
typ=None,
function_or_member=None,
line=line,
column=-1,
end_line=line,
end_column=-1,
severity="error",
message=message,
code=codes.UNUSED_IGNORE,
blocker=False,
only_once=False,
allow_dups=False,
)
self._add_error_info(file, info)
def generate_ignore_without_code_errors(
self, file: str, is_warning_unused_ignores: bool
) -> None:
if (
is_typeshed_file(self.options.abs_custom_typeshed_dir if self.options else None, file)
or file in self.ignored_files
):
return
used_ignored_lines = self.used_ignored_lines[file]
# If the whole file is ignored, ignore it.
if used_ignored_lines:
_, used_codes = min(used_ignored_lines.items())
if codes.FILE.code in used_codes:
return
for line, ignored_codes in self.ignored_lines[file].items():
if ignored_codes:
continue
# If the ignore is itself unused and that would be warned about, let
# that error stand alone
if is_warning_unused_ignores and not used_ignored_lines[line]:
continue
codes_hint = ""
ignored_codes = sorted(set(used_ignored_lines[line]))
if ignored_codes:
codes_hint = f' (consider "type: ignore[{", ".join(ignored_codes)}]" instead)'
message = f'"type: ignore" comment without error code{codes_hint}'
# Don't use report since add_error_info will ignore the error!
info = ErrorInfo(
import_ctx=self.import_context(),
file=file,
module=self.current_module(),
typ=None,
function_or_member=None,
line=line,
column=-1,
end_line=line,
end_column=-1,
severity="error",
message=message,
code=codes.IGNORE_WITHOUT_CODE,
blocker=False,
only_once=False,
allow_dups=False,
)
self._add_error_info(file, info)
def num_messages(self) -> int:
"""Return the number of generated messages."""
return sum(len(x) for x in self.error_info_map.values())
def is_errors(self) -> bool:
"""Are there any generated messages?"""
return bool(self.error_info_map)
def is_blockers(self) -> bool:
"""Are the any errors that are blockers?"""
return bool(self.has_blockers)
def blocker_module(self) -> str | None:
"""Return the module with a blocking error, or None if not possible."""
for path in self.has_blockers:
for err in self.error_info_map[path]:
if err.blocker:
return err.module
return None
def is_errors_for_file(self, file: str) -> bool:
"""Are there any errors for the given file?"""
return file in self.error_info_map and file not in self.ignored_files
def prefer_simple_messages(self) -> bool:
"""Should we generate simple/fast error messages?
Return True if errors are not shown to user, i.e. errors are ignored
or they are collected for internal use only.
If True, we should prefer to generate a simple message quickly.
All normal errors should still be reported.
"""
if self.file in self.ignored_files:
# Errors ignored, so no point generating fancy messages
return True
for _watcher in self._watchers:
if _watcher._filter is True and _watcher._filtered is None:
# Errors are filtered
return True
return False
def raise_error(self, use_stdout: bool = True) -> NoReturn:
"""Raise a CompileError with the generated messages.
Render the messages suitable for displaying.
"""
# self.new_messages() will format all messages that haven't already
# been returned from a file_messages() call.
raise CompileError(
self.new_messages(), use_stdout=use_stdout, module_with_blocker=self.blocker_module()
)
def format_messages(
self, error_tuples: list[ErrorTuple], source_lines: list[str] | None
) -> list[str]:
"""Return a string list that represents the error messages.
        Use a form suitable for displaying to the user. If self.options.pretty
        is True, also append a relevant trimmed source code line (only for
        severity 'error').
"""
a: list[str] = []
for (
file,
line,
column,
end_line,
end_column,
severity,
message,
allow_dups,
code,
) in error_tuples:
s = ""
if file is not None:
if self.options.show_column_numbers and line >= 0 and column >= 0:
srcloc = f"{file}:{line}:{1 + column}"
if self.options.show_error_end and end_line >= 0 and end_column >= 0:
srcloc += f":{end_line}:{end_column}"
elif line >= 0:
srcloc = f"{file}:{line}"
else:
srcloc = file
s = f"{srcloc}: {severity}: {message}"
else:
s = message
if (
not self.hide_error_codes
and code
and (severity != "note" or code in SHOW_NOTE_CODES)
):
# If note has an error code, it is related to a previous error. Avoid
# displaying duplicate error codes.
s = f"{s} [{code.code}]"
a.append(s)
if self.options.pretty:
# Add source code fragment and a location marker.
if severity == "error" and source_lines and line > 0:
source_line = source_lines[line - 1]
source_line_expanded = source_line.expandtabs()
if column < 0:
# Something went wrong, take first non-empty column.
column = len(source_line) - len(source_line.lstrip())
# Shifts column after tab expansion
column = len(source_line[:column].expandtabs())
end_column = len(source_line[:end_column].expandtabs())
# Note, currently coloring uses the offset to detect source snippets,
# so these offsets should not be arbitrary.
a.append(" " * DEFAULT_SOURCE_OFFSET + source_line_expanded)
marker = "^"
if end_line == line and end_column > column:
marker = f'^{"~" * (end_column - column - 1)}'
a.append(" " * (DEFAULT_SOURCE_OFFSET + column) + marker)
return a
def file_messages(self, path: str, formatter: ErrorFormatter | None = None) -> list[str]:
"""Return a string list of new error messages from a given file.
Use a form suitable for displaying to the user.
"""
if path not in self.error_info_map:
return []
error_info = self.error_info_map[path]
error_info = [info for info in error_info if not info.hidden]
error_tuples = self.render_messages(self.sort_messages(error_info))
error_tuples = self.remove_duplicates(error_tuples)
if formatter is not None:
errors = create_errors(error_tuples)
return [formatter.report_error(err) for err in errors]
self.flushed_files.add(path)
source_lines = None
if self.options.pretty and self.read_source:
source_lines = self.read_source(path)
return self.format_messages(error_tuples, source_lines)
def new_messages(self) -> list[str]:
"""Return a string list of new error messages.
Use a form suitable for displaying to the user.
Errors from different files are ordered based on the order in which
they first generated an error.
"""
msgs = []
for path in self.error_info_map.keys():
if path not in self.flushed_files:
msgs.extend(self.file_messages(path))
return msgs
def targets(self) -> set[str]:
"""Return a set of all targets that contain errors."""
# TODO: Make sure that either target is always defined or that not being defined
# is okay for fine-grained incremental checking.
return {
info.target for errs in self.error_info_map.values() for info in errs if info.target
}
def render_messages(self, errors: list[ErrorInfo]) -> list[ErrorTuple]:
"""Translate the messages into a sequence of tuples.
        Each tuple is of form
        (path, line, column, end_line, end_column, severity, message, allow_dups, code).
The rendered sequence includes information about error contexts.
The path item may be None. If the line item is negative, the
line number is not defined for the tuple.
"""
result: list[ErrorTuple] = []
prev_import_context: list[tuple[str, int]] = []
prev_function_or_member: str | None = None
prev_type: str | None = None
for e in errors:
# Report module import context, if different from previous message.
if not self.options.show_error_context:
pass
elif e.import_ctx != prev_import_context:
last = len(e.import_ctx) - 1
i = last
while i >= 0:
path, line = e.import_ctx[i]
fmt = "{}:{}: note: In module imported here"
if i < last:
fmt = "{}:{}: note: ... from here"
if i > 0:
fmt += ","
else:
fmt += ":"
# Remove prefix to ignore from path (if present) to
# simplify path.
path = remove_path_prefix(path, self.ignore_prefix)
result.append(
(None, -1, -1, -1, -1, "note", fmt.format(path, line), e.allow_dups, None)
)
i -= 1
file = self.simplify_path(e.file)
# Report context within a source file.
if not self.options.show_error_context:
pass
elif e.function_or_member != prev_function_or_member or e.type != prev_type:
if e.function_or_member is None:
if e.type is None:
result.append(
(file, -1, -1, -1, -1, "note", "At top level:", e.allow_dups, None)
)
else:
result.append(
(
file,
-1,
-1,
-1,
-1,
"note",
f'In class "{e.type}":',
e.allow_dups,
None,
)
)
else:
if e.type is None:
result.append(
(
file,
-1,
-1,
-1,
-1,
"note",
f'In function "{e.function_or_member}":',
e.allow_dups,
None,
)
)
else:
result.append(
(
file,
-1,
-1,
-1,
-1,
"note",
'In member "{}" of class "{}":'.format(
e.function_or_member, e.type
),
e.allow_dups,
None,
)
)
elif e.type != prev_type:
if e.type is None:
result.append(
(file, -1, -1, -1, -1, "note", "At top level:", e.allow_dups, None)
)
else:
result.append(
(file, -1, -1, -1, -1, "note", f'In class "{e.type}":', e.allow_dups, None)
)
if isinstance(e.message, ErrorMessage):
result.append(
(
file,
e.line,
e.column,
e.end_line,
e.end_column,
e.severity,
e.message.value,
e.allow_dups,
e.code,
)
)
else:
result.append(
(
file,
e.line,
e.column,
e.end_line,
e.end_column,
e.severity,
e.message,
e.allow_dups,
e.code,
)
)
prev_import_context = e.import_ctx
prev_function_or_member = e.function_or_member
prev_type = e.type
return result
def sort_messages(self, errors: list[ErrorInfo]) -> list[ErrorInfo]:
"""Sort an array of error messages locally by line number.
I.e., sort a run of consecutive messages with the same
context by line number, but otherwise retain the general
ordering of the messages.
"""
result: list[ErrorInfo] = []
i = 0
while i < len(errors):
i0 = i
# Find neighbouring errors with the same context and file.
while (
i + 1 < len(errors)
and errors[i + 1].import_ctx == errors[i].import_ctx
and errors[i + 1].file == errors[i].file
):
i += 1
i += 1
# Sort the errors specific to a file according to line number and column.
a = sorted(errors[i0:i], key=lambda x: (x.line, x.column))
a = self.sort_within_context(a)
result.extend(a)
return result
def sort_within_context(self, errors: list[ErrorInfo]) -> list[ErrorInfo]:
"""For the same location decide which messages to show first/last.
Currently, we only compare within the same error code, to decide the
order of various additional notes.
"""
result = []
i = 0
while i < len(errors):
i0 = i
# Find neighbouring errors with the same position and error code.
while (
i + 1 < len(errors)
and errors[i + 1].line == errors[i].line
and errors[i + 1].column == errors[i].column
and errors[i + 1].end_line == errors[i].end_line
and errors[i + 1].end_column == errors[i].end_column
and errors[i + 1].code == errors[i].code
):
i += 1
i += 1
# Sort the messages specific to a given error by priority.
a = sorted(errors[i0:i], key=lambda x: x.priority)
result.extend(a)
return result
def remove_duplicates(self, errors: list[ErrorTuple]) -> list[ErrorTuple]:
"""Remove duplicates from a sorted error list."""
res: list[ErrorTuple] = []
i = 0
while i < len(errors):
dup = False
# Use slightly special formatting for member conflicts reporting.
conflicts_notes = False
j = i - 1
# Find duplicates, unless duplicates are allowed.
if not errors[i][7]:
while j >= 0 and errors[j][0] == errors[i][0]:
if errors[j][6].strip() == "Got:":
conflicts_notes = True
j -= 1
j = i - 1
while j >= 0 and errors[j][0] == errors[i][0] and errors[j][1] == errors[i][1]:
if (
errors[j][5] == errors[i][5]
and
# Allow duplicate notes in overload conflicts reporting.
not (
(errors[i][5] == "note" and errors[i][6].strip() in allowed_duplicates)
or (errors[i][6].strip().startswith("def ") and conflicts_notes)
)
and errors[j][6] == errors[i][6]
): # ignore column
dup = True
break
j -= 1
if not dup:
res.append(errors[i])
i += 1
return res
class CompileError(Exception):
"""Exception raised when there is a compile error.
It can be a parse, semantic analysis, type check or other
compilation-related error.
CompileErrors raised from an errors object carry all of the
messages that have not been reported out by error streaming.
This is patched up by build.build to contain either all error
messages (if errors were streamed) or none (if they were not).
"""
messages: list[str]
use_stdout = False
# Can be set in case there was a module with a blocking error
module_with_blocker: str | None = None
def __init__(
self, messages: list[str], use_stdout: bool = False, module_with_blocker: str | None = None
) -> None:
super().__init__("\n".join(messages))
self.messages = messages
self.use_stdout = use_stdout
self.module_with_blocker = module_with_blocker
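# A minimal consumer sketch (illustrative, not part of upstream mypy): callers
# that catch CompileError can render the buffered messages directly, since they
# mirror what error streaming would have printed.
def _render_compile_error(err: CompileError) -> str:
    return "\n".join(err.messages)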
def remove_path_prefix(path: str, prefix: str | None) -> str:
"""If path starts with prefix, return copy of path with the prefix removed.
    Otherwise, return path unchanged (prefix may be None, in which case nothing is stripped).
"""
if prefix is not None and path.startswith(prefix):
return path[len(prefix) :]
else:
return path
def report_internal_error(
err: Exception,
file: str | None,
line: int,
errors: Errors,
options: Options,
stdout: TextIO | None = None,
stderr: TextIO | None = None,
) -> NoReturn:
"""Report internal error and exit.
This optionally starts pdb or shows a traceback.
"""
stdout = stdout or sys.stdout
stderr = stderr or sys.stderr
# Dump out errors so far, they often provide a clue.
# But catch unexpected errors rendering them.
try:
for msg in errors.new_messages():
print(msg)
except Exception as e:
print("Failed to dump errors:", repr(e), file=stderr)
# Compute file:line prefix for official-looking error messages.
if file:
if line:
prefix = f"{file}:{line}: "
else:
prefix = f"{file}: "
else:
prefix = ""
# Print "INTERNAL ERROR" message.
print(
f"{prefix}error: INTERNAL ERROR --",
"Please try using mypy master on GitHub:\n"
"https://mypy.readthedocs.io/en/stable/common_issues.html"
"#using-a-development-mypy-build",
file=stderr,
)
if options.show_traceback:
print("Please report a bug at https://github.com/python/mypy/issues", file=stderr)
else:
print(
"If this issue continues with mypy master, "
"please report a bug at https://github.com/python/mypy/issues",
file=stderr,
)
print(f"version: {mypy_version}", file=stderr)
# If requested, drop into pdb. This overrides show_tb.
if options.pdb:
print("Dropping into pdb", file=stderr)
import pdb
pdb.post_mortem(sys.exc_info()[2])
# If requested, print traceback, else print note explaining how to get one.
if options.raise_exceptions:
raise err
if not options.show_traceback:
if not options.pdb:
print(
"{}: note: please use --show-traceback to print a traceback "
"when reporting a bug".format(prefix),
file=stderr,
)
else:
tb = traceback.extract_stack()[:-2]
tb2 = traceback.extract_tb(sys.exc_info()[2])
print("Traceback (most recent call last):")
for s in traceback.format_list(tb + tb2):
print(s.rstrip("\n"))
print(f"{type(err).__name__}: {err}", file=stdout)
print(f"{prefix}: note: use --pdb to drop into pdb", file=stderr)
# Exit. The caller has nothing more to say.
# We use exit code 2 to signal that this is no ordinary error.
raise SystemExit(2)
class MypyError:
def __init__(
self,
file_path: str,
line: int,
column: int,
message: str,
errorcode: ErrorCode | None,
severity: Literal["error", "note"],
) -> None:
self.file_path = file_path
self.line = line
self.column = column
self.message = message
self.errorcode = errorcode
self.severity = severity
self.hints: list[str] = []
# (file_path, line, column)
_ErrorLocation = Tuple[str, int, int]
def create_errors(error_tuples: list[ErrorTuple]) -> list[MypyError]:
errors: list[MypyError] = []
latest_error_at_location: dict[_ErrorLocation, MypyError] = {}
for error_tuple in error_tuples:
file_path, line, column, _, _, severity, message, _, errorcode = error_tuple
if file_path is None:
continue
assert severity in ("error", "note")
if severity == "note":
error_location = (file_path, line, column)
error = latest_error_at_location.get(error_location)
if error is None:
# This is purely a note, with no error correlated to it
error = MypyError(file_path, line, column, message, errorcode, severity="note")
errors.append(error)
continue
error.hints.append(message)
else:
error = MypyError(file_path, line, column, message, errorcode, severity="error")
errors.append(error)
error_location = (file_path, line, column)
latest_error_at_location[error_location] = error
return errors
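# Minimal sketch (illustrative, not part of upstream mypy): a note that shares a
# location with a preceding error becomes a hint on that error, so callers see a
# single grouped MypyError instead of two separate rows.
def _demo_create_errors() -> list[MypyError]:
    tuples: list[ErrorTuple] = [
        ("m.py", 3, 0, 3, 5, "error", "Unsupported operand types", False, None),
        ("m.py", 3, 0, 3, 5, "note", "Left operand is of type ...", False, None),
    ]
    grouped = create_errors(tuples)
    assert len(grouped) == 1
    assert grouped[0].hints == ["Left operand is of type ..."]
    return grouped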
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/errors.py
|
Python
|
NOASSERTION
| 49,818 |
"""
Evaluate an expression.
Used by stubtest; in a separate file because things break if we don't
put it in a mypyc-compiled file.
"""
import ast
from typing import Final
import mypy.nodes
from mypy.visitor import ExpressionVisitor
UNKNOWN = object()
class _NodeEvaluator(ExpressionVisitor[object]):
def visit_int_expr(self, o: mypy.nodes.IntExpr) -> int:
return o.value
def visit_str_expr(self, o: mypy.nodes.StrExpr) -> str:
return o.value
def visit_bytes_expr(self, o: mypy.nodes.BytesExpr) -> object:
# The value of a BytesExpr is a string created from the repr()
# of the bytes object. Get the original bytes back.
try:
return ast.literal_eval(f"b'{o.value}'")
except SyntaxError:
return ast.literal_eval(f'b"{o.value}"')
def visit_float_expr(self, o: mypy.nodes.FloatExpr) -> float:
return o.value
def visit_complex_expr(self, o: mypy.nodes.ComplexExpr) -> object:
return o.value
def visit_ellipsis(self, o: mypy.nodes.EllipsisExpr) -> object:
return Ellipsis
def visit_star_expr(self, o: mypy.nodes.StarExpr) -> object:
return UNKNOWN
def visit_name_expr(self, o: mypy.nodes.NameExpr) -> object:
if o.name == "True":
return True
elif o.name == "False":
return False
elif o.name == "None":
return None
# TODO: Handle more names by figuring out a way to hook into the
# symbol table.
return UNKNOWN
def visit_member_expr(self, o: mypy.nodes.MemberExpr) -> object:
return UNKNOWN
def visit_yield_from_expr(self, o: mypy.nodes.YieldFromExpr) -> object:
return UNKNOWN
def visit_yield_expr(self, o: mypy.nodes.YieldExpr) -> object:
return UNKNOWN
def visit_call_expr(self, o: mypy.nodes.CallExpr) -> object:
return UNKNOWN
def visit_op_expr(self, o: mypy.nodes.OpExpr) -> object:
return UNKNOWN
def visit_comparison_expr(self, o: mypy.nodes.ComparisonExpr) -> object:
return UNKNOWN
def visit_cast_expr(self, o: mypy.nodes.CastExpr) -> object:
return o.expr.accept(self)
def visit_assert_type_expr(self, o: mypy.nodes.AssertTypeExpr) -> object:
return o.expr.accept(self)
def visit_reveal_expr(self, o: mypy.nodes.RevealExpr) -> object:
return UNKNOWN
def visit_super_expr(self, o: mypy.nodes.SuperExpr) -> object:
return UNKNOWN
def visit_unary_expr(self, o: mypy.nodes.UnaryExpr) -> object:
operand = o.expr.accept(self)
if operand is UNKNOWN:
return UNKNOWN
if o.op == "-":
if isinstance(operand, (int, float, complex)):
return -operand
elif o.op == "+":
if isinstance(operand, (int, float, complex)):
return +operand
elif o.op == "~":
if isinstance(operand, int):
return ~operand
elif o.op == "not":
if isinstance(operand, (bool, int, float, str, bytes)):
return not operand
return UNKNOWN
def visit_assignment_expr(self, o: mypy.nodes.AssignmentExpr) -> object:
return o.value.accept(self)
def visit_list_expr(self, o: mypy.nodes.ListExpr) -> object:
items = [item.accept(self) for item in o.items]
if all(item is not UNKNOWN for item in items):
return items
return UNKNOWN
def visit_dict_expr(self, o: mypy.nodes.DictExpr) -> object:
items = [
(UNKNOWN if key is None else key.accept(self), value.accept(self))
for key, value in o.items
]
if all(key is not UNKNOWN and value is not None for key, value in items):
return dict(items)
return UNKNOWN
def visit_tuple_expr(self, o: mypy.nodes.TupleExpr) -> object:
items = [item.accept(self) for item in o.items]
if all(item is not UNKNOWN for item in items):
return tuple(items)
return UNKNOWN
def visit_set_expr(self, o: mypy.nodes.SetExpr) -> object:
items = [item.accept(self) for item in o.items]
if all(item is not UNKNOWN for item in items):
return set(items)
return UNKNOWN
def visit_index_expr(self, o: mypy.nodes.IndexExpr) -> object:
return UNKNOWN
def visit_type_application(self, o: mypy.nodes.TypeApplication) -> object:
return UNKNOWN
def visit_lambda_expr(self, o: mypy.nodes.LambdaExpr) -> object:
return UNKNOWN
def visit_list_comprehension(self, o: mypy.nodes.ListComprehension) -> object:
return UNKNOWN
def visit_set_comprehension(self, o: mypy.nodes.SetComprehension) -> object:
return UNKNOWN
def visit_dictionary_comprehension(self, o: mypy.nodes.DictionaryComprehension) -> object:
return UNKNOWN
def visit_generator_expr(self, o: mypy.nodes.GeneratorExpr) -> object:
return UNKNOWN
def visit_slice_expr(self, o: mypy.nodes.SliceExpr) -> object:
return UNKNOWN
def visit_conditional_expr(self, o: mypy.nodes.ConditionalExpr) -> object:
return UNKNOWN
def visit_type_var_expr(self, o: mypy.nodes.TypeVarExpr) -> object:
return UNKNOWN
def visit_paramspec_expr(self, o: mypy.nodes.ParamSpecExpr) -> object:
return UNKNOWN
def visit_type_var_tuple_expr(self, o: mypy.nodes.TypeVarTupleExpr) -> object:
return UNKNOWN
def visit_type_alias_expr(self, o: mypy.nodes.TypeAliasExpr) -> object:
return UNKNOWN
def visit_namedtuple_expr(self, o: mypy.nodes.NamedTupleExpr) -> object:
return UNKNOWN
def visit_enum_call_expr(self, o: mypy.nodes.EnumCallExpr) -> object:
return UNKNOWN
def visit_typeddict_expr(self, o: mypy.nodes.TypedDictExpr) -> object:
return UNKNOWN
def visit_newtype_expr(self, o: mypy.nodes.NewTypeExpr) -> object:
return UNKNOWN
def visit__promote_expr(self, o: mypy.nodes.PromoteExpr) -> object:
return UNKNOWN
def visit_await_expr(self, o: mypy.nodes.AwaitExpr) -> object:
return UNKNOWN
def visit_temp_node(self, o: mypy.nodes.TempNode) -> object:
return UNKNOWN
_evaluator: Final = _NodeEvaluator()
def evaluate_expression(expr: mypy.nodes.Expression) -> object:
"""Evaluate an expression at runtime.
Return the result of the expression, or UNKNOWN if the expression cannot be
evaluated.
"""
return expr.accept(_evaluator)
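# Minimal usage sketch (illustrative, not part of upstream mypy): literal node
# trees evaluate to their Python values, while anything unsupported collapses to
# the UNKNOWN sentinel.
def _demo_evaluate_expression() -> None:
    negative = mypy.nodes.UnaryExpr("-", mypy.nodes.IntExpr(5))
    assert evaluate_expression(negative) == -5
    assert evaluate_expression(mypy.nodes.NameExpr("some_variable")) is UNKNOWN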
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/evalexpr.py
|
Python
|
NOASSERTION
| 6,562 |
from __future__ import annotations
from typing import Final, Iterable, Mapping, Sequence, TypeVar, cast, overload
from mypy.nodes import ARG_STAR, FakeInfo, Var
from mypy.state import state
from mypy.types import (
ANY_STRATEGY,
AnyType,
BoolTypeQuery,
CallableType,
DeletedType,
ErasedType,
FunctionLike,
Instance,
LiteralType,
NoneType,
Overloaded,
Parameters,
ParamSpecFlavor,
ParamSpecType,
PartialType,
ProperType,
TrivialSyntheticTypeTranslator,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeType,
TypeVarId,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
flatten_nested_unions,
get_proper_type,
split_with_prefix_and_suffix,
)
from mypy.typevartuples import split_with_instance
# Solving the import cycle:
import mypy.type_visitor # ruff: isort: skip
# WARNING: these functions should never (directly or indirectly) depend on
# is_subtype(), meet_types(), join_types() etc.
# TODO: add a static dependency test for this.
@overload
def expand_type(typ: CallableType, env: Mapping[TypeVarId, Type]) -> CallableType: ...
@overload
def expand_type(typ: ProperType, env: Mapping[TypeVarId, Type]) -> ProperType: ...
@overload
def expand_type(typ: Type, env: Mapping[TypeVarId, Type]) -> Type: ...
def expand_type(typ: Type, env: Mapping[TypeVarId, Type]) -> Type:
"""Substitute any type variable references in a type given by a type
environment.
"""
return typ.accept(ExpandTypeVisitor(env))
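# For example, with an env that maps T's id to int, expanding Callable[[T], T]
# produces Callable[[int], int].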
@overload
def expand_type_by_instance(typ: CallableType, instance: Instance) -> CallableType: ...
@overload
def expand_type_by_instance(typ: ProperType, instance: Instance) -> ProperType: ...
@overload
def expand_type_by_instance(typ: Type, instance: Instance) -> Type: ...
def expand_type_by_instance(typ: Type, instance: Instance) -> Type:
"""Substitute type variables in type using values from an Instance.
Type variables are considered to be bound by the class declaration."""
if not instance.args and not instance.type.has_type_var_tuple_type:
return typ
else:
variables: dict[TypeVarId, Type] = {}
if instance.type.has_type_var_tuple_type:
assert instance.type.type_var_tuple_prefix is not None
assert instance.type.type_var_tuple_suffix is not None
args_prefix, args_middle, args_suffix = split_with_instance(instance)
tvars_prefix, tvars_middle, tvars_suffix = split_with_prefix_and_suffix(
tuple(instance.type.defn.type_vars),
instance.type.type_var_tuple_prefix,
instance.type.type_var_tuple_suffix,
)
tvar = tvars_middle[0]
assert isinstance(tvar, TypeVarTupleType)
variables = {tvar.id: TupleType(list(args_middle), tvar.tuple_fallback)}
instance_args = args_prefix + args_suffix
tvars = tvars_prefix + tvars_suffix
else:
tvars = tuple(instance.type.defn.type_vars)
instance_args = instance.args
for binder, arg in zip(tvars, instance_args):
assert isinstance(binder, TypeVarLikeType)
variables[binder.id] = arg
return expand_type(typ, variables)
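# For example, given class C(Generic[T]) and the instance type C[int], expanding
# a member signature that mentions T against that instance substitutes T -> int.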
F = TypeVar("F", bound=FunctionLike)
def freshen_function_type_vars(callee: F) -> F:
"""Substitute fresh type variables for generic function type variables."""
if isinstance(callee, CallableType):
if not callee.is_generic():
return cast(F, callee)
tvs = []
tvmap: dict[TypeVarId, Type] = {}
for v in callee.variables:
tv = v.new_unification_variable(v)
tvs.append(tv)
tvmap[v.id] = tv
fresh = expand_type(callee, tvmap).copy_modified(variables=tvs)
return cast(F, fresh)
else:
assert isinstance(callee, Overloaded)
fresh_overload = Overloaded([freshen_function_type_vars(item) for item in callee.items])
return cast(F, fresh_overload)
class HasGenericCallable(BoolTypeQuery):
def __init__(self) -> None:
super().__init__(ANY_STRATEGY)
def visit_callable_type(self, t: CallableType) -> bool:
return t.is_generic() or super().visit_callable_type(t)
# Share a singleton since this is performance sensitive
has_generic_callable: Final = HasGenericCallable()
T = TypeVar("T", bound=Type)
def freshen_all_functions_type_vars(t: T) -> T:
result: Type
has_generic_callable.reset()
if not t.accept(has_generic_callable):
return t # Fast path to avoid expensive freshening
else:
result = t.accept(FreshenCallableVisitor())
assert isinstance(result, type(t))
return result
class FreshenCallableVisitor(mypy.type_visitor.TypeTranslator):
def visit_callable_type(self, t: CallableType) -> Type:
result = super().visit_callable_type(t)
assert isinstance(result, ProperType) and isinstance(result, CallableType)
return freshen_function_type_vars(result)
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
# Same as for ExpandTypeVisitor
return t.copy_modified(args=[arg.accept(self) for arg in t.args])
class ExpandTypeVisitor(TrivialSyntheticTypeTranslator):
"""Visitor that substitutes type variables with values."""
variables: Mapping[TypeVarId, Type] # TypeVar id -> TypeVar value
def __init__(self, variables: Mapping[TypeVarId, Type]) -> None:
super().__init__()
self.variables = variables
self.recursive_tvar_guard: dict[TypeVarId, Type | None] = {}
def visit_unbound_type(self, t: UnboundType) -> Type:
return t
def visit_any(self, t: AnyType) -> Type:
return t
def visit_none_type(self, t: NoneType) -> Type:
return t
def visit_uninhabited_type(self, t: UninhabitedType) -> Type:
return t
def visit_deleted_type(self, t: DeletedType) -> Type:
return t
def visit_erased_type(self, t: ErasedType) -> Type:
# This may happen during type inference if some function argument
# type is a generic callable, and its erased form will appear in inferred
# constraints, then solver may check subtyping between them, which will trigger
# unify_generic_callables(), this is why we can get here. Another example is
# when inferring type of lambda in generic context, the lambda body contains
# a generic method in generic class.
return t
def visit_instance(self, t: Instance) -> Type:
args = self.expand_types_with_unpack(list(t.args))
if isinstance(t.type, FakeInfo):
# The type checker expands function definitions and bodies
# if they depend on constrained type variables but the body
# might contain a tuple type comment (e.g., # type: (int, float)),
# in which case 't.type' is not yet available.
#
# See: https://github.com/python/mypy/issues/16649
return t.copy_modified(args=args)
if t.type.fullname == "builtins.tuple":
# Normalize Tuple[*Tuple[X, ...], ...] -> Tuple[X, ...]
arg = args[0]
if isinstance(arg, UnpackType):
unpacked = get_proper_type(arg.type)
if isinstance(unpacked, Instance):
assert unpacked.type.fullname == "builtins.tuple"
args = list(unpacked.args)
return t.copy_modified(args=args)
def visit_type_var(self, t: TypeVarType) -> Type:
# Normally upper bounds can't contain other type variables, the only exception is
# special type variable Self`0 <: C[T, S], where C is the class where Self is used.
if t.id.is_self():
t = t.copy_modified(upper_bound=t.upper_bound.accept(self))
repl = self.variables.get(t.id, t)
if isinstance(repl, ProperType) and isinstance(repl, Instance):
# TODO: do we really need to do this?
# If I try to remove this special-casing ~40 tests fail on reveal_type().
return repl.copy_modified(last_known_value=None)
if isinstance(repl, TypeVarType) and repl.has_default():
if (tvar_id := repl.id) in self.recursive_tvar_guard:
return self.recursive_tvar_guard[tvar_id] or repl
self.recursive_tvar_guard[tvar_id] = None
repl = repl.accept(self)
if isinstance(repl, TypeVarType):
repl.default = repl.default.accept(self)
self.recursive_tvar_guard[tvar_id] = repl
return repl
def visit_param_spec(self, t: ParamSpecType) -> Type:
# Set prefix to something empty, so we don't duplicate it below.
repl = self.variables.get(t.id, t.copy_modified(prefix=Parameters([], [], [])))
if isinstance(repl, ParamSpecType):
return repl.copy_modified(
flavor=t.flavor,
prefix=t.prefix.copy_modified(
arg_types=self.expand_types(t.prefix.arg_types) + repl.prefix.arg_types,
arg_kinds=t.prefix.arg_kinds + repl.prefix.arg_kinds,
arg_names=t.prefix.arg_names + repl.prefix.arg_names,
),
)
elif isinstance(repl, Parameters):
assert t.flavor == ParamSpecFlavor.BARE
return Parameters(
self.expand_types(t.prefix.arg_types) + repl.arg_types,
t.prefix.arg_kinds + repl.arg_kinds,
t.prefix.arg_names + repl.arg_names,
variables=[*t.prefix.variables, *repl.variables],
imprecise_arg_kinds=repl.imprecise_arg_kinds,
)
else:
# We could encode Any as trivial parameters etc., but it would be too verbose.
# TODO: assert this is a trivial type, like Any, Never, or object.
return repl
def visit_type_var_tuple(self, t: TypeVarTupleType) -> Type:
# Sometimes solver may need to expand a type variable with (a copy of) itself
# (usually together with other TypeVars, but it is hard to filter out TypeVarTuples).
repl = self.variables.get(t.id, t)
if isinstance(repl, TypeVarTupleType):
return repl
elif isinstance(repl, ProperType) and isinstance(repl, (AnyType, UninhabitedType)):
# Some failed inference scenarios will try to set all type variables to Never.
# Instead of being picky and require all the callers to wrap them,
# do this here instead.
# Note: most cases when this happens are handled in expand unpack below, but
# in rare cases (e.g. ParamSpec containing Unpack star args) it may be skipped.
return t.tuple_fallback.copy_modified(args=[repl])
raise NotImplementedError
def visit_unpack_type(self, t: UnpackType) -> Type:
# It is impossible to reasonably implement visit_unpack_type, because
# unpacking inherently expands to something more like a list of types.
#
# Relevant sections that can call unpack should call expand_unpack()
# instead.
# However, if the item is a variadic tuple, we can simply carry it over.
# In particular, if we expand A[*tuple[T, ...]] with substitutions {T: str},
# it is hard to assert this without getting proper type. Another important
# example is non-normalized types when called from semanal.py.
return UnpackType(t.type.accept(self))
def expand_unpack(self, t: UnpackType) -> list[Type]:
assert isinstance(t.type, TypeVarTupleType)
repl = get_proper_type(self.variables.get(t.type.id, t.type))
if isinstance(repl, UnpackType):
repl = get_proper_type(repl.type)
if isinstance(repl, TupleType):
return repl.items
elif (
isinstance(repl, Instance)
and repl.type.fullname == "builtins.tuple"
or isinstance(repl, TypeVarTupleType)
):
return [UnpackType(typ=repl)]
elif isinstance(repl, (AnyType, UninhabitedType)):
# Replace *Ts = Any with *Ts = *tuple[Any, ...] and same for Never.
# These types may appear here as a result of user error or failed inference.
return [UnpackType(t.type.tuple_fallback.copy_modified(args=[repl]))]
else:
raise RuntimeError(f"Invalid type replacement to expand: {repl}")
def visit_parameters(self, t: Parameters) -> Type:
return t.copy_modified(arg_types=self.expand_types(t.arg_types))
def interpolate_args_for_unpack(self, t: CallableType, var_arg: UnpackType) -> list[Type]:
star_index = t.arg_kinds.index(ARG_STAR)
prefix = self.expand_types(t.arg_types[:star_index])
suffix = self.expand_types(t.arg_types[star_index + 1 :])
var_arg_type = get_proper_type(var_arg.type)
new_unpack: Type
if isinstance(var_arg_type, Instance):
# we have something like Unpack[Tuple[Any, ...]]
new_unpack = UnpackType(var_arg.type.accept(self))
elif isinstance(var_arg_type, TupleType):
# We have something like Unpack[Tuple[Unpack[Ts], X1, X2]]
expanded_tuple = var_arg_type.accept(self)
assert isinstance(expanded_tuple, ProperType) and isinstance(expanded_tuple, TupleType)
expanded_items = expanded_tuple.items
fallback = var_arg_type.partial_fallback
new_unpack = UnpackType(TupleType(expanded_items, fallback))
elif isinstance(var_arg_type, TypeVarTupleType):
# We have plain Unpack[Ts]
fallback = var_arg_type.tuple_fallback
expanded_items = self.expand_unpack(var_arg)
new_unpack = UnpackType(TupleType(expanded_items, fallback))
else:
# We have invalid type in Unpack. This can happen when expanding aliases
# to Callable[[*Invalid], Ret]
new_unpack = AnyType(TypeOfAny.from_error, line=var_arg.line, column=var_arg.column)
return prefix + [new_unpack] + suffix
def visit_callable_type(self, t: CallableType) -> CallableType:
param_spec = t.param_spec()
if param_spec is not None:
repl = self.variables.get(param_spec.id)
# If a ParamSpec in a callable type is substituted with a
# callable type, we can't use normal substitution logic,
# since ParamSpec is actually split into two components
# *P.args and **P.kwargs in the original type. Instead, we
# must expand both of them with all the argument types,
# kinds and names in the replacement. The return type in
# the replacement is ignored.
if isinstance(repl, Parameters):
# We need to expand both the types in the prefix and the ParamSpec itself
expanded = t.copy_modified(
arg_types=self.expand_types(t.arg_types[:-2]) + repl.arg_types,
arg_kinds=t.arg_kinds[:-2] + repl.arg_kinds,
arg_names=t.arg_names[:-2] + repl.arg_names,
ret_type=t.ret_type.accept(self),
type_guard=(t.type_guard.accept(self) if t.type_guard is not None else None),
type_is=(t.type_is.accept(self) if t.type_is is not None else None),
imprecise_arg_kinds=(t.imprecise_arg_kinds or repl.imprecise_arg_kinds),
variables=[*repl.variables, *t.variables],
)
var_arg = expanded.var_arg()
if var_arg is not None and isinstance(var_arg.typ, UnpackType):
# Sometimes we get new unpacks after expanding ParamSpec.
expanded.normalize_trivial_unpack()
return expanded
elif isinstance(repl, ParamSpecType):
# We're substituting one ParamSpec for another; this can mean that the prefix
# changes, e.g. substitute Concatenate[int, P] in place of Q.
prefix = repl.prefix
clean_repl = repl.copy_modified(prefix=Parameters([], [], []))
return t.copy_modified(
arg_types=self.expand_types(t.arg_types[:-2])
+ prefix.arg_types
+ [
clean_repl.with_flavor(ParamSpecFlavor.ARGS),
clean_repl.with_flavor(ParamSpecFlavor.KWARGS),
],
arg_kinds=t.arg_kinds[:-2] + prefix.arg_kinds + t.arg_kinds[-2:],
arg_names=t.arg_names[:-2] + prefix.arg_names + t.arg_names[-2:],
ret_type=t.ret_type.accept(self),
from_concatenate=t.from_concatenate or bool(repl.prefix.arg_types),
imprecise_arg_kinds=(t.imprecise_arg_kinds or prefix.imprecise_arg_kinds),
)
var_arg = t.var_arg()
needs_normalization = False
if var_arg is not None and isinstance(var_arg.typ, UnpackType):
needs_normalization = True
arg_types = self.interpolate_args_for_unpack(t, var_arg.typ)
else:
arg_types = self.expand_types(t.arg_types)
expanded = t.copy_modified(
arg_types=arg_types,
ret_type=t.ret_type.accept(self),
type_guard=(t.type_guard.accept(self) if t.type_guard is not None else None),
type_is=(t.type_is.accept(self) if t.type_is is not None else None),
)
if needs_normalization:
return expanded.with_normalized_var_args()
return expanded
def visit_overloaded(self, t: Overloaded) -> Type:
items: list[CallableType] = []
for item in t.items:
new_item = item.accept(self)
assert isinstance(new_item, ProperType)
assert isinstance(new_item, CallableType)
items.append(new_item)
return Overloaded(items)
def expand_types_with_unpack(self, typs: Sequence[Type]) -> list[Type]:
"""Expands a list of types that has an unpack."""
items: list[Type] = []
for item in typs:
if isinstance(item, UnpackType) and isinstance(item.type, TypeVarTupleType):
items.extend(self.expand_unpack(item))
else:
items.append(item.accept(self))
return items
def visit_tuple_type(self, t: TupleType) -> Type:
items = self.expand_types_with_unpack(t.items)
if len(items) == 1:
# Normalize Tuple[*Tuple[X, ...]] -> Tuple[X, ...]
item = items[0]
if isinstance(item, UnpackType):
unpacked = get_proper_type(item.type)
if isinstance(unpacked, Instance):
assert unpacked.type.fullname == "builtins.tuple"
if t.partial_fallback.type.fullname != "builtins.tuple":
# If it is a subtype (like named tuple) we need to preserve it,
# this essentially mimics the logic in tuple_fallback().
return t.partial_fallback.accept(self)
return unpacked
fallback = t.partial_fallback.accept(self)
assert isinstance(fallback, ProperType) and isinstance(fallback, Instance)
return t.copy_modified(items=items, fallback=fallback)
def visit_typeddict_type(self, t: TypedDictType) -> Type:
if cached := self.get_cached(t):
return cached
fallback = t.fallback.accept(self)
assert isinstance(fallback, ProperType) and isinstance(fallback, Instance)
result = t.copy_modified(item_types=self.expand_types(t.items.values()), fallback=fallback)
self.set_cached(t, result)
return result
def visit_literal_type(self, t: LiteralType) -> Type:
# TODO: Verify this implementation is correct
return t
def visit_union_type(self, t: UnionType) -> Type:
# Use cache to avoid O(n**2) or worse expansion of types during translation
# (only for large unions, since caching adds overhead)
use_cache = len(t.items) > 3
if use_cache and (cached := self.get_cached(t)):
return cached
expanded = self.expand_types(t.items)
# After substituting for type variables in t.items, some resulting types
# might be subtypes of others, however calling make_simplified_union()
# can cause recursion, so we just remove strict duplicates.
simplified = UnionType.make_union(
remove_trivial(flatten_nested_unions(expanded)), t.line, t.column
)
# This call to get_proper_type() is unfortunate but is required to preserve
# the invariant that ProperType will stay ProperType after applying expand_type(),
# otherwise a single item union of a type alias will break it. Note this should not
# cause infinite recursion since pathological aliases like A = Union[A, B] are
# banned at the semantic analysis level.
result = get_proper_type(simplified)
if use_cache:
self.set_cached(t, result)
return result
def visit_partial_type(self, t: PartialType) -> Type:
return t
def visit_type_type(self, t: TypeType) -> Type:
# TODO: Verify that the new item type is valid (instance or
# union of instances or Any). Sadly we can't report errors
# here yet.
item = t.item.accept(self)
return TypeType.make_normalized(item)
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
# Target of the type alias cannot contain type variables (not bound by the type
# alias itself), so we just expand the arguments.
args = self.expand_types_with_unpack(t.args)
# TODO: normalize if target is Tuple, and args are [*tuple[X, ...]]?
return t.copy_modified(args=args)
def expand_types(self, types: Iterable[Type]) -> list[Type]:
a: list[Type] = []
for t in types:
a.append(t.accept(self))
return a
@overload
def expand_self_type(var: Var, typ: ProperType, replacement: ProperType) -> ProperType: ...
@overload
def expand_self_type(var: Var, typ: Type, replacement: Type) -> Type: ...
def expand_self_type(var: Var, typ: Type, replacement: Type) -> Type:
"""Expand appearances of Self type in a variable type."""
if var.info.self_type is not None and not var.is_property:
return expand_type(typ, {var.info.self_type.id: replacement})
return typ
def remove_trivial(types: Iterable[Type]) -> list[Type]:
"""Make trivial simplifications on a list of types without calling is_subtype().
    This makes the following simplifications:
* Remove bottom types (taking into account strict optional setting)
* Remove everything else if there is an `object`
* Remove strict duplicate types
"""
removed_none = False
new_types = []
all_types = set()
for t in types:
p_t = get_proper_type(t)
if isinstance(p_t, UninhabitedType):
continue
if isinstance(p_t, NoneType) and not state.strict_optional:
removed_none = True
continue
if isinstance(p_t, Instance) and p_t.type.fullname == "builtins.object":
return [p_t]
if p_t not in all_types:
new_types.append(t)
all_types.add(p_t)
if new_types:
return new_types
if removed_none:
return [NoneType()]
return [UninhabitedType()]
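# Minimal sketch (illustrative, not part of upstream mypy): bottom types are
# dropped and strict duplicates collapse, all without calling is_subtype().
def _demo_remove_trivial() -> None:
    any_type = AnyType(TypeOfAny.special_form)
    simplified = remove_trivial([UninhabitedType(), any_type, any_type])
    assert simplified == [any_type]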
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/expandtype.py
|
Python
|
NOASSERTION
| 23,834 |
"""Translate an Expression to a Type value."""
from __future__ import annotations
from typing import Callable
from mypy.fastparse import parse_type_string
from mypy.nodes import (
MISSING_FALLBACK,
BytesExpr,
CallExpr,
ComplexExpr,
Context,
DictExpr,
EllipsisExpr,
Expression,
FloatExpr,
IndexExpr,
IntExpr,
ListExpr,
MemberExpr,
NameExpr,
OpExpr,
RefExpr,
StarExpr,
StrExpr,
SymbolTableNode,
TupleExpr,
UnaryExpr,
get_member_expr_fullname,
)
from mypy.options import Options
from mypy.types import (
ANNOTATED_TYPE_NAMES,
AnyType,
CallableArgument,
EllipsisType,
Instance,
ProperType,
RawExpressionType,
Type,
TypedDictType,
TypeList,
TypeOfAny,
UnboundType,
UnionType,
UnpackType,
)
class TypeTranslationError(Exception):
"""Exception raised when an expression is not valid as a type."""
def _extract_argument_name(expr: Expression) -> str | None:
if isinstance(expr, NameExpr) and expr.name == "None":
return None
elif isinstance(expr, StrExpr):
return expr.value
else:
raise TypeTranslationError()
def expr_to_unanalyzed_type(
expr: Expression,
options: Options,
allow_new_syntax: bool = False,
_parent: Expression | None = None,
allow_unpack: bool = False,
lookup_qualified: Callable[[str, Context], SymbolTableNode | None] | None = None,
) -> ProperType:
"""Translate an expression to the corresponding type.
The result is not semantically analyzed. It can be UnboundType or TypeList.
Raise TypeTranslationError if the expression cannot represent a type.
If lookup_qualified is not provided, the expression is expected to be semantically
analyzed.
If allow_new_syntax is True, allow all type syntax independent of the target
Python version (used in stubs).
# TODO: a lot of code here is duplicated in fastparse.py, refactor this.
"""
# The `parent` parameter is used in recursive calls to provide context for
    # understanding whether a CallableArgument is ok.
name: str | None = None
if isinstance(expr, NameExpr):
name = expr.name
if name == "True":
return RawExpressionType(True, "builtins.bool", line=expr.line, column=expr.column)
elif name == "False":
return RawExpressionType(False, "builtins.bool", line=expr.line, column=expr.column)
else:
return UnboundType(name, line=expr.line, column=expr.column)
elif isinstance(expr, MemberExpr):
fullname = get_member_expr_fullname(expr)
if fullname:
return UnboundType(fullname, line=expr.line, column=expr.column)
else:
raise TypeTranslationError()
elif isinstance(expr, IndexExpr):
base = expr_to_unanalyzed_type(expr.base, options, allow_new_syntax, expr)
if isinstance(base, UnboundType):
if base.args:
raise TypeTranslationError()
if isinstance(expr.index, TupleExpr):
args = expr.index.items
else:
args = [expr.index]
if isinstance(expr.base, RefExpr):
# Check if the type is Annotated[...]. For this we need the fullname,
# which must be looked up if the expression hasn't been semantically analyzed.
base_fullname = None
if lookup_qualified is not None:
sym = lookup_qualified(base.name, expr)
if sym and sym.node:
base_fullname = sym.node.fullname
else:
base_fullname = expr.base.fullname
if base_fullname is not None and base_fullname in ANNOTATED_TYPE_NAMES:
# TODO: this is not the optimal solution as we are basically getting rid
# of the Annotation definition and only returning the type information,
# losing all the annotations.
return expr_to_unanalyzed_type(args[0], options, allow_new_syntax, expr)
base.args = tuple(
expr_to_unanalyzed_type(arg, options, allow_new_syntax, expr, allow_unpack=True)
for arg in args
)
if not base.args:
base.empty_tuple_index = True
return base
else:
raise TypeTranslationError()
elif (
isinstance(expr, OpExpr)
and expr.op == "|"
and ((options.python_version >= (3, 10)) or allow_new_syntax)
):
return UnionType(
[
expr_to_unanalyzed_type(expr.left, options, allow_new_syntax),
expr_to_unanalyzed_type(expr.right, options, allow_new_syntax),
],
uses_pep604_syntax=True,
)
elif isinstance(expr, CallExpr) and isinstance(_parent, ListExpr):
c = expr.callee
names = []
# Go through the dotted member expr chain to get the full arg
# constructor name to look up
while True:
if isinstance(c, NameExpr):
names.append(c.name)
break
elif isinstance(c, MemberExpr):
names.append(c.name)
c = c.expr
else:
raise TypeTranslationError()
arg_const = ".".join(reversed(names))
# Go through the constructor args to get its name and type.
name = None
default_type = AnyType(TypeOfAny.unannotated)
typ: Type = default_type
for i, arg in enumerate(expr.args):
if expr.arg_names[i] is not None:
if expr.arg_names[i] == "name":
if name is not None:
# Two names
raise TypeTranslationError()
name = _extract_argument_name(arg)
continue
elif expr.arg_names[i] == "type":
if typ is not default_type:
# Two types
raise TypeTranslationError()
typ = expr_to_unanalyzed_type(arg, options, allow_new_syntax, expr)
continue
else:
raise TypeTranslationError()
elif i == 0:
typ = expr_to_unanalyzed_type(arg, options, allow_new_syntax, expr)
elif i == 1:
name = _extract_argument_name(arg)
else:
raise TypeTranslationError()
return CallableArgument(typ, name, arg_const, expr.line, expr.column)
elif isinstance(expr, ListExpr):
return TypeList(
[
expr_to_unanalyzed_type(t, options, allow_new_syntax, expr, allow_unpack=True)
for t in expr.items
],
line=expr.line,
column=expr.column,
)
elif isinstance(expr, StrExpr):
return parse_type_string(expr.value, "builtins.str", expr.line, expr.column)
elif isinstance(expr, BytesExpr):
return parse_type_string(expr.value, "builtins.bytes", expr.line, expr.column)
elif isinstance(expr, UnaryExpr):
typ = expr_to_unanalyzed_type(expr.expr, options, allow_new_syntax)
if isinstance(typ, RawExpressionType):
if isinstance(typ.literal_value, int):
if expr.op == "-":
typ.literal_value *= -1
return typ
elif expr.op == "+":
return typ
raise TypeTranslationError()
elif isinstance(expr, IntExpr):
return RawExpressionType(expr.value, "builtins.int", line=expr.line, column=expr.column)
elif isinstance(expr, FloatExpr):
        # Floats are not valid parameters for RawExpressionType, so we just
# pass in 'None' for now. We'll report the appropriate error at a later stage.
return RawExpressionType(None, "builtins.float", line=expr.line, column=expr.column)
elif isinstance(expr, ComplexExpr):
# Same thing as above with complex numbers.
return RawExpressionType(None, "builtins.complex", line=expr.line, column=expr.column)
elif isinstance(expr, EllipsisExpr):
return EllipsisType(expr.line)
elif allow_unpack and isinstance(expr, StarExpr):
return UnpackType(
expr_to_unanalyzed_type(expr.expr, options, allow_new_syntax), from_star_syntax=True
)
elif isinstance(expr, DictExpr):
if not expr.items:
raise TypeTranslationError()
items: dict[str, Type] = {}
extra_items_from = []
for item_name, value in expr.items:
if not isinstance(item_name, StrExpr):
if item_name is None:
extra_items_from.append(
expr_to_unanalyzed_type(value, options, allow_new_syntax, expr)
)
continue
raise TypeTranslationError()
items[item_name.value] = expr_to_unanalyzed_type(
value, options, allow_new_syntax, expr
)
result = TypedDictType(
items, set(), set(), Instance(MISSING_FALLBACK, ()), expr.line, expr.column
)
result.extra_items_from = extra_items_from
return result
else:
raise TypeTranslationError()
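# Minimal usage sketch (illustrative, not part of upstream mypy): a bare name
# becomes an UnboundType, an integer literal becomes a RawExpressionType, and
# expressions that cannot denote a type raise TypeTranslationError.
def _demo_expr_to_unanalyzed_type() -> None:
    options = Options()
    name_type = expr_to_unanalyzed_type(NameExpr("int"), options)
    assert isinstance(name_type, UnboundType) and name_type.name == "int"
    literal_type = expr_to_unanalyzed_type(IntExpr(3), options)
    assert isinstance(literal_type, RawExpressionType)
    assert literal_type.literal_value == 3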
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/exprtotype.py
|
Python
|
NOASSERTION
| 9,431 |
from __future__ import annotations
import copy
import re
import sys
import warnings
from typing import Any, Callable, Final, List, Optional, Sequence, TypeVar, Union, cast
from typing_extensions import Literal, overload
from mypy import defaults, errorcodes as codes, message_registry
from mypy.errors import Errors
from mypy.message_registry import ErrorMessage
from mypy.nodes import (
ARG_NAMED,
ARG_NAMED_OPT,
ARG_OPT,
ARG_POS,
ARG_STAR,
ARG_STAR2,
MISSING_FALLBACK,
PARAM_SPEC_KIND,
TYPE_VAR_KIND,
TYPE_VAR_TUPLE_KIND,
ArgKind,
Argument,
AssertStmt,
AssignmentExpr,
AssignmentStmt,
AwaitExpr,
Block,
BreakStmt,
BytesExpr,
CallExpr,
ClassDef,
ComparisonExpr,
ComplexExpr,
ConditionalExpr,
ContinueStmt,
Decorator,
DelStmt,
DictExpr,
DictionaryComprehension,
EllipsisExpr,
Expression,
ExpressionStmt,
FloatExpr,
ForStmt,
FuncDef,
GeneratorExpr,
GlobalDecl,
IfStmt,
Import,
ImportAll,
ImportBase,
ImportFrom,
IndexExpr,
IntExpr,
LambdaExpr,
ListComprehension,
ListExpr,
MatchStmt,
MemberExpr,
MypyFile,
NameExpr,
Node,
NonlocalDecl,
OperatorAssignmentStmt,
OpExpr,
OverloadedFuncDef,
OverloadPart,
PassStmt,
RaiseStmt,
RefExpr,
ReturnStmt,
SetComprehension,
SetExpr,
SliceExpr,
StarExpr,
Statement,
StrExpr,
SuperExpr,
TempNode,
TryStmt,
TupleExpr,
TypeAliasStmt,
TypeParam,
UnaryExpr,
Var,
WhileStmt,
WithStmt,
YieldExpr,
YieldFromExpr,
check_arg_names,
)
from mypy.options import Options
from mypy.patterns import (
AsPattern,
ClassPattern,
MappingPattern,
OrPattern,
SequencePattern,
SingletonPattern,
StarredPattern,
ValuePattern,
)
from mypy.reachability import infer_reachability_of_if_statement, mark_block_unreachable
from mypy.sharedparse import argument_elide_name, special_function_elide_names
from mypy.traverser import TraverserVisitor
from mypy.types import (
AnyType,
CallableArgument,
CallableType,
EllipsisType,
Instance,
ProperType,
RawExpressionType,
TupleType,
Type,
TypedDictType,
TypeList,
TypeOfAny,
UnboundType,
UnionType,
UnpackType,
)
from mypy.util import bytes_to_human_readable_repr, unnamed_function
# pull this into a final variable to make mypyc be quiet about
# the default argument warning
PY_MINOR_VERSION: Final = sys.version_info[1]
import ast as ast3
# TODO: Index, ExtSlice are deprecated in 3.9.
from ast import AST, Attribute, Call, FunctionType, Index, Name, Starred, UAdd, UnaryOp, USub
def ast3_parse(
source: str | bytes, filename: str, mode: str, feature_version: int = PY_MINOR_VERSION
) -> AST:
return ast3.parse(
source,
filename,
mode,
type_comments=True, # This works the magic
feature_version=feature_version,
)
NamedExpr = ast3.NamedExpr
Constant = ast3.Constant
if sys.version_info >= (3, 10):
Match = ast3.Match
MatchValue = ast3.MatchValue
MatchSingleton = ast3.MatchSingleton
MatchSequence = ast3.MatchSequence
MatchStar = ast3.MatchStar
MatchMapping = ast3.MatchMapping
MatchClass = ast3.MatchClass
MatchAs = ast3.MatchAs
MatchOr = ast3.MatchOr
AstNode = Union[ast3.expr, ast3.stmt, ast3.pattern, ast3.ExceptHandler]
else:
Match = Any
MatchValue = Any
MatchSingleton = Any
MatchSequence = Any
MatchStar = Any
MatchMapping = Any
MatchClass = Any
MatchAs = Any
MatchOr = Any
AstNode = Union[ast3.expr, ast3.stmt, ast3.ExceptHandler]
if sys.version_info >= (3, 11):
TryStar = ast3.TryStar
else:
TryStar = Any
if sys.version_info >= (3, 12):
ast_TypeAlias = ast3.TypeAlias
ast_ParamSpec = ast3.ParamSpec
ast_TypeVar = ast3.TypeVar
ast_TypeVarTuple = ast3.TypeVarTuple
else:
ast_TypeAlias = Any
ast_ParamSpec = Any
ast_TypeVar = Any
ast_TypeVarTuple = Any
N = TypeVar("N", bound=Node)
# There is no way to create reasonable fallbacks at this stage,
# they must be patched later.
_dummy_fallback: Final = Instance(MISSING_FALLBACK, [], -1)
TYPE_IGNORE_PATTERN: Final = re.compile(r"[^#]*#\s*type:\s*ignore\s*(.*)")
def parse(
source: str | bytes,
fnam: str,
module: str | None,
errors: Errors,
options: Options | None = None,
) -> MypyFile:
"""Parse a source file, without doing any semantic analysis.
    Return the parse tree. Parse errors are reported via the given errors object
    rather than raised as exceptions.
"""
ignore_errors = (options is not None and options.ignore_errors) or (
fnam in errors.ignored_files
)
# If errors are ignored, we can drop many function bodies to speed up type checking.
strip_function_bodies = ignore_errors and (options is None or not options.preserve_asts)
if options is None:
options = Options()
errors.set_file(fnam, module, options=options)
is_stub_file = fnam.endswith(".pyi")
if is_stub_file:
feature_version = defaults.PYTHON3_VERSION[1]
if options.python_version[0] == 3 and options.python_version[1] > feature_version:
feature_version = options.python_version[1]
else:
assert options.python_version[0] >= 3
feature_version = options.python_version[1]
try:
# Disable deprecation warnings about \u
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
ast = ast3_parse(source, fnam, "exec", feature_version=feature_version)
tree = ASTConverter(
options=options,
is_stub=is_stub_file,
errors=errors,
strip_function_bodies=strip_function_bodies,
path=fnam,
).visit(ast)
except SyntaxError as e:
# alias to please mypyc
is_py38_or_earlier = sys.version_info < (3, 9)
if is_py38_or_earlier and e.filename == "<fstring>":
# In Python 3.8 and earlier, syntax errors in f-strings have lineno relative to the
# start of the f-string. This would be misleading, as mypy will report the error as the
# lineno within the file.
e.lineno = None
message = e.msg
if feature_version > sys.version_info.minor and message.startswith("invalid syntax"):
python_version_str = f"{options.python_version[0]}.{options.python_version[1]}"
message += f"; you likely need to run mypy using Python {python_version_str} or newer"
errors.report(
e.lineno if e.lineno is not None else -1,
e.offset,
message,
blocker=True,
code=codes.SYNTAX,
)
tree = MypyFile([], [], False, {})
assert isinstance(tree, MypyFile)
return tree
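# Minimal usage sketch (illustrative, not part of upstream mypy): parse a small
# module into a MypyFile without any semantic analysis; any parse problems are
# reported on the supplied Errors object rather than raised.
def _demo_parse() -> MypyFile:
    opts = Options()
    return parse(b"x = 1\n", "demo.py", "demo", errors=Errors(opts), options=opts)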
def parse_type_ignore_tag(tag: str | None) -> list[str] | None:
"""Parse optional "[code, ...]" tag after "# type: ignore".
Return:
* [] if no tag was found (ignore all errors)
* list of ignored error codes if a tag was found
* None if the tag was invalid.
"""
if not tag or tag.strip() == "" or tag.strip().startswith("#"):
# No tag -- ignore all errors.
return []
m = re.match(r"\s*\[([^]#]*)\]\s*(#.*)?$", tag)
if m is None:
# Invalid "# type: ignore" comment.
return None
return [code.strip() for code in m.group(1).split(",")]
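# Minimal usage sketch (illustrative, not part of upstream mypy): a missing tag
# means "ignore every error", a bracketed list yields the individual codes, and a
# malformed tag yields None.
def _demo_parse_type_ignore_tag() -> None:
    assert parse_type_ignore_tag(None) == []
    assert parse_type_ignore_tag("[assignment, union-attr]") == ["assignment", "union-attr"]
    assert parse_type_ignore_tag("not a tag") is None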
def parse_type_comment(
type_comment: str, line: int, column: int, errors: Errors | None
) -> tuple[list[str] | None, ProperType | None]:
"""Parse type portion of a type comment (+ optional type ignore).
Return (ignore info, parsed type).
"""
try:
typ = ast3_parse(type_comment, "<type_comment>", "eval")
except SyntaxError:
if errors is not None:
stripped_type = type_comment.split("#", 2)[0].strip()
err_msg = message_registry.TYPE_COMMENT_SYNTAX_ERROR_VALUE.format(stripped_type)
errors.report(line, column, err_msg.value, blocker=True, code=err_msg.code)
return None, None
else:
raise
else:
extra_ignore = TYPE_IGNORE_PATTERN.match(type_comment)
if extra_ignore:
tag: str | None = extra_ignore.group(1)
ignored: list[str] | None = parse_type_ignore_tag(tag)
if ignored is None:
if errors is not None:
errors.report(
line, column, message_registry.INVALID_TYPE_IGNORE.value, code=codes.SYNTAX
)
else:
raise SyntaxError
else:
ignored = None
assert isinstance(typ, ast3.Expression)
converted = TypeConverter(
errors, line=line, override_column=column, is_evaluated=False
).visit(typ.body)
return ignored, converted
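# Minimal usage sketch (illustrative, not part of upstream mypy): a trailing
# "# type: ignore[...]" on a type comment is returned separately from the parsed
# type itself.
def _demo_parse_type_comment() -> None:
    ignored, typ = parse_type_comment("List[int]  # type: ignore[misc]", 1, 0, errors=None)
    assert ignored == ["misc"]
    assert typ is not None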
def parse_type_string(
expr_string: str, expr_fallback_name: str, line: int, column: int
) -> ProperType:
"""Parses a type that was originally present inside of an explicit string.
For example, suppose we have the type `Foo["blah"]`. We should parse the
string expression "blah" using this function.
"""
try:
_, node = parse_type_comment(f"({expr_string})", line=line, column=column, errors=None)
if isinstance(node, (UnboundType, UnionType)) and node.original_str_expr is None:
node.original_str_expr = expr_string
node.original_str_fallback = expr_fallback_name
return node
else:
return RawExpressionType(expr_string, expr_fallback_name, line, column)
except (SyntaxError, ValueError):
# Note: the parser will raise a `ValueError` instead of a SyntaxError if
# the string happens to contain things like \x00.
return RawExpressionType(expr_string, expr_fallback_name, line, column)
def is_no_type_check_decorator(expr: ast3.expr) -> bool:
if isinstance(expr, Name):
return expr.id == "no_type_check"
elif isinstance(expr, Attribute):
if isinstance(expr.value, Name):
return expr.value.id == "typing" and expr.attr == "no_type_check"
return False
def find_disallowed_expression_in_annotation_scope(expr: ast3.expr | None) -> ast3.expr | None:
if expr is None:
return None
for node in ast3.walk(expr):
if isinstance(node, (ast3.Yield, ast3.YieldFrom, ast3.NamedExpr, ast3.Await)):
return node
return None
class ASTConverter:
def __init__(
self,
options: Options,
is_stub: bool,
errors: Errors,
*,
strip_function_bodies: bool,
path: str,
) -> None:
# 'C' for class, 'D' for function signature, 'F' for function, 'L' for lambda
self.class_and_function_stack: list[Literal["C", "D", "F", "L"]] = []
self.imports: list[ImportBase] = []
self.options = options
self.is_stub = is_stub
self.errors = errors
self.strip_function_bodies = strip_function_bodies
self.path = path
self.type_ignores: dict[int, list[str]] = {}
# Cache of visit_X methods keyed by type of visited object
self.visitor_cache: dict[type, Callable[[AST | None], Any]] = {}
def note(self, msg: str, line: int, column: int) -> None:
self.errors.report(line, column, msg, severity="note", code=codes.SYNTAX)
def fail(self, msg: ErrorMessage, line: int, column: int, blocker: bool = True) -> None:
if blocker or not self.options.ignore_errors:
# Make sure self.errors reflects any type ignores that we have parsed
self.errors.set_file_ignored_lines(
self.path, self.type_ignores, self.options.ignore_errors
)
self.errors.report(line, column, msg.value, blocker=blocker, code=msg.code)
def fail_merge_overload(self, node: IfStmt) -> None:
self.fail(
message_registry.FAILED_TO_MERGE_OVERLOADS,
line=node.line,
column=node.column,
blocker=False,
)
def visit(self, node: AST | None) -> Any:
if node is None:
return None
typeobj = type(node)
visitor = self.visitor_cache.get(typeobj)
if visitor is None:
method = "visit_" + node.__class__.__name__
visitor = getattr(self, method)
self.visitor_cache[typeobj] = visitor
return visitor(node)
def set_line(self, node: N, n: AstNode) -> N:
node.line = n.lineno
node.column = n.col_offset
node.end_line = getattr(n, "end_lineno", None)
node.end_column = getattr(n, "end_col_offset", None)
return node
def translate_opt_expr_list(self, l: Sequence[AST | None]) -> list[Expression | None]:
res: list[Expression | None] = []
for e in l:
exp = self.visit(e)
res.append(exp)
return res
def translate_expr_list(self, l: Sequence[AST]) -> list[Expression]:
return cast(List[Expression], self.translate_opt_expr_list(l))
def get_lineno(self, node: ast3.expr | ast3.stmt) -> int:
if (
isinstance(node, (ast3.AsyncFunctionDef, ast3.ClassDef, ast3.FunctionDef))
and node.decorator_list
):
return node.decorator_list[0].lineno
return node.lineno
def translate_stmt_list(
self,
stmts: Sequence[ast3.stmt],
*,
ismodule: bool = False,
can_strip: bool = False,
is_coroutine: bool = False,
) -> list[Statement]:
# A "# type: ignore" comment before the first statement of a module
# ignores the whole module:
if (
ismodule
and stmts
and self.type_ignores
and min(self.type_ignores) < self.get_lineno(stmts[0])
):
ignores = self.type_ignores[min(self.type_ignores)]
if ignores:
joined_ignores = ", ".join(ignores)
self.fail(
message_registry.TYPE_IGNORE_WITH_ERRCODE_ON_MODULE.format(joined_ignores),
line=min(self.type_ignores),
column=0,
blocker=False,
)
self.errors.used_ignored_lines[self.errors.file][min(self.type_ignores)].append(
codes.FILE.code
)
block = Block(self.fix_function_overloads(self.translate_stmt_list(stmts)))
self.set_block_lines(block, stmts)
mark_block_unreachable(block)
return [block]
stack = self.class_and_function_stack
# Fast case for stripping function bodies
if (
can_strip
and self.strip_function_bodies
and len(stack) == 1
and stack[0] == "F"
and not is_coroutine
):
return []
res: list[Statement] = []
for stmt in stmts:
node = self.visit(stmt)
res.append(node)
# Slow case for stripping function bodies
if can_strip and self.strip_function_bodies:
if stack[-2:] == ["C", "F"]:
if is_possible_trivial_body(res):
can_strip = False
else:
# We only strip method bodies if they don't assign to an attribute, as
# this may define an attribute which has an externally visible effect.
visitor = FindAttributeAssign()
for s in res:
s.accept(visitor)
if visitor.found:
can_strip = False
break
if can_strip and stack[-1] == "F" and is_coroutine:
# Yields inside an async function affect the return type and should not
# be stripped.
yield_visitor = FindYield()
for s in res:
s.accept(yield_visitor)
if yield_visitor.found:
can_strip = False
break
if can_strip:
return []
return res
def translate_type_comment(
self, n: ast3.stmt | ast3.arg, type_comment: str | None
) -> ProperType | None:
if type_comment is None:
return None
else:
lineno = n.lineno
extra_ignore, typ = parse_type_comment(type_comment, lineno, n.col_offset, self.errors)
if extra_ignore is not None:
self.type_ignores[lineno] = extra_ignore
return typ
op_map: Final[dict[type[AST], str]] = {
ast3.Add: "+",
ast3.Sub: "-",
ast3.Mult: "*",
ast3.MatMult: "@",
ast3.Div: "/",
ast3.Mod: "%",
ast3.Pow: "**",
ast3.LShift: "<<",
ast3.RShift: ">>",
ast3.BitOr: "|",
ast3.BitXor: "^",
ast3.BitAnd: "&",
ast3.FloorDiv: "//",
}
def from_operator(self, op: ast3.operator) -> str:
op_name = ASTConverter.op_map.get(type(op))
if op_name is None:
raise RuntimeError("Unknown operator " + str(type(op)))
else:
return op_name
comp_op_map: Final[dict[type[AST], str]] = {
ast3.Gt: ">",
ast3.Lt: "<",
ast3.Eq: "==",
ast3.GtE: ">=",
ast3.LtE: "<=",
ast3.NotEq: "!=",
ast3.Is: "is",
ast3.IsNot: "is not",
ast3.In: "in",
ast3.NotIn: "not in",
}
def from_comp_operator(self, op: ast3.cmpop) -> str:
op_name = ASTConverter.comp_op_map.get(type(op))
if op_name is None:
raise RuntimeError("Unknown comparison operator " + str(type(op)))
else:
return op_name
def set_block_lines(self, b: Block, stmts: Sequence[ast3.stmt]) -> None:
first, last = stmts[0], stmts[-1]
b.line = first.lineno
b.column = first.col_offset
b.end_line = getattr(last, "end_lineno", None)
b.end_column = getattr(last, "end_col_offset", None)
if not b.body:
return
new_first = b.body[0]
if isinstance(new_first, (Decorator, OverloadedFuncDef)):
# Decorated function lines are different between Python versions.
# copy the normalization we do for them to block first lines.
b.line = new_first.line
b.column = new_first.column
def as_block(self, stmts: list[ast3.stmt]) -> Block | None:
b = None
if stmts:
b = Block(self.fix_function_overloads(self.translate_stmt_list(stmts)))
self.set_block_lines(b, stmts)
return b
def as_required_block(
self, stmts: list[ast3.stmt], *, can_strip: bool = False, is_coroutine: bool = False
) -> Block:
assert stmts # must be non-empty
b = Block(
self.fix_function_overloads(
self.translate_stmt_list(stmts, can_strip=can_strip, is_coroutine=is_coroutine)
)
)
self.set_block_lines(b, stmts)
return b
def fix_function_overloads(self, stmts: list[Statement]) -> list[Statement]:
ret: list[Statement] = []
current_overload: list[OverloadPart] = []
current_overload_name: str | None = None
seen_unconditional_func_def = False
last_if_stmt: IfStmt | None = None
last_if_overload: Decorator | FuncDef | OverloadedFuncDef | None = None
last_if_stmt_overload_name: str | None = None
last_if_unknown_truth_value: IfStmt | None = None
skipped_if_stmts: list[IfStmt] = []
for stmt in stmts:
if_overload_name: str | None = None
if_block_with_overload: Block | None = None
if_unknown_truth_value: IfStmt | None = None
if isinstance(stmt, IfStmt) and seen_unconditional_func_def is False:
# Check IfStmt block to determine if function overloads can be merged
if_overload_name = self._check_ifstmt_for_overloads(stmt, current_overload_name)
if if_overload_name is not None:
(if_block_with_overload, if_unknown_truth_value) = (
self._get_executable_if_block_with_overloads(stmt)
)
if (
current_overload_name is not None
and isinstance(stmt, (Decorator, FuncDef))
and stmt.name == current_overload_name
):
if last_if_stmt is not None:
skipped_if_stmts.append(last_if_stmt)
if last_if_overload is not None:
# Last stmt was an IfStmt with same overload name
# Add overloads to current_overload
if isinstance(last_if_overload, OverloadedFuncDef):
current_overload.extend(last_if_overload.items)
else:
current_overload.append(last_if_overload)
last_if_stmt, last_if_overload = None, None
if last_if_unknown_truth_value:
self.fail_merge_overload(last_if_unknown_truth_value)
last_if_unknown_truth_value = None
current_overload.append(stmt)
if isinstance(stmt, FuncDef):
seen_unconditional_func_def = True
elif (
current_overload_name is not None
and isinstance(stmt, IfStmt)
and if_overload_name == current_overload_name
):
# IfStmt only contains stmts relevant to current_overload.
# Check if stmts are reachable and add them to current_overload,
# otherwise skip IfStmt to allow subsequent overload
# or function definitions.
skipped_if_stmts.append(stmt)
if if_block_with_overload is None:
if if_unknown_truth_value is not None:
self.fail_merge_overload(if_unknown_truth_value)
continue
if last_if_overload is not None:
# Last stmt was an IfStmt with same overload name
# Add overloads to current_overload
if isinstance(last_if_overload, OverloadedFuncDef):
current_overload.extend(last_if_overload.items)
else:
current_overload.append(last_if_overload)
last_if_stmt, last_if_overload = None, None
if isinstance(if_block_with_overload.body[-1], OverloadedFuncDef):
skipped_if_stmts.extend(cast(List[IfStmt], if_block_with_overload.body[:-1]))
current_overload.extend(if_block_with_overload.body[-1].items)
else:
current_overload.append(
cast(Union[Decorator, FuncDef], if_block_with_overload.body[0])
)
else:
if last_if_stmt is not None:
ret.append(last_if_stmt)
last_if_stmt_overload_name = current_overload_name
last_if_stmt, last_if_overload = None, None
last_if_unknown_truth_value = None
if current_overload and current_overload_name == last_if_stmt_overload_name:
# Remove last stmt (IfStmt) from ret if the overload names matched
# Only happens if no executable block had been found in IfStmt
popped = ret.pop()
assert isinstance(popped, IfStmt)
skipped_if_stmts.append(popped)
if current_overload and skipped_if_stmts:
# Add bare IfStmt (without overloads) to ret
# Required for mypy to be able to still check conditions
for if_stmt in skipped_if_stmts:
self._strip_contents_from_if_stmt(if_stmt)
ret.append(if_stmt)
skipped_if_stmts = []
if len(current_overload) == 1:
ret.append(current_overload[0])
elif len(current_overload) > 1:
ret.append(OverloadedFuncDef(current_overload))
                # If we have multiple decorated functions named "_" next to each other, we want to treat
# them as a series of regular FuncDefs instead of one OverloadedFuncDef because
# most of mypy/mypyc assumes that all the functions in an OverloadedFuncDef are
# related, but multiple underscore functions next to each other aren't necessarily
# related
seen_unconditional_func_def = False
if isinstance(stmt, Decorator) and not unnamed_function(stmt.name):
current_overload = [stmt]
current_overload_name = stmt.name
elif isinstance(stmt, IfStmt) and if_overload_name is not None:
current_overload = []
current_overload_name = if_overload_name
last_if_stmt = stmt
last_if_stmt_overload_name = None
if if_block_with_overload is not None:
skipped_if_stmts.extend(
cast(List[IfStmt], if_block_with_overload.body[:-1])
)
last_if_overload = cast(
Union[Decorator, FuncDef, OverloadedFuncDef],
if_block_with_overload.body[-1],
)
last_if_unknown_truth_value = if_unknown_truth_value
else:
current_overload = []
current_overload_name = None
ret.append(stmt)
if current_overload and skipped_if_stmts:
# Add bare IfStmt (without overloads) to ret
# Required for mypy to be able to still check conditions
for if_stmt in skipped_if_stmts:
self._strip_contents_from_if_stmt(if_stmt)
ret.append(if_stmt)
if len(current_overload) == 1:
ret.append(current_overload[0])
elif len(current_overload) > 1:
ret.append(OverloadedFuncDef(current_overload))
elif last_if_overload is not None:
ret.append(last_if_overload)
elif last_if_stmt is not None:
ret.append(last_if_stmt)
return ret
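    # Illustrative sketch of fix_function_overloads above: given consecutive
    # definitions roughly like
    #
    #     @overload
    #     def f(x: int) -> int: ...
    #     @overload
    #     def f(x: str) -> str: ...
    #     def f(x): return x
    #
    # the three statements sharing the name "f" are merged into a single
    # OverloadedFuncDef, while unrelated statements pass through unchanged.
    # Overloads placed inside `if` blocks (e.g. version checks) are folded in by
    # the IfStmt handling above when their reachability can be determined.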
def _check_ifstmt_for_overloads(
self, stmt: IfStmt, current_overload_name: str | None = None
) -> str | None:
"""Check if IfStmt contains only overloads with the same name.
Return overload_name if found, None otherwise.
"""
# Check that block only contains a single Decorator, FuncDef, or OverloadedFuncDef.
# Multiple overloads have already been merged as OverloadedFuncDef.
if not (
len(stmt.body[0].body) == 1
and (
isinstance(stmt.body[0].body[0], (Decorator, OverloadedFuncDef))
or current_overload_name is not None
and isinstance(stmt.body[0].body[0], FuncDef)
)
or len(stmt.body[0].body) > 1
and isinstance(stmt.body[0].body[-1], OverloadedFuncDef)
and all(self._is_stripped_if_stmt(if_stmt) for if_stmt in stmt.body[0].body[:-1])
):
return None
overload_name = cast(
Union[Decorator, FuncDef, OverloadedFuncDef], stmt.body[0].body[-1]
).name
if stmt.else_body is None:
return overload_name
if len(stmt.else_body.body) == 1:
# For elif: else_body contains an IfStmt itself -> do a recursive check.
if (
isinstance(stmt.else_body.body[0], (Decorator, FuncDef, OverloadedFuncDef))
and stmt.else_body.body[0].name == overload_name
):
return overload_name
if (
isinstance(stmt.else_body.body[0], IfStmt)
and self._check_ifstmt_for_overloads(stmt.else_body.body[0], current_overload_name)
== overload_name
):
return overload_name
return None
def _get_executable_if_block_with_overloads(
self, stmt: IfStmt
) -> tuple[Block | None, IfStmt | None]:
"""Return block from IfStmt that will get executed.
Return
0 -> A block if sure that alternative blocks are unreachable.
1 -> An IfStmt if the reachability of it can't be inferred,
i.e. the truth value is unknown.
"""
infer_reachability_of_if_statement(stmt, self.options)
if stmt.else_body is None and stmt.body[0].is_unreachable is True:
# always False condition with no else
return None, None
if (
stmt.else_body is None
or stmt.body[0].is_unreachable is False
and stmt.else_body.is_unreachable is False
):
# The truth value is unknown, thus not conclusive
return None, stmt
if stmt.else_body.is_unreachable is True:
# else_body will be set unreachable if condition is always True
return stmt.body[0], None
if stmt.body[0].is_unreachable is True:
# body will be set unreachable if condition is always False
# else_body can contain an IfStmt itself (for elif) -> do a recursive check
if isinstance(stmt.else_body.body[0], IfStmt):
return self._get_executable_if_block_with_overloads(stmt.else_body.body[0])
return stmt.else_body, None
return None, stmt
def _strip_contents_from_if_stmt(self, stmt: IfStmt) -> None:
"""Remove contents from IfStmt.
Needed to still be able to check the conditions after the contents
have been merged with the surrounding function overloads.
"""
if len(stmt.body) == 1:
stmt.body[0].body = []
if stmt.else_body and len(stmt.else_body.body) == 1:
if isinstance(stmt.else_body.body[0], IfStmt):
self._strip_contents_from_if_stmt(stmt.else_body.body[0])
else:
stmt.else_body.body = []
def _is_stripped_if_stmt(self, stmt: Statement) -> bool:
"""Check stmt to make sure it is a stripped IfStmt.
See also: _strip_contents_from_if_stmt
"""
if not isinstance(stmt, IfStmt):
return False
if not (len(stmt.body) == 1 and len(stmt.body[0].body) == 0):
# Body not empty
return False
if not stmt.else_body or len(stmt.else_body.body) == 0:
# No or empty else_body
return True
# For elif, IfStmt are stored recursively in else_body
return self._is_stripped_if_stmt(stmt.else_body.body[0])
def translate_module_id(self, id: str) -> str:
"""Return the actual, internal module id for a source text id."""
if id == self.options.custom_typing_module:
return "typing"
return id
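    # Example for translate_module_id above (illustrative): if
    # options.custom_typing_module is "myproject.typing_shim" (a hypothetical name),
    # then an import of "myproject.typing_shim" is recorded as an import of "typing";
    # every other module id is returned unchanged.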
def visit_Module(self, mod: ast3.Module) -> MypyFile:
self.type_ignores = {}
for ti in mod.type_ignores:
parsed = parse_type_ignore_tag(ti.tag)
if parsed is not None:
self.type_ignores[ti.lineno] = parsed
else:
self.fail(message_registry.INVALID_TYPE_IGNORE, ti.lineno, -1, blocker=False)
body = self.fix_function_overloads(self.translate_stmt_list(mod.body, ismodule=True))
ret = MypyFile(body, self.imports, False, ignored_lines=self.type_ignores)
ret.is_stub = self.is_stub
ret.path = self.path
return ret
# --- stmt ---
# FunctionDef(identifier name, arguments args,
# stmt* body, expr* decorator_list, expr? returns, string? type_comment)
# arguments = (arg* args, arg? vararg, arg* kwonlyargs, expr* kw_defaults,
# arg? kwarg, expr* defaults)
def visit_FunctionDef(self, n: ast3.FunctionDef) -> FuncDef | Decorator:
return self.do_func_def(n)
# AsyncFunctionDef(identifier name, arguments args,
# stmt* body, expr* decorator_list, expr? returns, string? type_comment)
def visit_AsyncFunctionDef(self, n: ast3.AsyncFunctionDef) -> FuncDef | Decorator:
return self.do_func_def(n, is_coroutine=True)
def do_func_def(
self, n: ast3.FunctionDef | ast3.AsyncFunctionDef, is_coroutine: bool = False
) -> FuncDef | Decorator:
"""Helper shared between visit_FunctionDef and visit_AsyncFunctionDef."""
self.class_and_function_stack.append("D")
no_type_check = bool(
n.decorator_list and any(is_no_type_check_decorator(d) for d in n.decorator_list)
)
lineno = n.lineno
args = self.transform_args(n.args, lineno, no_type_check=no_type_check)
if special_function_elide_names(n.name):
for arg in args:
arg.pos_only = True
arg_kinds = [arg.kind for arg in args]
arg_names = [None if arg.pos_only else arg.variable.name for arg in args]
# Type parameters, if using new syntax for generics (PEP 695)
explicit_type_params: list[TypeParam] | None = None
arg_types: list[Type | None] = []
if no_type_check:
arg_types = [None] * len(args)
return_type = None
elif n.type_comment is not None:
try:
func_type_ast = ast3_parse(n.type_comment, "<func_type>", "func_type")
assert isinstance(func_type_ast, FunctionType)
# for ellipsis arg
if (
len(func_type_ast.argtypes) == 1
and isinstance(func_type_ast.argtypes[0], Constant)
and func_type_ast.argtypes[0].value is Ellipsis
):
if n.returns:
# PEP 484 disallows both type annotations and type comments
self.fail(message_registry.DUPLICATE_TYPE_SIGNATURES, lineno, n.col_offset)
arg_types = [
(
a.type_annotation
if a.type_annotation is not None
else AnyType(TypeOfAny.unannotated)
)
for a in args
]
else:
# PEP 484 disallows both type annotations and type comments
if n.returns or any(a.type_annotation is not None for a in args):
self.fail(message_registry.DUPLICATE_TYPE_SIGNATURES, lineno, n.col_offset)
translated_args: list[Type] = TypeConverter(
self.errors, line=lineno, override_column=n.col_offset
).translate_expr_list(func_type_ast.argtypes)
# Use a cast to work around `list` invariance
arg_types = cast(List[Optional[Type]], translated_args)
return_type = TypeConverter(self.errors, line=lineno).visit(func_type_ast.returns)
# add implicit self type
in_method_scope = self.class_and_function_stack[-2:] == ["C", "D"]
if in_method_scope and len(arg_types) < len(args):
arg_types.insert(0, AnyType(TypeOfAny.special_form))
except SyntaxError:
stripped_type = n.type_comment.split("#", 2)[0].strip()
err_msg = message_registry.TYPE_COMMENT_SYNTAX_ERROR_VALUE.format(stripped_type)
self.fail(err_msg, lineno, n.col_offset)
if n.type_comment and n.type_comment[0] not in ["(", "#"]:
self.note(
"Suggestion: wrap argument types in parentheses", lineno, n.col_offset
)
arg_types = [AnyType(TypeOfAny.from_error)] * len(args)
return_type = AnyType(TypeOfAny.from_error)
else:
if sys.version_info >= (3, 12) and n.type_params:
explicit_type_params = self.translate_type_params(n.type_params)
arg_types = [a.type_annotation for a in args]
return_type = TypeConverter(
self.errors, line=n.returns.lineno if n.returns else lineno
).visit(n.returns)
for arg, arg_type in zip(args, arg_types):
self.set_type_optional(arg_type, arg.initializer)
func_type = None
if any(arg_types) or return_type:
if len(arg_types) != 1 and any(isinstance(t, EllipsisType) for t in arg_types):
self.fail(message_registry.ELLIPSIS_WITH_OTHER_TYPEARGS, lineno, n.col_offset)
elif len(arg_types) > len(arg_kinds):
self.fail(
message_registry.TYPE_SIGNATURE_TOO_MANY_ARGS,
lineno,
n.col_offset,
blocker=False,
)
elif len(arg_types) < len(arg_kinds):
self.fail(
message_registry.TYPE_SIGNATURE_TOO_FEW_ARGS,
lineno,
n.col_offset,
blocker=False,
)
else:
func_type = CallableType(
[a if a is not None else AnyType(TypeOfAny.unannotated) for a in arg_types],
arg_kinds,
arg_names,
return_type if return_type is not None else AnyType(TypeOfAny.unannotated),
_dummy_fallback,
)
# End position is always the same.
end_line = getattr(n, "end_lineno", None)
end_column = getattr(n, "end_col_offset", None)
self.class_and_function_stack.pop()
self.class_and_function_stack.append("F")
body = self.as_required_block(n.body, can_strip=True, is_coroutine=is_coroutine)
func_def = FuncDef(n.name, args, body, func_type, explicit_type_params)
if isinstance(func_def.type, CallableType):
# semanal.py does some in-place modifications we want to avoid
func_def.unanalyzed_type = func_def.type.copy_modified()
if is_coroutine:
func_def.is_coroutine = True
if func_type is not None:
func_type.definition = func_def
func_type.line = lineno
if n.decorator_list:
# Set deco_line to the old pre-3.8 lineno, in order to keep
# existing "# type: ignore" comments working:
deco_line = n.decorator_list[0].lineno
var = Var(func_def.name)
var.is_ready = False
var.set_line(lineno)
func_def.is_decorated = True
func_def.deco_line = deco_line
func_def.set_line(lineno, n.col_offset, end_line, end_column)
deco = Decorator(func_def, self.translate_expr_list(n.decorator_list), var)
first = n.decorator_list[0]
deco.set_line(first.lineno, first.col_offset, end_line, end_column)
retval: FuncDef | Decorator = deco
else:
# FuncDef overrides set_line -- can't use self.set_line
func_def.set_line(lineno, n.col_offset, end_line, end_column)
retval = func_def
if self.options.include_docstrings:
func_def.docstring = ast3.get_docstring(n, clean=False)
self.class_and_function_stack.pop()
return retval
def set_type_optional(self, type: Type | None, initializer: Expression | None) -> None:
if not self.options.implicit_optional:
return
# Indicate that type should be wrapped in an Optional if arg is initialized to None.
optional = isinstance(initializer, NameExpr) and initializer.name == "None"
if isinstance(type, UnboundType):
type.optional = optional
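    # Example for set_type_optional above (illustrative): with implicit_optional
    # enabled, a parameter written as `x: int = None` has a NameExpr("None")
    # initializer, so the UnboundType for "int" gets .optional = True and is later
    # treated as Optional[int].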
def transform_args(
self, args: ast3.arguments, line: int, no_type_check: bool = False
) -> list[Argument]:
new_args = []
names: list[ast3.arg] = []
posonlyargs = getattr(args, "posonlyargs", cast(List[ast3.arg], []))
args_args = posonlyargs + args.args
args_defaults = args.defaults
num_no_defaults = len(args_args) - len(args_defaults)
# positional arguments without defaults
for i, a in enumerate(args_args[:num_no_defaults]):
pos_only = i < len(posonlyargs)
new_args.append(self.make_argument(a, None, ARG_POS, no_type_check, pos_only))
names.append(a)
# positional arguments with defaults
for i, (a, d) in enumerate(zip(args_args[num_no_defaults:], args_defaults)):
pos_only = num_no_defaults + i < len(posonlyargs)
new_args.append(self.make_argument(a, d, ARG_OPT, no_type_check, pos_only))
names.append(a)
# *arg
if args.vararg is not None:
new_args.append(self.make_argument(args.vararg, None, ARG_STAR, no_type_check))
names.append(args.vararg)
# keyword-only arguments with defaults
for a, kd in zip(args.kwonlyargs, args.kw_defaults):
new_args.append(
self.make_argument(
a, kd, ARG_NAMED if kd is None else ARG_NAMED_OPT, no_type_check
)
)
names.append(a)
# **kwarg
if args.kwarg is not None:
new_args.append(self.make_argument(args.kwarg, None, ARG_STAR2, no_type_check))
names.append(args.kwarg)
check_arg_names([arg.variable.name for arg in new_args], names, self.fail_arg)
return new_args
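    # Illustrative mapping for transform_args above: for a signature roughly like
    #     def f(a, b=0, *args, c, d=1, **kw): ...
    # the produced Argument kinds are, in order:
    #     a -> ARG_POS, b -> ARG_OPT, *args -> ARG_STAR,
    #     c -> ARG_NAMED, d -> ARG_NAMED_OPT, **kw -> ARG_STAR2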
def make_argument(
self,
arg: ast3.arg,
default: ast3.expr | None,
kind: ArgKind,
no_type_check: bool,
pos_only: bool = False,
) -> Argument:
if no_type_check:
arg_type = None
else:
annotation = arg.annotation
type_comment = arg.type_comment
if annotation is not None and type_comment is not None:
self.fail(message_registry.DUPLICATE_TYPE_SIGNATURES, arg.lineno, arg.col_offset)
arg_type = None
if annotation is not None:
arg_type = TypeConverter(self.errors, line=arg.lineno).visit(annotation)
else:
arg_type = self.translate_type_comment(arg, type_comment)
if argument_elide_name(arg.arg):
pos_only = True
argument = Argument(Var(arg.arg, arg_type), arg_type, self.visit(default), kind, pos_only)
argument.set_line(
arg.lineno,
arg.col_offset,
getattr(arg, "end_lineno", None),
getattr(arg, "end_col_offset", None),
)
return argument
def fail_arg(self, msg: str, arg: ast3.arg) -> None:
self.fail(ErrorMessage(msg), arg.lineno, arg.col_offset)
# ClassDef(identifier name,
# expr* bases,
# keyword* keywords,
# stmt* body,
# expr* decorator_list)
def visit_ClassDef(self, n: ast3.ClassDef) -> ClassDef:
self.class_and_function_stack.append("C")
keywords = [(kw.arg, self.visit(kw.value)) for kw in n.keywords if kw.arg]
# Type parameters, if using new syntax for generics (PEP 695)
explicit_type_params: list[TypeParam] | None = None
if sys.version_info >= (3, 12) and n.type_params:
explicit_type_params = self.translate_type_params(n.type_params)
cdef = ClassDef(
n.name,
self.as_required_block(n.body),
None,
self.translate_expr_list(n.bases),
metaclass=dict(keywords).get("metaclass"),
keywords=keywords,
type_args=explicit_type_params,
)
cdef.decorators = self.translate_expr_list(n.decorator_list)
# Set lines to match the old mypy 0.700 lines, in order to keep
# existing "# type: ignore" comments working:
cdef.line = n.lineno
cdef.deco_line = n.decorator_list[0].lineno if n.decorator_list else None
if self.options.include_docstrings:
cdef.docstring = ast3.get_docstring(n, clean=False)
cdef.column = n.col_offset
cdef.end_line = getattr(n, "end_lineno", None)
cdef.end_column = getattr(n, "end_col_offset", None)
self.class_and_function_stack.pop()
return cdef
def validate_type_param(self, type_param: ast_TypeVar) -> None:
incorrect_expr = find_disallowed_expression_in_annotation_scope(type_param.bound)
if incorrect_expr is None:
return
if isinstance(incorrect_expr, (ast3.Yield, ast3.YieldFrom)):
self.fail(
message_registry.TYPE_VAR_YIELD_EXPRESSION_IN_BOUND,
type_param.lineno,
type_param.col_offset,
)
if isinstance(incorrect_expr, ast3.NamedExpr):
self.fail(
message_registry.TYPE_VAR_NAMED_EXPRESSION_IN_BOUND,
type_param.lineno,
type_param.col_offset,
)
if isinstance(incorrect_expr, ast3.Await):
self.fail(
message_registry.TYPE_VAR_AWAIT_EXPRESSION_IN_BOUND,
type_param.lineno,
type_param.col_offset,
)
def translate_type_params(self, type_params: list[Any]) -> list[TypeParam]:
explicit_type_params = []
for p in type_params:
bound = None
values: list[Type] = []
if sys.version_info >= (3, 13) and p.default_value is not None:
self.fail(
message_registry.TYPE_PARAM_DEFAULT_NOT_SUPPORTED,
p.lineno,
p.col_offset,
blocker=False,
)
if isinstance(p, ast_ParamSpec): # type: ignore[misc]
explicit_type_params.append(TypeParam(p.name, PARAM_SPEC_KIND, None, []))
elif isinstance(p, ast_TypeVarTuple): # type: ignore[misc]
explicit_type_params.append(TypeParam(p.name, TYPE_VAR_TUPLE_KIND, None, []))
else:
if isinstance(p.bound, ast3.Tuple):
if len(p.bound.elts) < 2:
self.fail(
message_registry.TYPE_VAR_TOO_FEW_CONSTRAINED_TYPES,
p.lineno,
p.col_offset,
blocker=False,
)
else:
conv = TypeConverter(self.errors, line=p.lineno)
values = [conv.visit(t) for t in p.bound.elts]
elif p.bound is not None:
self.validate_type_param(p)
bound = TypeConverter(self.errors, line=p.lineno).visit(p.bound)
explicit_type_params.append(TypeParam(p.name, TYPE_VAR_KIND, bound, values))
return explicit_type_params
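    # Illustrative example for translate_type_params above (PEP 695 syntax,
    # Python 3.12+): for `class C[T: int, *Ts, **P]: ...` the result is roughly
    #     [TypeParam("T", TYPE_VAR_KIND, <converted int bound>, []),
    #      TypeParam("Ts", TYPE_VAR_TUPLE_KIND, None, []),
    #      TypeParam("P", PARAM_SPEC_KIND, None, [])]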
# Return(expr? value)
def visit_Return(self, n: ast3.Return) -> ReturnStmt:
node = ReturnStmt(self.visit(n.value))
return self.set_line(node, n)
# Delete(expr* targets)
def visit_Delete(self, n: ast3.Delete) -> DelStmt:
if len(n.targets) > 1:
tup = TupleExpr(self.translate_expr_list(n.targets))
tup.set_line(n.lineno)
node = DelStmt(tup)
else:
node = DelStmt(self.visit(n.targets[0]))
return self.set_line(node, n)
# Assign(expr* targets, expr? value, string? type_comment, expr? annotation)
def visit_Assign(self, n: ast3.Assign) -> AssignmentStmt:
lvalues = self.translate_expr_list(n.targets)
rvalue = self.visit(n.value)
typ = self.translate_type_comment(n, n.type_comment)
s = AssignmentStmt(lvalues, rvalue, type=typ, new_syntax=False)
return self.set_line(s, n)
# AnnAssign(expr target, expr annotation, expr? value, int simple)
def visit_AnnAssign(self, n: ast3.AnnAssign) -> AssignmentStmt:
line = n.lineno
if n.value is None: # always allow 'x: int'
rvalue: Expression = TempNode(AnyType(TypeOfAny.special_form), no_rhs=True)
rvalue.line = line
rvalue.column = n.col_offset
else:
rvalue = self.visit(n.value)
typ = TypeConverter(self.errors, line=line).visit(n.annotation)
assert typ is not None
typ.column = n.annotation.col_offset
s = AssignmentStmt([self.visit(n.target)], rvalue, type=typ, new_syntax=True)
return self.set_line(s, n)
# AugAssign(expr target, operator op, expr value)
def visit_AugAssign(self, n: ast3.AugAssign) -> OperatorAssignmentStmt:
s = OperatorAssignmentStmt(
self.from_operator(n.op), self.visit(n.target), self.visit(n.value)
)
return self.set_line(s, n)
# For(expr target, expr iter, stmt* body, stmt* orelse, string? type_comment)
def visit_For(self, n: ast3.For) -> ForStmt:
target_type = self.translate_type_comment(n, n.type_comment)
node = ForStmt(
self.visit(n.target),
self.visit(n.iter),
self.as_required_block(n.body),
self.as_block(n.orelse),
target_type,
)
return self.set_line(node, n)
# AsyncFor(expr target, expr iter, stmt* body, stmt* orelse, string? type_comment)
def visit_AsyncFor(self, n: ast3.AsyncFor) -> ForStmt:
target_type = self.translate_type_comment(n, n.type_comment)
node = ForStmt(
self.visit(n.target),
self.visit(n.iter),
self.as_required_block(n.body),
self.as_block(n.orelse),
target_type,
)
node.is_async = True
return self.set_line(node, n)
# While(expr test, stmt* body, stmt* orelse)
def visit_While(self, n: ast3.While) -> WhileStmt:
node = WhileStmt(
self.visit(n.test), self.as_required_block(n.body), self.as_block(n.orelse)
)
return self.set_line(node, n)
# If(expr test, stmt* body, stmt* orelse)
def visit_If(self, n: ast3.If) -> IfStmt:
node = IfStmt(
[self.visit(n.test)], [self.as_required_block(n.body)], self.as_block(n.orelse)
)
return self.set_line(node, n)
# With(withitem* items, stmt* body, string? type_comment)
def visit_With(self, n: ast3.With) -> WithStmt:
target_type = self.translate_type_comment(n, n.type_comment)
node = WithStmt(
[self.visit(i.context_expr) for i in n.items],
[self.visit(i.optional_vars) for i in n.items],
self.as_required_block(n.body),
target_type,
)
return self.set_line(node, n)
# AsyncWith(withitem* items, stmt* body, string? type_comment)
def visit_AsyncWith(self, n: ast3.AsyncWith) -> WithStmt:
target_type = self.translate_type_comment(n, n.type_comment)
s = WithStmt(
[self.visit(i.context_expr) for i in n.items],
[self.visit(i.optional_vars) for i in n.items],
self.as_required_block(n.body),
target_type,
)
s.is_async = True
return self.set_line(s, n)
# Raise(expr? exc, expr? cause)
def visit_Raise(self, n: ast3.Raise) -> RaiseStmt:
node = RaiseStmt(self.visit(n.exc), self.visit(n.cause))
return self.set_line(node, n)
# Try(stmt* body, excepthandler* handlers, stmt* orelse, stmt* finalbody)
def visit_Try(self, n: ast3.Try) -> TryStmt:
vs = [
self.set_line(NameExpr(h.name), h) if h.name is not None else None for h in n.handlers
]
types = [self.visit(h.type) for h in n.handlers]
handlers = [self.as_required_block(h.body) for h in n.handlers]
node = TryStmt(
self.as_required_block(n.body),
vs,
types,
handlers,
self.as_block(n.orelse),
self.as_block(n.finalbody),
)
return self.set_line(node, n)
def visit_TryStar(self, n: TryStar) -> TryStmt:
vs = [
self.set_line(NameExpr(h.name), h) if h.name is not None else None for h in n.handlers
]
types = [self.visit(h.type) for h in n.handlers]
handlers = [self.as_required_block(h.body) for h in n.handlers]
node = TryStmt(
self.as_required_block(n.body),
vs,
types,
handlers,
self.as_block(n.orelse),
self.as_block(n.finalbody),
)
node.is_star = True
return self.set_line(node, n)
# Assert(expr test, expr? msg)
def visit_Assert(self, n: ast3.Assert) -> AssertStmt:
node = AssertStmt(self.visit(n.test), self.visit(n.msg))
return self.set_line(node, n)
# Import(alias* names)
def visit_Import(self, n: ast3.Import) -> Import:
names: list[tuple[str, str | None]] = []
for alias in n.names:
name = self.translate_module_id(alias.name)
asname = alias.asname
if asname is None and name != alias.name:
# if the module name has been translated (and it's not already
# an explicit import-as), make it an implicit import-as the
# original name
asname = alias.name
names.append((name, asname))
i = Import(names)
self.imports.append(i)
return self.set_line(i, n)
# ImportFrom(identifier? module, alias* names, int? level)
def visit_ImportFrom(self, n: ast3.ImportFrom) -> ImportBase:
assert n.level is not None
if len(n.names) == 1 and n.names[0].name == "*":
mod = n.module if n.module is not None else ""
i: ImportBase = ImportAll(mod, n.level)
else:
i = ImportFrom(
self.translate_module_id(n.module) if n.module is not None else "",
n.level,
[(a.name, a.asname) for a in n.names],
)
self.imports.append(i)
return self.set_line(i, n)
# Global(identifier* names)
def visit_Global(self, n: ast3.Global) -> GlobalDecl:
g = GlobalDecl(n.names)
return self.set_line(g, n)
# Nonlocal(identifier* names)
def visit_Nonlocal(self, n: ast3.Nonlocal) -> NonlocalDecl:
d = NonlocalDecl(n.names)
return self.set_line(d, n)
# Expr(expr value)
def visit_Expr(self, n: ast3.Expr) -> ExpressionStmt:
value = self.visit(n.value)
node = ExpressionStmt(value)
return self.set_line(node, n)
# Pass
def visit_Pass(self, n: ast3.Pass) -> PassStmt:
s = PassStmt()
return self.set_line(s, n)
# Break
def visit_Break(self, n: ast3.Break) -> BreakStmt:
s = BreakStmt()
return self.set_line(s, n)
# Continue
def visit_Continue(self, n: ast3.Continue) -> ContinueStmt:
s = ContinueStmt()
return self.set_line(s, n)
# --- expr ---
def visit_NamedExpr(self, n: NamedExpr) -> AssignmentExpr:
s = AssignmentExpr(self.visit(n.target), self.visit(n.value))
return self.set_line(s, n)
# BoolOp(boolop op, expr* values)
def visit_BoolOp(self, n: ast3.BoolOp) -> OpExpr:
# mypy translates (1 and 2 and 3) as (1 and (2 and 3))
assert len(n.values) >= 2
op_node = n.op
if isinstance(op_node, ast3.And):
op = "and"
elif isinstance(op_node, ast3.Or):
op = "or"
else:
raise RuntimeError("unknown BoolOp " + str(type(n)))
# potentially inefficient!
return self.group(op, self.translate_expr_list(n.values), n)
def group(self, op: str, vals: list[Expression], n: ast3.expr) -> OpExpr:
if len(vals) == 2:
e = OpExpr(op, vals[0], vals[1])
else:
e = OpExpr(op, vals[0], self.group(op, vals[1:], n))
return self.set_line(e, n)
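    # Illustrative example for visit_BoolOp/group above: `a and b and c` becomes
    # OpExpr("and", a, OpExpr("and", b, c)), i.e. grouping nests to the right.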
# BinOp(expr left, operator op, expr right)
def visit_BinOp(self, n: ast3.BinOp) -> OpExpr:
op = self.from_operator(n.op)
if op is None:
raise RuntimeError("cannot translate BinOp " + str(type(n.op)))
e = OpExpr(op, self.visit(n.left), self.visit(n.right))
return self.set_line(e, n)
# UnaryOp(unaryop op, expr operand)
def visit_UnaryOp(self, n: ast3.UnaryOp) -> UnaryExpr:
op = None
if isinstance(n.op, ast3.Invert):
op = "~"
elif isinstance(n.op, ast3.Not):
op = "not"
elif isinstance(n.op, ast3.UAdd):
op = "+"
elif isinstance(n.op, ast3.USub):
op = "-"
if op is None:
raise RuntimeError("cannot translate UnaryOp " + str(type(n.op)))
e = UnaryExpr(op, self.visit(n.operand))
return self.set_line(e, n)
# Lambda(arguments args, expr body)
def visit_Lambda(self, n: ast3.Lambda) -> LambdaExpr:
body = ast3.Return(n.body)
body.lineno = n.body.lineno
body.col_offset = n.body.col_offset
self.class_and_function_stack.append("L")
e = LambdaExpr(self.transform_args(n.args, n.lineno), self.as_required_block([body]))
self.class_and_function_stack.pop()
e.set_line(n.lineno, n.col_offset) # Overrides set_line -- can't use self.set_line
return e
# IfExp(expr test, expr body, expr orelse)
def visit_IfExp(self, n: ast3.IfExp) -> ConditionalExpr:
e = ConditionalExpr(self.visit(n.test), self.visit(n.body), self.visit(n.orelse))
return self.set_line(e, n)
# Dict(expr* keys, expr* values)
def visit_Dict(self, n: ast3.Dict) -> DictExpr:
e = DictExpr(
list(zip(self.translate_opt_expr_list(n.keys), self.translate_expr_list(n.values)))
)
return self.set_line(e, n)
# Set(expr* elts)
def visit_Set(self, n: ast3.Set) -> SetExpr:
e = SetExpr(self.translate_expr_list(n.elts))
return self.set_line(e, n)
# ListComp(expr elt, comprehension* generators)
def visit_ListComp(self, n: ast3.ListComp) -> ListComprehension:
e = ListComprehension(self.visit_GeneratorExp(cast(ast3.GeneratorExp, n)))
return self.set_line(e, n)
# SetComp(expr elt, comprehension* generators)
def visit_SetComp(self, n: ast3.SetComp) -> SetComprehension:
e = SetComprehension(self.visit_GeneratorExp(cast(ast3.GeneratorExp, n)))
return self.set_line(e, n)
# DictComp(expr key, expr value, comprehension* generators)
def visit_DictComp(self, n: ast3.DictComp) -> DictionaryComprehension:
targets = [self.visit(c.target) for c in n.generators]
iters = [self.visit(c.iter) for c in n.generators]
ifs_list = [self.translate_expr_list(c.ifs) for c in n.generators]
is_async = [bool(c.is_async) for c in n.generators]
e = DictionaryComprehension(
self.visit(n.key), self.visit(n.value), targets, iters, ifs_list, is_async
)
return self.set_line(e, n)
# GeneratorExp(expr elt, comprehension* generators)
def visit_GeneratorExp(self, n: ast3.GeneratorExp) -> GeneratorExpr:
targets = [self.visit(c.target) for c in n.generators]
iters = [self.visit(c.iter) for c in n.generators]
ifs_list = [self.translate_expr_list(c.ifs) for c in n.generators]
is_async = [bool(c.is_async) for c in n.generators]
e = GeneratorExpr(self.visit(n.elt), targets, iters, ifs_list, is_async)
return self.set_line(e, n)
# Await(expr value)
def visit_Await(self, n: ast3.Await) -> AwaitExpr:
v = self.visit(n.value)
e = AwaitExpr(v)
return self.set_line(e, n)
# Yield(expr? value)
def visit_Yield(self, n: ast3.Yield) -> YieldExpr:
e = YieldExpr(self.visit(n.value))
return self.set_line(e, n)
# YieldFrom(expr value)
def visit_YieldFrom(self, n: ast3.YieldFrom) -> YieldFromExpr:
e = YieldFromExpr(self.visit(n.value))
return self.set_line(e, n)
# Compare(expr left, cmpop* ops, expr* comparators)
def visit_Compare(self, n: ast3.Compare) -> ComparisonExpr:
operators = [self.from_comp_operator(o) for o in n.ops]
operands = self.translate_expr_list([n.left] + n.comparators)
e = ComparisonExpr(operators, operands)
return self.set_line(e, n)
# Call(expr func, expr* args, keyword* keywords)
# keyword = (identifier? arg, expr value)
def visit_Call(self, n: Call) -> CallExpr:
args = n.args
keywords = n.keywords
keyword_names = [k.arg for k in keywords]
arg_types = self.translate_expr_list(
[a.value if isinstance(a, Starred) else a for a in args] + [k.value for k in keywords]
)
arg_kinds = [ARG_STAR if type(a) is Starred else ARG_POS for a in args] + [
ARG_STAR2 if arg is None else ARG_NAMED for arg in keyword_names
]
e = CallExpr(
self.visit(n.func),
arg_types,
arg_kinds,
cast("List[Optional[str]]", [None] * len(args)) + keyword_names,
)
return self.set_line(e, n)
# Constant(object value) -- a constant, in Python 3.8.
def visit_Constant(self, n: Constant) -> Any:
val = n.value
e: Any = None
if val is None:
e = NameExpr("None")
elif isinstance(val, str):
e = StrExpr(val)
elif isinstance(val, bytes):
e = BytesExpr(bytes_to_human_readable_repr(val))
elif isinstance(val, bool): # Must check before int!
e = NameExpr(str(val))
elif isinstance(val, int):
e = IntExpr(val)
elif isinstance(val, float):
e = FloatExpr(val)
elif isinstance(val, complex):
e = ComplexExpr(val)
elif val is Ellipsis:
e = EllipsisExpr()
else:
raise RuntimeError("Constant not implemented for " + str(type(val)))
return self.set_line(e, n)
# JoinedStr(expr* values)
def visit_JoinedStr(self, n: ast3.JoinedStr) -> Expression:
# Each of n.values is a str or FormattedValue; we just concatenate
# them all using ''.join.
empty_string = StrExpr("")
empty_string.set_line(n.lineno, n.col_offset)
strs_to_join = ListExpr(self.translate_expr_list(n.values))
strs_to_join.set_line(empty_string)
# Don't make unnecessary join call if there is only one str to join
if len(strs_to_join.items) == 1:
return self.set_line(strs_to_join.items[0], n)
elif len(strs_to_join.items) > 1:
last = strs_to_join.items[-1]
if isinstance(last, StrExpr) and last.value == "":
# 3.12 can add an empty literal at the end. Delete it for consistency
# between Python versions.
del strs_to_join.items[-1:]
join_method = MemberExpr(empty_string, "join")
join_method.set_line(empty_string)
result_expression = CallExpr(join_method, [strs_to_join], [ARG_POS], [None])
return self.set_line(result_expression, n)
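    # Illustrative translation for visit_JoinedStr above: an f-string such as
    #     f"x={x}!"
    # becomes roughly
    #     "".join(["x=", "{:{}}".format(x, ""), "!"])
    # where the middle element is produced by visit_FormattedValue below.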
# FormattedValue(expr value)
def visit_FormattedValue(self, n: ast3.FormattedValue) -> Expression:
# A FormattedValue is a component of a JoinedStr, or it can exist
# on its own. We translate them to individual '{}'.format(value)
# calls. Format specifier and conversion information is passed along
# to allow mypyc to support f-strings with format specifiers and conversions.
val_exp = self.visit(n.value)
val_exp.set_line(n.lineno, n.col_offset)
conv_str = "" if n.conversion < 0 else "!" + chr(n.conversion)
format_string = StrExpr("{" + conv_str + ":{}}")
format_spec_exp = self.visit(n.format_spec) if n.format_spec is not None else StrExpr("")
format_string.set_line(n.lineno, n.col_offset)
format_method = MemberExpr(format_string, "format")
format_method.set_line(format_string)
result_expression = CallExpr(
format_method, [val_exp, format_spec_exp], [ARG_POS, ARG_POS], [None, None]
)
return self.set_line(result_expression, n)
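    # Illustrative translation for visit_FormattedValue above:
    #     f"{x!r:>10}"  ->  "{!r:{}}".format(x, ">10")
    # A conversion value below 0 means "no conversion", in which case the "!" part
    # is omitted from the format string.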
# Attribute(expr value, identifier attr, expr_context ctx)
def visit_Attribute(self, n: Attribute) -> MemberExpr | SuperExpr:
value = n.value
member_expr = MemberExpr(self.visit(value), n.attr)
obj = member_expr.expr
if (
isinstance(obj, CallExpr)
and isinstance(obj.callee, NameExpr)
and obj.callee.name == "super"
):
e: MemberExpr | SuperExpr = SuperExpr(member_expr.name, obj)
else:
e = member_expr
return self.set_line(e, n)
# Subscript(expr value, slice slice, expr_context ctx)
def visit_Subscript(self, n: ast3.Subscript) -> IndexExpr:
e = IndexExpr(self.visit(n.value), self.visit(n.slice))
self.set_line(e, n)
# alias to please mypyc
is_py38_or_earlier = sys.version_info < (3, 9)
if isinstance(n.slice, ast3.Slice) or (
is_py38_or_earlier and isinstance(n.slice, ast3.ExtSlice)
):
# Before Python 3.9, Slice has no line/column in the raw ast. To avoid incompatibility
# visit_Slice doesn't set_line, even in Python 3.9 on.
# ExtSlice also has no line/column info. In Python 3.9 on, line/column is set for
# e.index when visiting n.slice.
e.index.line = e.line
e.index.column = e.column
return e
# Starred(expr value, expr_context ctx)
def visit_Starred(self, n: Starred) -> StarExpr:
e = StarExpr(self.visit(n.value))
return self.set_line(e, n)
# Name(identifier id, expr_context ctx)
def visit_Name(self, n: Name) -> NameExpr:
e = NameExpr(n.id)
return self.set_line(e, n)
# List(expr* elts, expr_context ctx)
def visit_List(self, n: ast3.List) -> ListExpr | TupleExpr:
expr_list: list[Expression] = [self.visit(e) for e in n.elts]
if isinstance(n.ctx, ast3.Store):
            # [x, y] = z and (x, y) = z mean exactly the same thing
e: ListExpr | TupleExpr = TupleExpr(expr_list)
else:
e = ListExpr(expr_list)
return self.set_line(e, n)
# Tuple(expr* elts, expr_context ctx)
def visit_Tuple(self, n: ast3.Tuple) -> TupleExpr:
e = TupleExpr(self.translate_expr_list(n.elts))
return self.set_line(e, n)
# --- slice ---
# Slice(expr? lower, expr? upper, expr? step)
def visit_Slice(self, n: ast3.Slice) -> SliceExpr:
return SliceExpr(self.visit(n.lower), self.visit(n.upper), self.visit(n.step))
# ExtSlice(slice* dims)
def visit_ExtSlice(self, n: ast3.ExtSlice) -> TupleExpr:
# cast for mypyc's benefit on Python 3.9
return TupleExpr(self.translate_expr_list(cast(Any, n).dims))
# Index(expr value)
def visit_Index(self, n: Index) -> Node:
# cast for mypyc's benefit on Python 3.9
value = self.visit(cast(Any, n).value)
assert isinstance(value, Node)
return value
# Match(expr subject, match_case* cases) # python 3.10 and later
def visit_Match(self, n: Match) -> MatchStmt:
node = MatchStmt(
self.visit(n.subject),
[self.visit(c.pattern) for c in n.cases],
[self.visit(c.guard) for c in n.cases],
[self.as_required_block(c.body) for c in n.cases],
)
return self.set_line(node, n)
def visit_MatchValue(self, n: MatchValue) -> ValuePattern:
node = ValuePattern(self.visit(n.value))
return self.set_line(node, n)
def visit_MatchSingleton(self, n: MatchSingleton) -> SingletonPattern:
node = SingletonPattern(n.value)
return self.set_line(node, n)
def visit_MatchSequence(self, n: MatchSequence) -> SequencePattern:
patterns = [self.visit(p) for p in n.patterns]
stars = [p for p in patterns if isinstance(p, StarredPattern)]
assert len(stars) < 2
node = SequencePattern(patterns)
return self.set_line(node, n)
def visit_MatchStar(self, n: MatchStar) -> StarredPattern:
if n.name is None:
node = StarredPattern(None)
else:
name = self.set_line(NameExpr(n.name), n)
node = StarredPattern(name)
return self.set_line(node, n)
def visit_MatchMapping(self, n: MatchMapping) -> MappingPattern:
keys = [self.visit(k) for k in n.keys]
values = [self.visit(v) for v in n.patterns]
if n.rest is None:
rest = None
else:
rest = NameExpr(n.rest)
node = MappingPattern(keys, values, rest)
return self.set_line(node, n)
def visit_MatchClass(self, n: MatchClass) -> ClassPattern:
class_ref = self.visit(n.cls)
assert isinstance(class_ref, RefExpr)
positionals = [self.visit(p) for p in n.patterns]
keyword_keys = n.kwd_attrs
keyword_values = [self.visit(p) for p in n.kwd_patterns]
node = ClassPattern(class_ref, positionals, keyword_keys, keyword_values)
return self.set_line(node, n)
# MatchAs(expr pattern, identifier name)
def visit_MatchAs(self, n: MatchAs) -> AsPattern:
if n.name is None:
name = None
else:
name = NameExpr(n.name)
name = self.set_line(name, n)
node = AsPattern(self.visit(n.pattern), name)
return self.set_line(node, n)
# MatchOr(expr* pattern)
def visit_MatchOr(self, n: MatchOr) -> OrPattern:
node = OrPattern([self.visit(pattern) for pattern in n.patterns])
return self.set_line(node, n)
def validate_type_alias(self, n: ast_TypeAlias) -> None:
incorrect_expr = find_disallowed_expression_in_annotation_scope(n.value)
if incorrect_expr is None:
return
if isinstance(incorrect_expr, (ast3.Yield, ast3.YieldFrom)):
self.fail(message_registry.TYPE_ALIAS_WITH_YIELD_EXPRESSION, n.lineno, n.col_offset)
if isinstance(incorrect_expr, ast3.NamedExpr):
self.fail(message_registry.TYPE_ALIAS_WITH_NAMED_EXPRESSION, n.lineno, n.col_offset)
if isinstance(incorrect_expr, ast3.Await):
self.fail(message_registry.TYPE_ALIAS_WITH_AWAIT_EXPRESSION, n.lineno, n.col_offset)
# TypeAlias(identifier name, type_param* type_params, expr value)
def visit_TypeAlias(self, n: ast_TypeAlias) -> TypeAliasStmt | AssignmentStmt:
node: TypeAliasStmt | AssignmentStmt
type_params = self.translate_type_params(n.type_params)
self.validate_type_alias(n)
value = self.visit(n.value)
# Since the value is evaluated lazily, wrap the value inside a lambda.
# This helps mypyc.
ret = ReturnStmt(value)
self.set_line(ret, n.value)
value_func = LambdaExpr(body=Block([ret]))
self.set_line(value_func, n.value)
node = TypeAliasStmt(self.visit_Name(n.name), type_params, value_func)
return self.set_line(node, n)
class TypeConverter:
def __init__(
self,
errors: Errors | None,
line: int = -1,
override_column: int = -1,
is_evaluated: bool = True,
) -> None:
self.errors = errors
self.line = line
self.override_column = override_column
self.node_stack: list[AST] = []
self.is_evaluated = is_evaluated
def convert_column(self, column: int) -> int:
"""Apply column override if defined; otherwise return column.
Column numbers are sometimes incorrect in the AST and the column
override can be used to work around that.
"""
if self.override_column < 0:
return column
else:
return self.override_column
def invalid_type(self, node: AST, note: str | None = None) -> RawExpressionType:
"""Constructs a type representing some expression that normally forms an invalid type.
For example, if we see a type hint that says "3 + 4", we would transform that
expression into a RawExpressionType.
The semantic analysis layer will report an "Invalid type" error when it
encounters this type, along with the given note if one is provided.
See RawExpressionType's docstring for more details on how it's used.
"""
return RawExpressionType(
None, "typing.Any", line=self.line, column=getattr(node, "col_offset", -1), note=note
)
@overload
def visit(self, node: ast3.expr) -> ProperType: ...
@overload
def visit(self, node: AST | None) -> ProperType | None: ...
def visit(self, node: AST | None) -> ProperType | None:
"""Modified visit -- keep track of the stack of nodes"""
if node is None:
return None
self.node_stack.append(node)
try:
method = "visit_" + node.__class__.__name__
visitor = getattr(self, method, None)
if visitor is not None:
typ = visitor(node)
assert isinstance(typ, ProperType)
return typ
else:
return self.invalid_type(node)
finally:
self.node_stack.pop()
def parent(self) -> AST | None:
"""Return the AST node above the one we are processing"""
if len(self.node_stack) < 2:
return None
return self.node_stack[-2]
def fail(self, msg: ErrorMessage, line: int, column: int) -> None:
if self.errors:
self.errors.report(line, column, msg.value, blocker=True, code=msg.code)
def note(self, msg: str, line: int, column: int) -> None:
if self.errors:
self.errors.report(line, column, msg, severity="note", code=codes.SYNTAX)
def translate_expr_list(self, l: Sequence[ast3.expr]) -> list[Type]:
return [self.visit(e) for e in l]
def visit_Call(self, e: Call) -> Type:
# Parse the arg constructor
f = e.func
constructor = stringify_name(f)
if not isinstance(self.parent(), ast3.List):
note = None
if constructor:
note = "Suggestion: use {0}[...] instead of {0}(...)".format(constructor)
return self.invalid_type(e, note=note)
if not constructor:
self.fail(message_registry.ARG_CONSTRUCTOR_NAME_EXPECTED, e.lineno, e.col_offset)
name: str | None = None
default_type = AnyType(TypeOfAny.special_form)
typ: Type = default_type
for i, arg in enumerate(e.args):
if i == 0:
converted = self.visit(arg)
assert converted is not None
typ = converted
elif i == 1:
name = self._extract_argument_name(arg)
else:
self.fail(message_registry.ARG_CONSTRUCTOR_TOO_MANY_ARGS, f.lineno, f.col_offset)
for k in e.keywords:
value = k.value
if k.arg == "name":
if name is not None:
self.fail(
message_registry.MULTIPLE_VALUES_FOR_NAME_KWARG.format(constructor),
f.lineno,
f.col_offset,
)
name = self._extract_argument_name(value)
elif k.arg == "type":
if typ is not default_type:
self.fail(
message_registry.MULTIPLE_VALUES_FOR_TYPE_KWARG.format(constructor),
f.lineno,
f.col_offset,
)
converted = self.visit(value)
assert converted is not None
typ = converted
else:
self.fail(
message_registry.ARG_CONSTRUCTOR_UNEXPECTED_ARG.format(k.arg),
value.lineno,
value.col_offset,
)
return CallableArgument(typ, name, constructor, e.lineno, e.col_offset)
def translate_argument_list(self, l: Sequence[ast3.expr]) -> TypeList:
return TypeList([self.visit(e) for e in l], line=self.line)
def _extract_argument_name(self, n: ast3.expr) -> str | None:
if isinstance(n, Constant) and isinstance(n.value, str):
return n.value.strip()
elif isinstance(n, Constant) and n.value is None:
return None
self.fail(
message_registry.ARG_NAME_EXPECTED_STRING_LITERAL.format(type(n).__name__),
self.line,
0,
)
return None
def visit_Name(self, n: Name) -> Type:
return UnboundType(n.id, line=self.line, column=self.convert_column(n.col_offset))
def visit_BinOp(self, n: ast3.BinOp) -> Type:
if not isinstance(n.op, ast3.BitOr):
return self.invalid_type(n)
left = self.visit(n.left)
right = self.visit(n.right)
return UnionType(
[left, right],
line=self.line,
column=self.convert_column(n.col_offset),
is_evaluated=self.is_evaluated,
uses_pep604_syntax=True,
)
def visit_Constant(self, n: Constant) -> Type:
val = n.value
if val is None:
# None is a type.
return UnboundType("None", line=self.line)
if isinstance(val, str):
# Parse forward reference.
return parse_type_string(val, "builtins.str", self.line, n.col_offset)
if val is Ellipsis:
# '...' is valid in some types.
return EllipsisType(line=self.line)
if isinstance(val, bool):
# Special case for True/False.
return RawExpressionType(val, "builtins.bool", line=self.line)
if isinstance(val, (int, float, complex)):
return self.numeric_type(val, n)
if isinstance(val, bytes):
contents = bytes_to_human_readable_repr(val)
return RawExpressionType(contents, "builtins.bytes", self.line, column=n.col_offset)
# Everything else is invalid.
return self.invalid_type(n)
# UnaryOp(op, operand)
def visit_UnaryOp(self, n: UnaryOp) -> Type:
# We support specifically Literal[-4], Literal[+4], and nothing else.
# For example, Literal[~6] or Literal[not False] is not supported.
typ = self.visit(n.operand)
if (
isinstance(typ, RawExpressionType)
# Use type() because we do not want to allow bools.
and type(typ.literal_value) is int # noqa: E721
):
if isinstance(n.op, USub):
typ.literal_value *= -1
return typ
if isinstance(n.op, UAdd):
return typ
return self.invalid_type(n)
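    # Illustrative example for visit_UnaryOp above: in `Literal[-4]` the operand
    # first converts to RawExpressionType(4, "builtins.int"); the USub branch then
    # flips literal_value to -4 and returns the same node.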
def numeric_type(self, value: object, n: AST) -> Type:
# The node's field has the type complex, but complex isn't *really*
# a parent of int and float, and this causes isinstance below
# to think that the complex branch is always picked. Avoid
# this by throwing away the type.
if isinstance(value, int):
numeric_value: int | None = value
type_name = "builtins.int"
else:
# Other kinds of numbers (floats, complex) are not valid parameters for
# RawExpressionType so we just pass in 'None' for now. We'll report the
# appropriate error at a later stage.
numeric_value = None
type_name = f"builtins.{type(value).__name__}"
return RawExpressionType(
numeric_value, type_name, line=self.line, column=getattr(n, "col_offset", -1)
)
def visit_Index(self, n: ast3.Index) -> Type:
# cast for mypyc's benefit on Python 3.9
value = self.visit(cast(Any, n).value)
assert isinstance(value, Type)
return value
def visit_Slice(self, n: ast3.Slice) -> Type:
return self.invalid_type(n, note="did you mean to use ',' instead of ':' ?")
# Subscript(expr value, slice slice, expr_context ctx) # Python 3.8 and before
# Subscript(expr value, expr slice, expr_context ctx) # Python 3.9 and later
def visit_Subscript(self, n: ast3.Subscript) -> Type:
if sys.version_info >= (3, 9): # Really 3.9a5 or later
sliceval: Any = n.slice
# Python 3.8 or earlier use a different AST structure for subscripts
elif isinstance(n.slice, ast3.Index):
sliceval: Any = n.slice.value
elif isinstance(n.slice, ast3.Slice):
sliceval = copy.deepcopy(n.slice) # so we don't mutate passed AST
if getattr(sliceval, "col_offset", None) is None:
# Fix column information so that we get Python 3.9+ message order
sliceval.col_offset = sliceval.lower.col_offset
else:
assert isinstance(n.slice, ast3.ExtSlice)
dims = cast(List[ast3.expr], copy.deepcopy(n.slice.dims))
for s in dims:
# These fields don't actually have a col_offset attribute but we add
# it manually.
if getattr(s, "col_offset", None) is None:
if isinstance(s, ast3.Index):
s.col_offset = s.value.col_offset
elif isinstance(s, ast3.Slice):
assert s.lower is not None
s.col_offset = s.lower.col_offset
sliceval = ast3.Tuple(dims, n.ctx)
empty_tuple_index = False
if isinstance(sliceval, ast3.Tuple):
params = self.translate_expr_list(sliceval.elts)
if len(sliceval.elts) == 0:
empty_tuple_index = True
else:
params = [self.visit(sliceval)]
value = self.visit(n.value)
if isinstance(value, UnboundType) and not value.args:
return UnboundType(
value.name,
params,
line=self.line,
column=value.column,
empty_tuple_index=empty_tuple_index,
)
else:
return self.invalid_type(n)
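    # Illustrative example for visit_Subscript above: an annotation like
    # `Dict[str, int]` converts the base to UnboundType("Dict") (no args yet) and
    # the slice tuple to its params, yielding roughly
    # UnboundType("Dict", [UnboundType("str"), UnboundType("int")]).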
def visit_Tuple(self, n: ast3.Tuple) -> Type:
return TupleType(
self.translate_expr_list(n.elts),
_dummy_fallback,
implicit=True,
line=self.line,
column=self.convert_column(n.col_offset),
)
def visit_Dict(self, n: ast3.Dict) -> Type:
if not n.keys:
return self.invalid_type(n)
items: dict[str, Type] = {}
extra_items_from = []
for item_name, value in zip(n.keys, n.values):
if not isinstance(item_name, ast3.Constant) or not isinstance(item_name.value, str):
if item_name is None:
extra_items_from.append(self.visit(value))
continue
return self.invalid_type(n)
items[item_name.value] = self.visit(value)
result = TypedDictType(items, set(), set(), _dummy_fallback, n.lineno, n.col_offset)
result.extra_items_from = extra_items_from
return result
# Attribute(expr value, identifier attr, expr_context ctx)
def visit_Attribute(self, n: Attribute) -> Type:
before_dot = self.visit(n.value)
if isinstance(before_dot, UnboundType) and not before_dot.args:
return UnboundType(f"{before_dot.name}.{n.attr}", line=self.line)
else:
return self.invalid_type(n)
    # Used for Callable[[X, *Ys, Z], R] etc.
def visit_Starred(self, n: ast3.Starred) -> Type:
return UnpackType(self.visit(n.value), from_star_syntax=True)
# List(expr* elts, expr_context ctx)
def visit_List(self, n: ast3.List) -> Type:
assert isinstance(n.ctx, ast3.Load)
result = self.translate_argument_list(n.elts)
return result
def stringify_name(n: AST) -> str | None:
if isinstance(n, Name):
return n.id
elif isinstance(n, Attribute):
sv = stringify_name(n.value)
if sv is not None:
return f"{sv}.{n.attr}"
return None # Can't do it.
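# Illustrative examples for stringify_name above (schematic AST spelling):
#     Name("List")                      -> "List"
#     Attribute(Name("typing"), "List") -> "typing.List"
#     anything else (e.g. a call)       -> None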
class FindAttributeAssign(TraverserVisitor):
"""Check if an AST contains attribute assignments (e.g. self.x = 0)."""
def __init__(self) -> None:
self.lvalue = False
self.found = False
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
self.lvalue = True
for lv in s.lvalues:
lv.accept(self)
self.lvalue = False
def visit_with_stmt(self, s: WithStmt) -> None:
self.lvalue = True
for lv in s.target:
if lv is not None:
lv.accept(self)
self.lvalue = False
s.body.accept(self)
def visit_for_stmt(self, s: ForStmt) -> None:
self.lvalue = True
s.index.accept(self)
self.lvalue = False
s.body.accept(self)
if s.else_body:
s.else_body.accept(self)
def visit_expression_stmt(self, s: ExpressionStmt) -> None:
# No need to look inside these
pass
def visit_call_expr(self, e: CallExpr) -> None:
# No need to look inside these
pass
def visit_index_expr(self, e: IndexExpr) -> None:
# No need to look inside these
pass
def visit_member_expr(self, e: MemberExpr) -> None:
if self.lvalue:
self.found = True
class FindYield(TraverserVisitor):
"""Check if an AST contains yields or yield froms."""
def __init__(self) -> None:
self.found = False
def visit_yield_expr(self, e: YieldExpr) -> None:
self.found = True
def visit_yield_from_expr(self, e: YieldFromExpr) -> None:
self.found = True
def is_possible_trivial_body(s: list[Statement]) -> bool:
"""Could the statements form a "trivial" function body, such as 'pass'?
This mimics mypy.semanal.is_trivial_body, but this runs before
semantic analysis so some checks must be conservative.
"""
l = len(s)
if l == 0:
return False
i = 0
if isinstance(s[0], ExpressionStmt) and isinstance(s[0].expr, StrExpr):
# Skip docstring
i += 1
if i == l:
return True
if l > i + 1:
return False
stmt = s[i]
return isinstance(stmt, (PassStmt, RaiseStmt)) or (
isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, EllipsisExpr)
)
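# Illustrative examples for is_possible_trivial_body above:
#     ["...docstring...", pass]    -> True
#     [...]  (a lone Ellipsis)     -> True
#     [raise NotImplementedError]  -> True   (RaiseStmt is allowed)
#     [pass, return None]          -> False  (more than one real statement)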
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/fastparse.py
|
Python
|
NOASSERTION
| 87,516 |
"""Routines for finding the sources that mypy will check"""
from __future__ import annotations
import functools
import os
from typing import Final, Sequence
from mypy.fscache import FileSystemCache
from mypy.modulefinder import PYTHON_EXTENSIONS, BuildSource, matches_exclude, mypy_path
from mypy.options import Options
PY_EXTENSIONS: Final = tuple(PYTHON_EXTENSIONS)
class InvalidSourceList(Exception):
"""Exception indicating a problem in the list of sources given to mypy."""
def create_source_list(
paths: Sequence[str],
options: Options,
fscache: FileSystemCache | None = None,
allow_empty_dir: bool = False,
) -> list[BuildSource]:
"""From a list of source files/directories, makes a list of BuildSources.
Raises InvalidSourceList on errors.
"""
fscache = fscache or FileSystemCache()
finder = SourceFinder(fscache, options)
sources = []
for path in paths:
path = os.path.normpath(path)
if path.endswith(PY_EXTENSIONS):
# Can raise InvalidSourceList if a directory doesn't have a valid module name.
name, base_dir = finder.crawl_up(path)
sources.append(BuildSource(path, name, None, base_dir))
elif fscache.isdir(path):
sub_sources = finder.find_sources_in_dir(path)
if not sub_sources and not allow_empty_dir:
raise InvalidSourceList(f"There are no .py[i] files in directory '{path}'")
sources.extend(sub_sources)
else:
mod = os.path.basename(path) if options.scripts_are_modules else None
sources.append(BuildSource(path, mod, None))
return sources
def keyfunc(name: str) -> tuple[bool, int, str]:
"""Determines sort order for directory listing.
The desirable properties are:
1) foo < foo.pyi < foo.py
2) __init__.py[i] < foo
"""
base, suffix = os.path.splitext(name)
for i, ext in enumerate(PY_EXTENSIONS):
if suffix == ext:
return (base != "__init__", i, base)
return (base != "__init__", -1, name)
def normalise_package_base(root: str) -> str:
if not root:
root = os.curdir
root = os.path.abspath(root)
if root.endswith(os.sep):
root = root[:-1]
return root
def get_explicit_package_bases(options: Options) -> list[str] | None:
"""Returns explicit package bases to use if the option is enabled, or None if disabled.
We currently use MYPYPATH and the current directory as the package bases. In the future,
when --namespace-packages is the default could also use the values passed with the
--package-root flag, see #9632.
Values returned are normalised so we can use simple string comparisons in
SourceFinder.is_explicit_package_base
"""
if not options.explicit_package_bases:
return None
roots = mypy_path() + options.mypy_path + [os.getcwd()]
return [normalise_package_base(root) for root in roots]
class SourceFinder:
def __init__(self, fscache: FileSystemCache, options: Options) -> None:
self.fscache = fscache
self.explicit_package_bases = get_explicit_package_bases(options)
self.namespace_packages = options.namespace_packages
self.exclude = options.exclude
self.verbosity = options.verbosity
def is_explicit_package_base(self, path: str) -> bool:
assert self.explicit_package_bases
return normalise_package_base(path) in self.explicit_package_bases
def find_sources_in_dir(self, path: str) -> list[BuildSource]:
sources = []
seen: set[str] = set()
names = sorted(self.fscache.listdir(path), key=keyfunc)
for name in names:
# Skip certain names altogether
if name in ("__pycache__", "site-packages", "node_modules") or name.startswith("."):
continue
subpath = os.path.join(path, name)
if matches_exclude(subpath, self.exclude, self.fscache, self.verbosity >= 2):
continue
if self.fscache.isdir(subpath):
sub_sources = self.find_sources_in_dir(subpath)
if sub_sources:
seen.add(name)
sources.extend(sub_sources)
else:
stem, suffix = os.path.splitext(name)
if stem not in seen and suffix in PY_EXTENSIONS:
seen.add(stem)
module, base_dir = self.crawl_up(subpath)
sources.append(BuildSource(subpath, module, None, base_dir))
return sources
def crawl_up(self, path: str) -> tuple[str, str]:
"""Given a .py[i] filename, return module and base directory.
For example, given "xxx/yyy/foo/bar.py", we might return something like:
("foo.bar", "xxx/yyy")
If namespace packages is off, we crawl upwards until we find a directory without
an __init__.py
If namespace packages is on, we crawl upwards until the nearest explicit base directory.
Failing that, we return one past the highest directory containing an __init__.py
We won't crawl past directories with invalid package names.
The base directory returned is an absolute path.
"""
path = os.path.abspath(path)
parent, filename = os.path.split(path)
module_name = strip_py(filename) or filename
parent_module, base_dir = self.crawl_up_dir(parent)
if module_name == "__init__":
return parent_module, base_dir
# Note that module_name might not actually be a valid identifier, but that's okay
# Ignoring this possibility sidesteps some search path confusion
module = module_join(parent_module, module_name)
return module, base_dir
def crawl_up_dir(self, dir: str) -> tuple[str, str]:
return self._crawl_up_helper(dir) or ("", dir)
@functools.lru_cache # noqa: B019
def _crawl_up_helper(self, dir: str) -> tuple[str, str] | None:
"""Given a directory, maybe returns module and base directory.
We return a non-None value if we were able to find something clearly intended as a base
directory (as adjudicated by being an explicit base directory or by containing a package
with __init__.py).
This distinction is necessary for namespace packages, so that we know when to treat
ourselves as a subpackage.
"""
# stop crawling if we're an explicit base directory
if self.explicit_package_bases is not None and self.is_explicit_package_base(dir):
return "", dir
parent, name = os.path.split(dir)
if name.endswith("-stubs"):
name = name[:-6] # PEP-561 stub-only directory
# recurse if there's an __init__.py
init_file = self.get_init_file(dir)
if init_file is not None:
if not name.isidentifier():
# in most cases the directory name is invalid, we'll just stop crawling upwards
# but if there's an __init__.py in the directory, something is messed up
raise InvalidSourceList(f"{name} is not a valid Python package name")
# we're definitely a package, so we always return a non-None value
mod_prefix, base_dir = self.crawl_up_dir(parent)
return module_join(mod_prefix, name), base_dir
# stop crawling if we're out of path components or our name is an invalid identifier
if not name or not parent or not name.isidentifier():
return None
# stop crawling if namespace packages is off (since we don't have an __init__.py)
if not self.namespace_packages:
return None
# at this point: namespace packages is on, we don't have an __init__.py and we're not an
# explicit base directory
result = self._crawl_up_helper(parent)
if result is None:
# we're not an explicit base directory and we don't have an __init__.py
# and none of our parents are either, so return
return None
# one of our parents was an explicit base directory or had an __init__.py, so we're
# definitely a subpackage! chain our name to the module.
mod_prefix, base_dir = result
return module_join(mod_prefix, name), base_dir
def get_init_file(self, dir: str) -> str | None:
"""Check whether a directory contains a file named __init__.py[i].
If so, return the file's name (with dir prefixed). If not, return None.
This prefers .pyi over .py (because of the ordering of PY_EXTENSIONS).
"""
for ext in PY_EXTENSIONS:
f = os.path.join(dir, "__init__" + ext)
if self.fscache.isfile(f):
return f
if ext == ".py" and self.fscache.init_under_package_root(f):
return f
return None
def module_join(parent: str, child: str) -> str:
"""Join module ids, accounting for a possibly empty parent."""
if parent:
return parent + "." + child
return child
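# Illustrative examples for module_join above:
#     module_join("", "foo")    -> "foo"
#     module_join("pkg", "sub") -> "pkg.sub"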
def strip_py(arg: str) -> str | None:
"""Strip a trailing .py or .pyi suffix.
Return None if no such suffix is found.
"""
for ext in PY_EXTENSIONS:
if arg.endswith(ext):
return arg[: -len(ext)]
return None
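# Illustrative examples for strip_py above:
#     strip_py("mod.py")  -> "mod"
#     strip_py("mod.pyi") -> "mod"
#     strip_py("mod.txt") -> None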
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/find_sources.py
|
Python
|
NOASSERTION
| 9,362 |
"""Fix up various things after deserialization."""
from __future__ import annotations
from typing import Any, Final
from mypy.lookup import lookup_fully_qualified
from mypy.nodes import (
Block,
ClassDef,
Decorator,
FuncDef,
MypyFile,
OverloadedFuncDef,
ParamSpecExpr,
SymbolTable,
TypeAlias,
TypeInfo,
TypeVarExpr,
TypeVarTupleExpr,
Var,
)
from mypy.types import (
NOT_READY,
AnyType,
CallableType,
Instance,
LiteralType,
Overloaded,
Parameters,
ParamSpecType,
TupleType,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeType,
TypeVarTupleType,
TypeVarType,
TypeVisitor,
UnboundType,
UnionType,
UnpackType,
)
from mypy.visitor import NodeVisitor
# N.B.: we do an allow_missing fixup when fixing up a fine-grained
# incremental cache load (since there may be cross-refs into deleted
# modules).
def fixup_module(tree: MypyFile, modules: dict[str, MypyFile], allow_missing: bool) -> None:
node_fixer = NodeFixer(modules, allow_missing)
node_fixer.visit_symbol_table(tree.names, tree.fullname)
# TODO: Fix up .info when deserializing, i.e. much earlier.
class NodeFixer(NodeVisitor[None]):
current_info: TypeInfo | None = None
def __init__(self, modules: dict[str, MypyFile], allow_missing: bool) -> None:
self.modules = modules
self.allow_missing = allow_missing
self.type_fixer = TypeFixer(self.modules, allow_missing)
# NOTE: This method isn't (yet) part of the NodeVisitor API.
def visit_type_info(self, info: TypeInfo) -> None:
save_info = self.current_info
try:
self.current_info = info
if info.defn:
info.defn.accept(self)
if info.names:
self.visit_symbol_table(info.names, info.fullname)
if info.bases:
for base in info.bases:
base.accept(self.type_fixer)
if info._promote:
for p in info._promote:
p.accept(self.type_fixer)
if info.tuple_type:
info.tuple_type.accept(self.type_fixer)
info.update_tuple_type(info.tuple_type)
if info.special_alias:
info.special_alias.alias_tvars = list(info.defn.type_vars)
for i, t in enumerate(info.defn.type_vars):
if isinstance(t, TypeVarTupleType):
info.special_alias.tvar_tuple_index = i
if info.typeddict_type:
info.typeddict_type.accept(self.type_fixer)
info.update_typeddict_type(info.typeddict_type)
if info.special_alias:
info.special_alias.alias_tvars = list(info.defn.type_vars)
for i, t in enumerate(info.defn.type_vars):
if isinstance(t, TypeVarTupleType):
info.special_alias.tvar_tuple_index = i
if info.declared_metaclass:
info.declared_metaclass.accept(self.type_fixer)
if info.metaclass_type:
info.metaclass_type.accept(self.type_fixer)
if info.alt_promote:
info.alt_promote.accept(self.type_fixer)
instance = Instance(info, [])
# Hack: We may also need to add a backwards promotion (from int to native int),
# since it might not be serialized.
if instance not in info.alt_promote.type._promote:
info.alt_promote.type._promote.append(instance)
if info._mro_refs:
info.mro = [
lookup_fully_qualified_typeinfo(
self.modules, name, allow_missing=self.allow_missing
)
for name in info._mro_refs
]
info._mro_refs = None
finally:
self.current_info = save_info
# NOTE: This method *definitely* isn't part of the NodeVisitor API.
def visit_symbol_table(self, symtab: SymbolTable, table_fullname: str) -> None:
# Copy the items because we may mutate symtab.
for key, value in list(symtab.items()):
cross_ref = value.cross_ref
if cross_ref is not None: # Fix up cross-reference.
value.cross_ref = None
if cross_ref in self.modules:
value.node = self.modules[cross_ref]
else:
stnode = lookup_fully_qualified(
cross_ref, self.modules, raise_on_missing=not self.allow_missing
)
if stnode is not None:
if stnode is value:
# The node seems to refer to itself, which can mean that
# the target is a deleted submodule of the current module,
# and thus lookup falls back to the symbol table of the parent
# package. Here's how this may happen:
#
# pkg/__init__.py:
# from pkg import sub
#
# Now if pkg.sub is deleted, the pkg.sub symbol table entry
# appears to refer to itself. Replace the entry with a
# placeholder to avoid a crash. We can't delete the entry,
# as it would stop dependency propagation.
value.node = Var(key + "@deleted")
else:
assert stnode.node is not None, (table_fullname + "." + key, cross_ref)
value.node = stnode.node
elif not self.allow_missing:
assert False, f"Could not find cross-ref {cross_ref}"
else:
# We have a missing crossref in allow missing mode, need to put something
value.node = missing_info(self.modules)
else:
if isinstance(value.node, TypeInfo):
# TypeInfo has no accept(). TODO: Add it?
self.visit_type_info(value.node)
elif value.node is not None:
value.node.accept(self)
else:
assert False, f"Unexpected empty node {key!r}: {value}"
def visit_func_def(self, func: FuncDef) -> None:
if self.current_info is not None:
func.info = self.current_info
if func.type is not None:
func.type.accept(self.type_fixer)
def visit_overloaded_func_def(self, o: OverloadedFuncDef) -> None:
if self.current_info is not None:
o.info = self.current_info
if o.type:
o.type.accept(self.type_fixer)
for item in o.items:
item.accept(self)
if o.impl:
o.impl.accept(self)
def visit_decorator(self, d: Decorator) -> None:
if self.current_info is not None:
d.var.info = self.current_info
if d.func:
d.func.accept(self)
if d.var:
d.var.accept(self)
for node in d.decorators:
node.accept(self)
def visit_class_def(self, c: ClassDef) -> None:
for v in c.type_vars:
v.accept(self.type_fixer)
def visit_type_var_expr(self, tv: TypeVarExpr) -> None:
for value in tv.values:
value.accept(self.type_fixer)
tv.upper_bound.accept(self.type_fixer)
tv.default.accept(self.type_fixer)
def visit_paramspec_expr(self, p: ParamSpecExpr) -> None:
p.upper_bound.accept(self.type_fixer)
p.default.accept(self.type_fixer)
def visit_type_var_tuple_expr(self, tv: TypeVarTupleExpr) -> None:
tv.upper_bound.accept(self.type_fixer)
tv.tuple_fallback.accept(self.type_fixer)
tv.default.accept(self.type_fixer)
def visit_var(self, v: Var) -> None:
if self.current_info is not None:
v.info = self.current_info
if v.type is not None:
v.type.accept(self.type_fixer)
def visit_type_alias(self, a: TypeAlias) -> None:
a.target.accept(self.type_fixer)
for v in a.alias_tvars:
v.accept(self.type_fixer)
class TypeFixer(TypeVisitor[None]):
def __init__(self, modules: dict[str, MypyFile], allow_missing: bool) -> None:
self.modules = modules
self.allow_missing = allow_missing
def visit_instance(self, inst: Instance) -> None:
# TODO: Combine Instances that are exactly the same?
type_ref = inst.type_ref
if type_ref is None:
return # We've already been here.
inst.type_ref = None
inst.type = lookup_fully_qualified_typeinfo(
self.modules, type_ref, allow_missing=self.allow_missing
)
# TODO: Is this needed or redundant?
# Also fix up the bases, just in case.
for base in inst.type.bases:
if base.type is NOT_READY:
base.accept(self)
for a in inst.args:
a.accept(self)
if inst.last_known_value is not None:
inst.last_known_value.accept(self)
if inst.extra_attrs:
for v in inst.extra_attrs.attrs.values():
v.accept(self)
def visit_type_alias_type(self, t: TypeAliasType) -> None:
type_ref = t.type_ref
if type_ref is None:
return # We've already been here.
t.type_ref = None
t.alias = lookup_fully_qualified_alias(
self.modules, type_ref, allow_missing=self.allow_missing
)
for a in t.args:
a.accept(self)
def visit_any(self, o: Any) -> None:
pass # Nothing to descend into.
def visit_callable_type(self, ct: CallableType) -> None:
if ct.fallback:
ct.fallback.accept(self)
for argt in ct.arg_types:
# argt may be None, e.g. for __self in NamedTuple constructors.
if argt is not None:
argt.accept(self)
if ct.ret_type is not None:
ct.ret_type.accept(self)
for v in ct.variables:
v.accept(self)
for arg in ct.bound_args:
if arg:
arg.accept(self)
if ct.type_guard is not None:
ct.type_guard.accept(self)
if ct.type_is is not None:
ct.type_is.accept(self)
def visit_overloaded(self, t: Overloaded) -> None:
for ct in t.items:
ct.accept(self)
def visit_erased_type(self, o: Any) -> None:
# This type should exist only temporarily during type inference
raise RuntimeError("Shouldn't get here", o)
def visit_deleted_type(self, o: Any) -> None:
pass # Nothing to descend into.
def visit_none_type(self, o: Any) -> None:
pass # Nothing to descend into.
def visit_uninhabited_type(self, o: Any) -> None:
pass # Nothing to descend into.
def visit_partial_type(self, o: Any) -> None:
raise RuntimeError("Shouldn't get here", o)
def visit_tuple_type(self, tt: TupleType) -> None:
if tt.items:
for it in tt.items:
it.accept(self)
if tt.partial_fallback is not None:
tt.partial_fallback.accept(self)
def visit_typeddict_type(self, tdt: TypedDictType) -> None:
if tdt.items:
for it in tdt.items.values():
it.accept(self)
if tdt.fallback is not None:
if tdt.fallback.type_ref is not None:
if (
lookup_fully_qualified(
tdt.fallback.type_ref,
self.modules,
raise_on_missing=not self.allow_missing,
)
is None
):
# We reject fake TypeInfos for TypedDict fallbacks because
# the latter are used in type checking and must be valid.
tdt.fallback.type_ref = "typing._TypedDict"
tdt.fallback.accept(self)
def visit_literal_type(self, lt: LiteralType) -> None:
lt.fallback.accept(self)
def visit_type_var(self, tvt: TypeVarType) -> None:
if tvt.values:
for vt in tvt.values:
vt.accept(self)
tvt.upper_bound.accept(self)
tvt.default.accept(self)
def visit_param_spec(self, p: ParamSpecType) -> None:
p.upper_bound.accept(self)
p.default.accept(self)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> None:
t.tuple_fallback.accept(self)
t.upper_bound.accept(self)
t.default.accept(self)
def visit_unpack_type(self, u: UnpackType) -> None:
u.type.accept(self)
def visit_parameters(self, p: Parameters) -> None:
for argt in p.arg_types:
if argt is not None:
argt.accept(self)
for var in p.variables:
var.accept(self)
def visit_unbound_type(self, o: UnboundType) -> None:
for a in o.args:
a.accept(self)
def visit_union_type(self, ut: UnionType) -> None:
if ut.items:
for it in ut.items:
it.accept(self)
def visit_type_type(self, t: TypeType) -> None:
t.item.accept(self)
def lookup_fully_qualified_typeinfo(
modules: dict[str, MypyFile], name: str, *, allow_missing: bool
) -> TypeInfo:
stnode = lookup_fully_qualified(name, modules, raise_on_missing=not allow_missing)
node = stnode.node if stnode else None
if isinstance(node, TypeInfo):
return node
else:
# Looks like a missing TypeInfo during an initial daemon load, put something there
assert (
allow_missing
), "Should never get here in normal mode, got {}:{} instead of TypeInfo".format(
type(node).__name__, node.fullname if node else ""
)
return missing_info(modules)
def lookup_fully_qualified_alias(
modules: dict[str, MypyFile], name: str, *, allow_missing: bool
) -> TypeAlias:
stnode = lookup_fully_qualified(name, modules, raise_on_missing=not allow_missing)
node = stnode.node if stnode else None
if isinstance(node, TypeAlias):
return node
elif isinstance(node, TypeInfo):
if node.special_alias:
# Already fixed up.
return node.special_alias
if node.tuple_type:
alias = TypeAlias.from_tuple_type(node)
elif node.typeddict_type:
alias = TypeAlias.from_typeddict_type(node)
else:
assert allow_missing
return missing_alias()
node.special_alias = alias
return alias
else:
# Looks like a missing TypeAlias during an initial daemon load, put something there
assert (
allow_missing
), "Should never get here in normal mode, got {}:{} instead of TypeAlias".format(
type(node).__name__, node.fullname if node else ""
)
return missing_alias()
_SUGGESTION: Final = "<missing {}: *should* have gone away during fine-grained update>"
def missing_info(modules: dict[str, MypyFile]) -> TypeInfo:
suggestion = _SUGGESTION.format("info")
dummy_def = ClassDef(suggestion, Block([]))
dummy_def.fullname = suggestion
info = TypeInfo(SymbolTable(), dummy_def, "<missing>")
obj_type = lookup_fully_qualified_typeinfo(modules, "builtins.object", allow_missing=False)
info.bases = [Instance(obj_type, [])]
info.mro = [info, obj_type]
return info
def missing_alias() -> TypeAlias:
suggestion = _SUGGESTION.format("alias")
return TypeAlias(AnyType(TypeOfAny.special_form), suggestion, line=-1, column=-1)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/fixup.py
|
Python
|
NOASSERTION
| 15,988 |
"""Generic node traverser visitor"""
from __future__ import annotations
from mypy.nodes import Block, MypyFile
from mypy.traverser import TraverserVisitor
class TreeFreer(TraverserVisitor):
def visit_block(self, block: Block) -> None:
super().visit_block(block)
block.body.clear()
def free_tree(tree: MypyFile) -> None:
"""Free all the ASTs associated with a module.
This needs to be done recursively: symbol tables keep references to
definitions, so the definitions themselves won't be freed, but we still
want their contents (their bodies) to be.
"""
tree.accept(TreeFreer())
tree.defs.clear()
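# Illustrative usage sketch (not part of the vendored module), assuming a
# BuildResult obtained from mypy.build.build(): once we are done with a
# module's AST, free_tree() drops statement bodies while leaving the symbol
# table (and hence cross-module lookups) intact.
def _demo_free_tree(result) -> None:  # result: mypy.build.BuildResult (assumed)
    for tree in result.files.values():  # files maps module id -> MypyFile
        free_tree(tree)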
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/freetree.py
|
Python
|
NOASSERTION
| 617 |
"""Interface for accessing the file system with automatic caching.
The idea is to cache the results of any file system state reads during
a single transaction. This has two main benefits:
* This avoids redundant syscalls, as we won't perform the same OS
operations multiple times.
* This makes it easier to reason about concurrent FS updates, as different
operations targeting the same paths can't report different state during
a transaction.
Note that this only deals with reading state, not writing.
Properties maintained by the API:
* The contents of the file are always from the same or later time compared
to the reported mtime of the file, even if mtime is queried after reading
a file.
* Repeating an operation produces the same result as the first one during
a transaction.
* Call flush() to start a new transaction (flush the caches).
The API is a bit limited. It's easy to add new cached operations, however.
You should perform all file system reads through the API to actually take
advantage of the benefits.
"""
from __future__ import annotations
import os
import stat
from mypy_extensions import mypyc_attr
from mypy.util import hash_digest
@mypyc_attr(allow_interpreted_subclasses=True) # for tests
class FileSystemCache:
def __init__(self) -> None:
# The package root is not flushed with the caches.
# It is set by set_package_root() below.
self.package_root: list[str] = []
self.flush()
def set_package_root(self, package_root: list[str]) -> None:
self.package_root = package_root
def flush(self) -> None:
"""Start another transaction and empty all caches."""
self.stat_or_none_cache: dict[str, os.stat_result | None] = {}
self.listdir_cache: dict[str, list[str]] = {}
self.listdir_error_cache: dict[str, OSError] = {}
self.isfile_case_cache: dict[str, bool] = {}
self.exists_case_cache: dict[str, bool] = {}
self.read_cache: dict[str, bytes] = {}
self.read_error_cache: dict[str, Exception] = {}
self.hash_cache: dict[str, str] = {}
self.fake_package_cache: set[str] = set()
def stat_or_none(self, path: str) -> os.stat_result | None:
if path in self.stat_or_none_cache:
return self.stat_or_none_cache[path]
st = None
try:
st = os.stat(path)
except OSError:
if self.init_under_package_root(path):
try:
st = self._fake_init(path)
except OSError:
pass
self.stat_or_none_cache[path] = st
return st
def init_under_package_root(self, path: str) -> bool:
"""Is this path an __init__.py under a package root?
This is used to detect packages that don't contain __init__.py
files, which is needed to support Bazel. The function should
only be called for non-existing files.
It will return True if it refers to a __init__.py file that
Bazel would create, so that at runtime Python would think the
directory containing it is a package. For this to work you
must pass one or more package roots using the --package-root
flag.
As an exceptional case, any directory that is a package root
itself will not be considered to contain an __init__.py file.
This is different from the rules Bazel itself applies, but is
necessary for mypy to properly distinguish packages from other
directories.
See https://docs.bazel.build/versions/master/be/python.html,
where this behavior is described under legacy_create_init.
"""
if not self.package_root:
return False
dirname, basename = os.path.split(path)
if basename != "__init__.py":
return False
if not os.path.basename(dirname).isidentifier():
# Can't put an __init__.py in a place that's not an identifier
return False
st = self.stat_or_none(dirname)
if st is None:
return False
else:
if not stat.S_ISDIR(st.st_mode):
return False
ok = False
drive, path = os.path.splitdrive(path) # Ignore Windows drive name
if os.path.isabs(path):
path = os.path.relpath(path)
path = os.path.normpath(path)
for root in self.package_root:
if path.startswith(root):
if path == root + basename:
# A package root itself is never a package.
ok = False
break
else:
ok = True
return ok
def _fake_init(self, path: str) -> os.stat_result:
"""Prime the cache with a fake __init__.py file.
This makes code that looks for path believe an empty file by
that name exists. Should only be called after
init_under_package_root() returns True.
"""
dirname, basename = os.path.split(path)
assert basename == "__init__.py", path
assert not os.path.exists(path), path # Not cached!
dirname = os.path.normpath(dirname)
st = os.stat(dirname) # May raise OSError
# Get stat result as a list so we can modify it.
seq: list[float] = list(st)
seq[stat.ST_MODE] = stat.S_IFREG | 0o444
seq[stat.ST_INO] = 1
seq[stat.ST_NLINK] = 1
seq[stat.ST_SIZE] = 0
st = os.stat_result(seq)
# Make listdir() and read() also pretend this file exists.
self.fake_package_cache.add(dirname)
return st
def listdir(self, path: str) -> list[str]:
path = os.path.normpath(path)
if path in self.listdir_cache:
res = self.listdir_cache[path]
# Check the fake cache.
if path in self.fake_package_cache and "__init__.py" not in res:
res.append("__init__.py") # Updates the result as well as the cache
return res
if path in self.listdir_error_cache:
raise copy_os_error(self.listdir_error_cache[path])
try:
results = os.listdir(path)
except OSError as err:
# Like above, take a copy to reduce memory use.
self.listdir_error_cache[path] = copy_os_error(err)
raise err
self.listdir_cache[path] = results
# Check the fake cache.
if path in self.fake_package_cache and "__init__.py" not in results:
results.append("__init__.py")
return results
def isfile(self, path: str) -> bool:
st = self.stat_or_none(path)
if st is None:
return False
return stat.S_ISREG(st.st_mode)
def isfile_case(self, path: str, prefix: str) -> bool:
"""Return whether path exists and is a file.
On case-insensitive filesystems (like Mac or Windows) this returns
False if the case of path's last component does not exactly match
the case found in the filesystem.
We check also the case of other path components up to prefix.
For example, if path is 'user-stubs/pack/mod.pyi' and prefix is 'user-stubs',
we check that the case of 'pack' and 'mod.pyi' matches exactly; 'user-stubs' itself
is matched case-insensitively on case-insensitive filesystems.
The caller must ensure that prefix is a valid file system prefix of path.
"""
if not self.isfile(path):
# Fast path
return False
if path in self.isfile_case_cache:
return self.isfile_case_cache[path]
head, tail = os.path.split(path)
if not tail:
self.isfile_case_cache[path] = False
return False
try:
names = self.listdir(head)
# This allows one to check file name case sensitively in
# case-insensitive filesystems.
res = tail in names
except OSError:
res = False
if res:
# Also recursively check the other path components in case sensitive way.
res = self.exists_case(head, prefix)
self.isfile_case_cache[path] = res
return res
def exists_case(self, path: str, prefix: str) -> bool:
"""Return whether path exists - checking path components in case sensitive
fashion, up to prefix.
"""
if path in self.exists_case_cache:
return self.exists_case_cache[path]
head, tail = os.path.split(path)
if not head.startswith(prefix) or not tail:
# Only perform the check for paths under prefix.
self.exists_case_cache[path] = True
return True
try:
names = self.listdir(head)
# This allows one to check file name case sensitively in
# case-insensitive filesystems.
res = tail in names
except OSError:
res = False
if res:
# Also recursively check other path components.
res = self.exists_case(head, prefix)
self.exists_case_cache[path] = res
return res
def isdir(self, path: str) -> bool:
st = self.stat_or_none(path)
if st is None:
return False
return stat.S_ISDIR(st.st_mode)
def exists(self, path: str) -> bool:
st = self.stat_or_none(path)
return st is not None
def read(self, path: str) -> bytes:
if path in self.read_cache:
return self.read_cache[path]
if path in self.read_error_cache:
raise self.read_error_cache[path]
# Need to stat first so that the contents of file are from no
# earlier instant than the mtime reported by self.stat().
self.stat_or_none(path)
dirname, basename = os.path.split(path)
dirname = os.path.normpath(dirname)
# Check the fake cache.
if basename == "__init__.py" and dirname in self.fake_package_cache:
data = b""
else:
try:
with open(path, "rb") as f:
data = f.read()
except OSError as err:
self.read_error_cache[path] = err
raise
self.read_cache[path] = data
self.hash_cache[path] = hash_digest(data)
return data
def hash_digest(self, path: str) -> str:
if path not in self.hash_cache:
self.read(path)
return self.hash_cache[path]
def samefile(self, f1: str, f2: str) -> bool:
s1 = self.stat_or_none(f1)
s2 = self.stat_or_none(f2)
if s1 is None or s2 is None:
return False
return os.path.samestat(s1, s2)
def copy_os_error(e: OSError) -> OSError:
new = OSError(*e.args)
new.errno = e.errno
new.strerror = e.strerror
new.filename = e.filename
if e.filename2:
new.filename2 = e.filename2
return new
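# Illustrative usage sketch (not part of the vendored module), assuming an
# existing readable file at the hypothetical path "example.py". Reads go
# through the cache: repeated reads within one transaction return the cached
# bytes, and flush() starts a new transaction with empty caches.
def _demo_fscache(path: str = "example.py") -> None:
    fscache = FileSystemCache()
    if fscache.isfile(path):
        data = fscache.read(path)           # stats first, then caches the bytes
        digest = fscache.hash_digest(path)  # computed from the cached bytes
        assert fscache.read(path) is data   # served straight from read_cache
        print(len(data), digest)
    fscache.flush()                         # forget all cached file system state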
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/fscache.py
|
Python
|
NOASSERTION
| 10,975 |
"""Watch parts of the file system for changes."""
from __future__ import annotations
import os
from typing import AbstractSet, Iterable, NamedTuple
from mypy.fscache import FileSystemCache
class FileData(NamedTuple):
st_mtime: float
st_size: int
hash: str
class FileSystemWatcher:
"""Watcher for file system changes among specific paths.
All file system access is performed using FileSystemCache. We
detect changed files by stat()ing them all and comparing hashes
of potentially changed files. If a file has both size and mtime
unmodified, the file is assumed to be unchanged.
An important goal of this class is to make it easier to eventually
use file system events to detect file changes.
Note: This class doesn't flush the file system cache. If you don't
manually flush it, changes won't be seen.
"""
# TODO: Watching directories?
# TODO: Handle non-files
def __init__(self, fs: FileSystemCache) -> None:
self.fs = fs
self._paths: set[str] = set()
self._file_data: dict[str, FileData | None] = {}
def dump_file_data(self) -> dict[str, tuple[float, int, str]]:
return {k: v for k, v in self._file_data.items() if v is not None}
def set_file_data(self, path: str, data: FileData) -> None:
self._file_data[path] = data
def add_watched_paths(self, paths: Iterable[str]) -> None:
for path in paths:
if path not in self._paths:
# By storing None this path will get reported as changed by
# find_changed if it exists.
self._file_data[path] = None
self._paths |= set(paths)
def remove_watched_paths(self, paths: Iterable[str]) -> None:
for path in paths:
if path in self._file_data:
del self._file_data[path]
self._paths -= set(paths)
def _update(self, path: str, st: os.stat_result) -> None:
hash_digest = self.fs.hash_digest(path)
self._file_data[path] = FileData(st.st_mtime, st.st_size, hash_digest)
def _find_changed(self, paths: Iterable[str]) -> AbstractSet[str]:
changed = set()
for path in paths:
old = self._file_data[path]
st = self.fs.stat_or_none(path)
if st is None:
if old is not None:
# File was deleted.
changed.add(path)
self._file_data[path] = None
else:
if old is None:
# File is new.
changed.add(path)
self._update(path, st)
# Round mtimes down, to match the mtimes we write to meta files
elif st.st_size != old.st_size or int(st.st_mtime) != int(old.st_mtime):
# Only look for changes if size or mtime has changed as an
# optimization, since calculating hash is expensive.
new_hash = self.fs.hash_digest(path)
self._update(path, st)
if st.st_size != old.st_size or new_hash != old.hash:
# Changed file.
changed.add(path)
return changed
def find_changed(self) -> AbstractSet[str]:
"""Return paths that have changes since the last call, in the watched set."""
return self._find_changed(self._paths)
def update_changed(self, remove: list[str], update: list[str]) -> AbstractSet[str]:
"""Alternative to find_changed() given explicit changes.
This only calls self.fs.stat() on added or updated files, not
on all files. It believes all other files are unchanged!
Implies add_watched_paths() for update, and
remove_watched_paths() for remove.
"""
self.remove_watched_paths(remove)
self.add_watched_paths(update)
return self._find_changed(update)
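# Illustrative usage sketch (not part of the vendored module), assuming the
# hypothetical paths "a.py" and "b.py". The first poll reports every watched
# path that exists as changed; later polls report only real modifications.
# The FileSystemCache must be flushed between polls, as noted above.
def _demo_watcher() -> None:
    fs = FileSystemCache()
    watcher = FileSystemWatcher(fs)
    watcher.add_watched_paths(["a.py", "b.py"])
    first = watcher.find_changed()   # existing watched paths show up here
    fs.flush()                       # start a new fscache transaction
    second = watcher.find_changed()  # only files modified since the last poll
    print(sorted(first), sorted(second))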
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/fswatcher.py
|
Python
|
NOASSERTION
| 3,951 |
from __future__ import annotations
import gc
import time
from typing import Mapping
class GcLogger:
"""Context manager to log GC stats and overall time."""
def __enter__(self) -> GcLogger:
self.gc_start_time: float | None = None
self.gc_time = 0.0
self.gc_calls = 0
self.gc_collected = 0
self.gc_uncollectable = 0
gc.callbacks.append(self.gc_callback)
self.start_time = time.time()
return self
def gc_callback(self, phase: str, info: Mapping[str, int]) -> None:
if phase == "start":
assert self.gc_start_time is None, "Start phase out of sequence"
self.gc_start_time = time.time()
elif phase == "stop":
assert self.gc_start_time is not None, "Stop phase out of sequence"
self.gc_calls += 1
self.gc_time += time.time() - self.gc_start_time
self.gc_start_time = None
self.gc_collected += info["collected"]
self.gc_uncollectable += info["uncollectable"]
else:
assert False, f"Unrecognized gc phase ({phase!r})"
def __exit__(self, *args: object) -> None:
while self.gc_callback in gc.callbacks:
gc.callbacks.remove(self.gc_callback)
def get_stats(self) -> Mapping[str, float]:
end_time = time.time()
result = {}
result["gc_time"] = self.gc_time
result["gc_calls"] = self.gc_calls
result["gc_collected"] = self.gc_collected
result["gc_uncollectable"] = self.gc_uncollectable
result["build_time"] = end_time - self.start_time
return result
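# Illustrative usage sketch (not part of the vendored module): wrap a chunk of
# work in GcLogger to compare time spent in the garbage collector with the
# total wall-clock time of the block.
def _demo_gclogger() -> None:
    with GcLogger() as gc_logger:
        junk = [[object() for _ in range(1000)] for _ in range(100)]
        del junk
        gc.collect()
    stats = gc_logger.get_stats()
    print(f"gc_time={stats['gc_time']:.3f}s of build_time={stats['build_time']:.3f}s")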
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/gclogger.py
|
Python
|
NOASSERTION
| 1,641 |
"""Git utilities."""
# Used also from setup.py, so don't pull in anything additional here (like mypy or typing):
from __future__ import annotations
import os
import subprocess
def is_git_repo(dir: str) -> bool:
"""Is the given directory version-controlled with git?"""
return os.path.exists(os.path.join(dir, ".git"))
def have_git() -> bool:
"""Can we run the git executable?"""
try:
subprocess.check_output(["git", "--help"])
return True
except subprocess.CalledProcessError:
return False
except OSError:
return False
def git_revision(dir: str) -> bytes:
"""Get the SHA-1 of the HEAD of a git repository."""
return subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=dir).strip()
def is_dirty(dir: str) -> bool:
"""Check whether a git repository has uncommitted changes."""
output = subprocess.check_output(["git", "status", "-uno", "--porcelain"], cwd=dir)
return output.strip() != b""
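# Illustrative usage sketch (not part of the vendored module): report the
# current revision of a checkout, guarding each step with the helpers above.
def _demo_git_revision(repo_dir: str = ".") -> None:
    if have_git() and is_git_repo(repo_dir):
        rev = git_revision(repo_dir).decode("ascii")
        suffix = " (dirty)" if is_dirty(repo_dir) else ""
        print(rev + suffix)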
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/git.py
|
Python
|
NOASSERTION
| 980 |
"""Helpers for manipulations with graphs."""
from __future__ import annotations
from typing import AbstractSet, Iterable, Iterator, TypeVar
T = TypeVar("T")
def strongly_connected_components(
vertices: AbstractSet[T], edges: dict[T, list[T]]
) -> Iterator[set[T]]:
"""Compute Strongly Connected Components of a directed graph.
Args:
vertices: the labels for the vertices
edges: for each vertex, gives the target vertices of its outgoing edges
Returns:
An iterator yielding strongly connected components, each
represented as a set of vertices. Each input vertex will occur
exactly once; vertices not part of a SCC are returned as
singleton sets.
From https://code.activestate.com/recipes/578507/.
"""
identified: set[T] = set()
stack: list[T] = []
index: dict[T, int] = {}
boundaries: list[int] = []
def dfs(v: T) -> Iterator[set[T]]:
index[v] = len(stack)
stack.append(v)
boundaries.append(index[v])
for w in edges[v]:
if w not in index:
yield from dfs(w)
elif w not in identified:
while index[w] < boundaries[-1]:
boundaries.pop()
if boundaries[-1] == index[v]:
boundaries.pop()
scc = set(stack[index[v] :])
del stack[index[v] :]
identified.update(scc)
yield scc
for v in vertices:
if v not in index:
yield from dfs(v)
def prepare_sccs(
sccs: list[set[T]], edges: dict[T, list[T]]
) -> dict[AbstractSet[T], set[AbstractSet[T]]]:
"""Use original edges to organize SCCs in a graph by dependencies between them."""
sccsmap = {v: frozenset(scc) for scc in sccs for v in scc}
data: dict[AbstractSet[T], set[AbstractSet[T]]] = {}
for scc in sccs:
deps: set[AbstractSet[T]] = set()
for v in scc:
deps.update(sccsmap[x] for x in edges[v])
data[frozenset(scc)] = deps
return data
def topsort(data: dict[T, set[T]]) -> Iterable[set[T]]:
"""Topological sort.
Args:
data: A map from vertices to all vertices that it has an edge
connecting it to. NOTE: This data structure
is modified in place -- for normalization purposes,
self-dependencies are removed and entries representing
orphans are added.
Returns:
An iterator yielding sets of vertices that have an equivalent
ordering.
Example:
Suppose the input has the following structure:
{A: {B, C}, B: {D}, C: {D}}
This is normalized to:
{A: {B, C}, B: {D}, C: {D}, D: {}}
The algorithm will yield the following values:
{D}
{B, C}
{A}
From https://code.activestate.com/recipes/577413/.
"""
# TODO: Use a faster algorithm?
for k, v in data.items():
v.discard(k) # Ignore self dependencies.
for item in set.union(*data.values()) - set(data.keys()):
data[item] = set()
while True:
ready = {item for item, dep in data.items() if not dep}
if not ready:
break
yield ready
data = {item: (dep - ready) for item, dep in data.items() if item not in ready}
assert not data, f"A cyclic dependency exists amongst {data!r}"
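# Illustrative usage sketch (not part of the vendored module): find the SCCs of
# a small hand-written graph, then order them so that dependencies come first.
def _demo_graph_utils() -> None:
    edges: dict[str, list[str]] = {"a": ["b"], "b": ["a", "c"], "c": []}
    sccs = list(strongly_connected_components(edges.keys(), edges))
    # "a" and "b" form a cycle, so they share an SCC; "c" is a singleton.
    ordered = list(topsort(prepare_sccs(sccs, edges)))
    # The {"c"} SCC has no dependencies, so it is yielded before {"a", "b"}.
    print(sccs, ordered)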
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/graph_utils.py
|
Python
|
NOASSERTION
| 3,343 |
from __future__ import annotations
from typing import Iterable, Set
import mypy.types as types
from mypy.types import TypeVisitor
from mypy.util import split_module_names
def extract_module_names(type_name: str | None) -> list[str]:
"""Returns the module names of a fully qualified type name."""
if type_name is not None:
# Discard the first one, which is just the qualified name of the type
possible_module_names = split_module_names(type_name)
return possible_module_names[1:]
else:
return []
class TypeIndirectionVisitor(TypeVisitor[Set[str]]):
"""Returns all module references within a particular type."""
def __init__(self) -> None:
self.cache: dict[types.Type, set[str]] = {}
self.seen_aliases: set[types.TypeAliasType] = set()
def find_modules(self, typs: Iterable[types.Type]) -> set[str]:
self.seen_aliases.clear()
return self._visit(typs)
def _visit(self, typ_or_typs: types.Type | Iterable[types.Type]) -> set[str]:
typs = [typ_or_typs] if isinstance(typ_or_typs, types.Type) else typ_or_typs
output: set[str] = set()
for typ in typs:
if isinstance(typ, types.TypeAliasType):
# Avoid infinite recursion for recursive type aliases.
if typ in self.seen_aliases:
continue
self.seen_aliases.add(typ)
if typ in self.cache:
modules = self.cache[typ]
else:
modules = typ.accept(self)
self.cache[typ] = set(modules)
output.update(modules)
return output
def visit_unbound_type(self, t: types.UnboundType) -> set[str]:
return self._visit(t.args)
def visit_any(self, t: types.AnyType) -> set[str]:
return set()
def visit_none_type(self, t: types.NoneType) -> set[str]:
return set()
def visit_uninhabited_type(self, t: types.UninhabitedType) -> set[str]:
return set()
def visit_erased_type(self, t: types.ErasedType) -> set[str]:
return set()
def visit_deleted_type(self, t: types.DeletedType) -> set[str]:
return set()
def visit_type_var(self, t: types.TypeVarType) -> set[str]:
return self._visit(t.values) | self._visit(t.upper_bound) | self._visit(t.default)
def visit_param_spec(self, t: types.ParamSpecType) -> set[str]:
return self._visit(t.upper_bound) | self._visit(t.default)
def visit_type_var_tuple(self, t: types.TypeVarTupleType) -> set[str]:
return self._visit(t.upper_bound) | self._visit(t.default)
def visit_unpack_type(self, t: types.UnpackType) -> set[str]:
return t.type.accept(self)
def visit_parameters(self, t: types.Parameters) -> set[str]:
return self._visit(t.arg_types)
def visit_instance(self, t: types.Instance) -> set[str]:
out = self._visit(t.args)
if t.type:
# Uses of a class depend on everything in the MRO,
# as changes to classes in the MRO can add types to methods,
# change property types, change the MRO itself, etc.
for s in t.type.mro:
out.update(split_module_names(s.module_name))
if t.type.metaclass_type is not None:
out.update(split_module_names(t.type.metaclass_type.type.module_name))
return out
def visit_callable_type(self, t: types.CallableType) -> set[str]:
out = self._visit(t.arg_types) | self._visit(t.ret_type)
if t.definition is not None:
out.update(extract_module_names(t.definition.fullname))
return out
def visit_overloaded(self, t: types.Overloaded) -> set[str]:
return self._visit(t.items) | self._visit(t.fallback)
def visit_tuple_type(self, t: types.TupleType) -> set[str]:
return self._visit(t.items) | self._visit(t.partial_fallback)
def visit_typeddict_type(self, t: types.TypedDictType) -> set[str]:
return self._visit(t.items.values()) | self._visit(t.fallback)
def visit_literal_type(self, t: types.LiteralType) -> set[str]:
return self._visit(t.fallback)
def visit_union_type(self, t: types.UnionType) -> set[str]:
return self._visit(t.items)
def visit_partial_type(self, t: types.PartialType) -> set[str]:
return set()
def visit_type_type(self, t: types.TypeType) -> set[str]:
return self._visit(t.item)
def visit_type_alias_type(self, t: types.TypeAliasType) -> set[str]:
return self._visit(types.get_proper_type(t))
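# Illustrative sketch (not part of the vendored module): extract_module_names()
# drops the fully qualified name itself and keeps the candidate module prefixes.
# The expected outputs below assume split_module_names() returns the name
# followed by each enclosing package, as documented in mypy.util.
def _demo_extract_module_names() -> None:
    print(extract_module_names("mypy.nodes.FuncDef"))  # expected: ['mypy.nodes', 'mypy']
    print(extract_module_names(None))  # expected: []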
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/indirection.py
|
Python
|
NOASSERTION
| 4,595 |
"""Utilities for type argument inference."""
from __future__ import annotations
from typing import NamedTuple, Sequence
from mypy.constraints import (
SUBTYPE_OF,
SUPERTYPE_OF,
infer_constraints,
infer_constraints_for_callable,
)
from mypy.nodes import ArgKind
from mypy.solve import solve_constraints
from mypy.types import CallableType, Instance, Type, TypeVarLikeType
class ArgumentInferContext(NamedTuple):
"""Type argument inference context.
We need this because we pass around ``Mapping`` and ``Iterable`` types.
These types are only known by ``TypeChecker`` itself.
It is required for ``*`` and ``**`` argument inference.
https://github.com/python/mypy/issues/11144
"""
mapping_type: Instance
iterable_type: Instance
def infer_function_type_arguments(
callee_type: CallableType,
arg_types: Sequence[Type | None],
arg_kinds: list[ArgKind],
arg_names: Sequence[str | None] | None,
formal_to_actual: list[list[int]],
context: ArgumentInferContext,
strict: bool = True,
allow_polymorphic: bool = False,
) -> tuple[list[Type | None], list[TypeVarLikeType]]:
"""Infer the type arguments of a generic function.
Return an array of lower bound types for the type variables -1 (at
index 0), -2 (at index 1), etc. A lower bound is None if a value
could not be inferred.
Arguments:
callee_type: the target generic function
arg_types: argument types at the call site (each optional; if None,
we are not considering this argument in the current pass)
arg_kinds: nodes.ARG_* values for arg_types
formal_to_actual: mapping from formal to actual variable indices
"""
# Infer constraints.
constraints = infer_constraints_for_callable(
callee_type, arg_types, arg_kinds, arg_names, formal_to_actual, context
)
# Solve constraints.
type_vars = callee_type.variables
return solve_constraints(type_vars, constraints, strict, allow_polymorphic)
def infer_type_arguments(
type_vars: Sequence[TypeVarLikeType],
template: Type,
actual: Type,
is_supertype: bool = False,
skip_unsatisfied: bool = False,
) -> list[Type | None]:
# Like infer_function_type_arguments, but only match a single type
# against a generic type.
constraints = infer_constraints(template, actual, SUPERTYPE_OF if is_supertype else SUBTYPE_OF)
return solve_constraints(type_vars, constraints, skip_unsatisfied=skip_unsatisfied)[0]
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/infer.py
|
Python
|
NOASSERTION
| 2,511 |
from __future__ import annotations
import os
from collections import defaultdict
from functools import cmp_to_key
from typing import Callable
from mypy.build import State
from mypy.messages import format_type
from mypy.modulefinder import PYTHON_EXTENSIONS
from mypy.nodes import (
LDEF,
Decorator,
Expression,
FuncBase,
MemberExpr,
MypyFile,
Node,
OverloadedFuncDef,
RefExpr,
SymbolNode,
TypeInfo,
Var,
)
from mypy.server.update import FineGrainedBuildManager
from mypy.traverser import ExtendedTraverserVisitor
from mypy.typeops import tuple_fallback
from mypy.types import (
FunctionLike,
Instance,
LiteralType,
ProperType,
TupleType,
TypedDictType,
TypeVarType,
UnionType,
get_proper_type,
)
from mypy.typevars import fill_typevars_with_any
def node_starts_after(o: Node, line: int, column: int) -> bool:
return o.line > line or o.line == line and o.column > column
def node_ends_before(o: Node, line: int, column: int) -> bool:
# Unfortunately, end positions for some statements are a mess,
# e.g. overloaded functions, so we return False when we don't know.
if o.end_line is not None and o.end_column is not None:
if o.end_line < line or o.end_line == line and o.end_column < column:
return True
return False
def expr_span(expr: Expression) -> str:
"""Format expression span as in mypy error messages."""
return f"{expr.line}:{expr.column + 1}:{expr.end_line}:{expr.end_column}"
def get_instance_fallback(typ: ProperType) -> list[Instance]:
"""Returns the Instance fallback for this type if one exists or None."""
if isinstance(typ, Instance):
return [typ]
elif isinstance(typ, TupleType):
return [tuple_fallback(typ)]
elif isinstance(typ, TypedDictType):
return [typ.fallback]
elif isinstance(typ, FunctionLike):
return [typ.fallback]
elif isinstance(typ, LiteralType):
return [typ.fallback]
elif isinstance(typ, TypeVarType):
if typ.values:
res = []
for t in typ.values:
res.extend(get_instance_fallback(get_proper_type(t)))
return res
return get_instance_fallback(get_proper_type(typ.upper_bound))
elif isinstance(typ, UnionType):
res = []
for t in typ.items:
res.extend(get_instance_fallback(get_proper_type(t)))
return res
return []
def find_node(name: str, info: TypeInfo) -> Var | FuncBase | None:
"""Find the node defining member 'name' in given TypeInfo."""
# TODO: this code shares some logic with checkmember.py
method = info.get_method(name)
if method:
if isinstance(method, Decorator):
return method.var
if method.is_property:
assert isinstance(method, OverloadedFuncDef)
dec = method.items[0]
assert isinstance(dec, Decorator)
return dec.var
return method
else:
# don't have such method, maybe variable?
node = info.get(name)
v = node.node if node else None
if isinstance(v, Var):
return v
return None
def find_module_by_fullname(fullname: str, modules: dict[str, State]) -> State | None:
"""Find module by a node fullname.
This logic mimics the one we use in fixup, so should be good enough.
"""
head = fullname
# Special case: a module symbol is considered to be defined in itself, not in enclosing
# package, since this is what users want when clicking go to definition on a module.
if head in modules:
return modules[head]
while True:
if "." not in head:
return None
head, tail = head.rsplit(".", maxsplit=1)
mod = modules.get(head)
if mod is not None:
return mod
class SearchVisitor(ExtendedTraverserVisitor):
"""Visitor looking for an expression whose span matches given one exactly."""
def __init__(self, line: int, column: int, end_line: int, end_column: int) -> None:
self.line = line
self.column = column
self.end_line = end_line
self.end_column = end_column
self.result: Expression | None = None
def visit(self, o: Node) -> bool:
if node_starts_after(o, self.line, self.column):
return False
if node_ends_before(o, self.end_line, self.end_column):
return False
if (
o.line == self.line
and o.end_line == self.end_line
and o.column == self.column
and o.end_column == self.end_column
):
if isinstance(o, Expression):
self.result = o
return self.result is None
def find_by_location(
tree: MypyFile, line: int, column: int, end_line: int, end_column: int
) -> Expression | None:
"""Find an expression matching given span, or None if not found."""
if end_line < line:
raise ValueError('"end_line" must not be before "line"')
if end_line == line and end_column <= column:
raise ValueError('"end_column" must be after "column"')
visitor = SearchVisitor(line, column, end_line, end_column)
tree.accept(visitor)
return visitor.result
class SearchAllVisitor(ExtendedTraverserVisitor):
"""Visitor looking for all expressions whose spans enclose given position."""
def __init__(self, line: int, column: int) -> None:
self.line = line
self.column = column
self.result: list[Expression] = []
def visit(self, o: Node) -> bool:
if node_starts_after(o, self.line, self.column):
return False
if node_ends_before(o, self.line, self.column):
return False
if isinstance(o, Expression):
self.result.append(o)
return True
def find_all_by_location(tree: MypyFile, line: int, column: int) -> list[Expression]:
"""Find all expressions enclosing given position starting from innermost."""
visitor = SearchAllVisitor(line, column)
tree.accept(visitor)
return list(reversed(visitor.result))
class InspectionEngine:
"""Engine for locating and statically inspecting expressions."""
def __init__(
self,
fg_manager: FineGrainedBuildManager,
*,
verbosity: int = 0,
limit: int = 0,
include_span: bool = False,
include_kind: bool = False,
include_object_attrs: bool = False,
union_attrs: bool = False,
force_reload: bool = False,
) -> None:
self.fg_manager = fg_manager
self.verbosity = verbosity
self.limit = limit
self.include_span = include_span
self.include_kind = include_kind
self.include_object_attrs = include_object_attrs
self.union_attrs = union_attrs
self.force_reload = force_reload
# Module for which inspection was requested.
self.module: State | None = None
def reload_module(self, state: State) -> None:
"""Reload given module while temporary exporting types."""
old = self.fg_manager.manager.options.export_types
self.fg_manager.manager.options.export_types = True
try:
self.fg_manager.flush_cache()
assert state.path is not None
self.fg_manager.update([(state.id, state.path)], [])
finally:
self.fg_manager.manager.options.export_types = old
def expr_type(self, expression: Expression) -> tuple[str, bool]:
"""Format type for an expression using current options.
If type is known, second item returned is True. If type is not known, an error
message is returned instead, and second item returned is False.
"""
expr_type = self.fg_manager.manager.all_types.get(expression)
if expr_type is None:
return self.missing_type(expression), False
type_str = format_type(
expr_type, self.fg_manager.manager.options, verbosity=self.verbosity
)
return self.add_prefixes(type_str, expression), True
def object_type(self) -> Instance:
builtins = self.fg_manager.graph["builtins"].tree
assert builtins is not None
object_node = builtins.names["object"].node
assert isinstance(object_node, TypeInfo)
return Instance(object_node, [])
def collect_attrs(self, instances: list[Instance]) -> dict[TypeInfo, list[str]]:
"""Collect attributes from all union/typevar variants."""
def item_attrs(attr_dict: dict[TypeInfo, list[str]]) -> set[str]:
attrs = set()
for base in attr_dict:
attrs |= set(attr_dict[base])
return attrs
def cmp_types(x: TypeInfo, y: TypeInfo) -> int:
if x in y.mro:
return 1
if y in x.mro:
return -1
return 0
# First gather all attributes for every union variant.
assert instances
all_attrs = []
for instance in instances:
attrs = {}
mro = instance.type.mro
if not self.include_object_attrs:
mro = mro[:-1]
for base in mro:
attrs[base] = sorted(base.names)
all_attrs.append(attrs)
# Find attributes valid for all variants in a union or type variable.
intersection = item_attrs(all_attrs[0])
for item in all_attrs[1:]:
intersection &= item_attrs(item)
# Combine attributes from all variants into a single dict while
# also removing invalid attributes (unless using --union-attrs).
combined_attrs = defaultdict(list)
for item in all_attrs:
for base in item:
if base in combined_attrs:
continue
for name in item[base]:
if self.union_attrs or name in intersection:
combined_attrs[base].append(name)
# Sort bases by MRO, unrelated will appear in the order they appeared as union variants.
sorted_bases = sorted(combined_attrs.keys(), key=cmp_to_key(cmp_types))
result = {}
for base in sorted_bases:
if not combined_attrs[base]:
# Skip bases where everything was filtered out.
continue
result[base] = combined_attrs[base]
return result
def _fill_from_dict(
self, attrs_strs: list[str], attrs_dict: dict[TypeInfo, list[str]]
) -> None:
for base in attrs_dict:
cls_name = base.name if self.verbosity < 1 else base.fullname
attrs = [f'"{attr}"' for attr in attrs_dict[base]]
attrs_strs.append(f'"{cls_name}": [{", ".join(attrs)}]')
def expr_attrs(self, expression: Expression) -> tuple[str, bool]:
"""Format attributes that are valid for a given expression.
If expression type is not an Instance, try using fallback. Attributes are
returned as a JSON (ordered by MRO) that maps base class name to list of
attributes. Attributes may appear in multiple bases if overridden (we simply
follow usual mypy logic for creating new Vars etc).
"""
expr_type = self.fg_manager.manager.all_types.get(expression)
if expr_type is None:
return self.missing_type(expression), False
expr_type = get_proper_type(expr_type)
instances = get_instance_fallback(expr_type)
if not instances:
# Everything is an object in Python.
instances = [self.object_type()]
attrs_dict = self.collect_attrs(instances)
# Special case: modules have names apart from those from ModuleType.
if isinstance(expression, RefExpr) and isinstance(expression.node, MypyFile):
node = expression.node
names = sorted(node.names)
if "__builtins__" in names:
# This is just to make tests stable. No one will really need this name.
names.remove("__builtins__")
mod_dict = {f'"<{node.fullname}>"': [f'"{name}"' for name in names]}
else:
mod_dict = {}
# Special case: for class callables, prepend with the class attributes.
# TODO: also handle cases when such callable appears in a union.
if isinstance(expr_type, FunctionLike) and expr_type.is_type_obj():
template = fill_typevars_with_any(expr_type.type_object())
class_dict = self.collect_attrs(get_instance_fallback(template))
else:
class_dict = {}
# We don't use JSON dump to be sure keys order is always preserved.
base_attrs = []
if mod_dict:
for mod in mod_dict:
base_attrs.append(f'{mod}: [{", ".join(mod_dict[mod])}]')
self._fill_from_dict(base_attrs, class_dict)
self._fill_from_dict(base_attrs, attrs_dict)
return self.add_prefixes(f'{{{", ".join(base_attrs)}}}', expression), True
def format_node(self, module: State, node: FuncBase | SymbolNode) -> str:
return f"{module.path}:{node.line}:{node.column + 1}:{node.name}"
def collect_nodes(self, expression: RefExpr) -> list[FuncBase | SymbolNode]:
"""Collect nodes that can be referred to by an expression.
Note: there can be more than one, for example in the case of a union attribute.
"""
node: FuncBase | SymbolNode | None = expression.node
nodes: list[FuncBase | SymbolNode]
if node is None:
# Tricky case: instance attribute
if isinstance(expression, MemberExpr) and expression.kind is None:
base_type = self.fg_manager.manager.all_types.get(expression.expr)
if base_type is None:
return []
# Now we use the base type to figure out where the attribute is defined.
base_type = get_proper_type(base_type)
instances = get_instance_fallback(base_type)
nodes = []
for instance in instances:
node = find_node(expression.name, instance.type)
if node:
nodes.append(node)
if not nodes:
# Try checking class namespace if attribute is on a class object.
if isinstance(base_type, FunctionLike) and base_type.is_type_obj():
instances = get_instance_fallback(
fill_typevars_with_any(base_type.type_object())
)
for instance in instances:
node = find_node(expression.name, instance.type)
if node:
nodes.append(node)
else:
# Still no luck, give up.
return []
else:
return []
else:
# Easy case: a module-level definition
nodes = [node]
return nodes
def modules_for_nodes(
self, nodes: list[FuncBase | SymbolNode], expression: RefExpr
) -> tuple[dict[FuncBase | SymbolNode, State], bool]:
"""Gather modules where given nodes where defined.
Also check if they need to be refreshed (cached nodes may have
lines/columns missing).
"""
modules = {}
reload_needed = False
for node in nodes:
module = find_module_by_fullname(node.fullname, self.fg_manager.graph)
if not module:
if expression.kind == LDEF and self.module:
module = self.module
else:
continue
modules[node] = module
if not module.tree or module.tree.is_cache_skeleton or self.force_reload:
reload_needed |= not module.tree or module.tree.is_cache_skeleton
self.reload_module(module)
return modules, reload_needed
def expression_def(self, expression: Expression) -> tuple[str, bool]:
"""Find and format definition location for an expression.
If it is not a RefExpr, it is effectively skipped by returning an
empty result.
"""
if not isinstance(expression, RefExpr):
# If there are no suitable matches at all, we return error later.
return "", True
nodes = self.collect_nodes(expression)
if not nodes:
return self.missing_node(expression), False
modules, reload_needed = self.modules_for_nodes(nodes, expression)
if reload_needed:
# TODO: line/column are not stored in the cache for the vast majority of symbol nodes.
# Adding them would make things faster, but would have a visible memory impact.
nodes = self.collect_nodes(expression)
modules, reload_needed = self.modules_for_nodes(nodes, expression)
assert not reload_needed
result = []
for node in modules:
result.append(self.format_node(modules[node], node))
if not result:
return self.missing_node(expression), False
return self.add_prefixes(", ".join(result), expression), True
def missing_type(self, expression: Expression) -> str:
alt_suggestion = ""
if not self.force_reload:
alt_suggestion = " or try --force-reload"
return (
f'No known type available for "{type(expression).__name__}"'
f" (maybe unreachable{alt_suggestion})"
)
def missing_node(self, expression: Expression) -> str:
return (
f'Cannot find definition for "{type(expression).__name__}"'
f" at {expr_span(expression)}"
)
def add_prefixes(self, result: str, expression: Expression) -> str:
prefixes = []
if self.include_kind:
prefixes.append(f"{type(expression).__name__}")
if self.include_span:
prefixes.append(expr_span(expression))
if prefixes:
prefix = ":".join(prefixes) + " -> "
else:
prefix = ""
return prefix + result
def run_inspection_by_exact_location(
self,
tree: MypyFile,
line: int,
column: int,
end_line: int,
end_column: int,
method: Callable[[Expression], tuple[str, bool]],
) -> dict[str, object]:
"""Get type of an expression matching a span.
Type or error is returned as a standard daemon response dict.
"""
try:
expression = find_by_location(tree, line, column - 1, end_line, end_column)
except ValueError as err:
return {"error": str(err)}
if expression is None:
span = f"{line}:{column}:{end_line}:{end_column}"
return {"out": f"Can't find expression at span {span}", "err": "", "status": 1}
inspection_str, success = method(expression)
return {"out": inspection_str, "err": "", "status": 0 if success else 1}
def run_inspection_by_position(
self,
tree: MypyFile,
line: int,
column: int,
method: Callable[[Expression], tuple[str, bool]],
) -> dict[str, object]:
"""Get types of all expressions enclosing a position.
Types and/or errors are returned as a standard daemon response dict.
"""
expressions = find_all_by_location(tree, line, column - 1)
if not expressions:
position = f"{line}:{column}"
return {
"out": f"Can't find any expressions at position {position}",
"err": "",
"status": 1,
}
inspection_strs = []
status = 0
for expression in expressions:
inspection_str, success = method(expression)
if not success:
status = 1
if inspection_str:
inspection_strs.append(inspection_str)
if self.limit:
inspection_strs = inspection_strs[: self.limit]
return {"out": "\n".join(inspection_strs), "err": "", "status": status}
def find_module(self, file: str) -> tuple[State | None, dict[str, object]]:
"""Find module by path, or return a suitable error message.
Note we don't use exceptions to simplify handling 1 vs 2 statuses.
"""
if not any(file.endswith(ext) for ext in PYTHON_EXTENSIONS):
return None, {"error": "Source file is not a Python file"}
# We use a slightly slower but more robust way to find a module by path,
# to be sure that namespace packages are handled properly.
abs_path = os.path.abspath(file)
state = next((s for s in self.fg_manager.graph.values() if s.abspath == abs_path), None)
self.module = state
return (
state,
{"out": f"Unknown module: {file}", "err": "", "status": 1} if state is None else {},
)
def run_inspection(
self, location: str, method: Callable[[Expression], tuple[str, bool]]
) -> dict[str, object]:
"""Top-level logic to inspect expression(s) at a location.
This can be re-used by various simple inspections.
"""
try:
file, pos = parse_location(location)
except ValueError as err:
return {"error": str(err)}
state, err_dict = self.find_module(file)
if state is None:
assert err_dict
return err_dict
# Force reloading to load from cache, account for any edits, etc.
if not state.tree or state.tree.is_cache_skeleton or self.force_reload:
self.reload_module(state)
assert state.tree is not None
if len(pos) == 4:
# Full span, return an exact match only.
line, column, end_line, end_column = pos
return self.run_inspection_by_exact_location(
state.tree, line, column, end_line, end_column, method
)
assert len(pos) == 2
# Inexact location, return all expressions.
line, column = pos
return self.run_inspection_by_position(state.tree, line, column, method)
def get_type(self, location: str) -> dict[str, object]:
"""Get types of expression(s) at a location."""
return self.run_inspection(location, self.expr_type)
def get_attrs(self, location: str) -> dict[str, object]:
"""Get attributes of expression(s) at a location."""
return self.run_inspection(location, self.expr_attrs)
def get_definition(self, location: str) -> dict[str, object]:
"""Get symbol definitions of expression(s) at a location."""
result = self.run_inspection(location, self.expression_def)
if "out" in result and not result["out"]:
# None of the expressions found turns out to be a RefExpr.
_, location = location.split(":", maxsplit=1)
result["out"] = f"No name or member expressions at {location}"
result["status"] = 1
return result
def parse_location(location: str) -> tuple[str, list[int]]:
if location.count(":") < 2:
raise ValueError("Format should be file:line:column[:end_line:end_column]")
parts = location.rsplit(":", maxsplit=2)
start, *rest = parts
# Note: we must allow drive prefix like `C:` on Windows.
if start.count(":") < 2:
return start, [int(p) for p in rest]
parts = start.rsplit(":", maxsplit=2)
start, *start_rest = parts
if start.count(":") < 2:
return start, [int(p) for p in start_rest + rest]
raise ValueError("Format should be file:line:column[:end_line:end_column]")
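# Illustrative sketch (not part of the vendored module): parse_location()
# accepts both the position form and the full span form, and tolerates a
# Windows drive prefix in the file part. The paths are made-up examples.
def _demo_parse_location() -> None:
    assert parse_location("foo.py:12:5") == ("foo.py", [12, 5])
    assert parse_location("foo.py:1:2:3:4") == ("foo.py", [1, 2, 3, 4])
    assert parse_location(r"C:\proj\foo.py:12:5") == (r"C:\proj\foo.py", [12, 5])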
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/inspections.py
|
Python
|
NOASSERTION
| 23,820 |
"""Cross platform abstractions for inter-process communication
On Unix, this uses AF_UNIX sockets.
On Windows, this uses NamedPipes.
"""
from __future__ import annotations
import base64
import codecs
import os
import shutil
import sys
import tempfile
from types import TracebackType
from typing import Callable, Final
if sys.platform == "win32":
# This may be private, but it is needed for IPC on Windows, and is basically stable
import ctypes
import _winapi
_IPCHandle = int
kernel32 = ctypes.windll.kernel32
DisconnectNamedPipe: Callable[[_IPCHandle], int] = kernel32.DisconnectNamedPipe
FlushFileBuffers: Callable[[_IPCHandle], int] = kernel32.FlushFileBuffers
else:
import socket
_IPCHandle = socket.socket
class IPCException(Exception):
"""Exception for IPC issues."""
class IPCBase:
"""Base class for communication between the dmypy client and server.
This contains logic shared between the client and server, such as reading
and writing.
We want to be able to send multiple "messages" over a single connection and
to be able to separate the messages. We do this by encoding the messages
in an alphabet that does not contain spaces, then adding a space for
separation. The last framed message is also followed by a space.
"""
connection: _IPCHandle
def __init__(self, name: str, timeout: float | None) -> None:
self.name = name
self.timeout = timeout
self.buffer = bytearray()
def frame_from_buffer(self) -> bytearray | None:
"""Return a full frame from the bytes we have in the buffer."""
space_pos = self.buffer.find(b" ")
if space_pos == -1:
return None
# We have a full frame
bdata = self.buffer[:space_pos]
self.buffer = self.buffer[space_pos + 1 :]
return bdata
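# A hedged example of the framing described in the class docstring: the
# payload "hi" is base64-encoded by write() to b"aGk=\n" and terminated with
# a single space, so two consecutive messages arrive on the wire as
#     b"aGk=\n aGk=\n "
# and frame_from_buffer() peels them off one at a time at each space.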
def read(self, size: int = 100000) -> str:
"""Read bytes from an IPC connection until we have a full frame."""
bdata: bytearray | None = bytearray()
if sys.platform == "win32":
while True:
# Check if we already have a message in the buffer before
# receiving any more data from the socket.
bdata = self.frame_from_buffer()
if bdata is not None:
break
# Receive more data into the buffer.
ov, err = _winapi.ReadFile(self.connection, size, overlapped=True)
try:
if err == _winapi.ERROR_IO_PENDING:
timeout = int(self.timeout * 1000) if self.timeout else _winapi.INFINITE
res = _winapi.WaitForSingleObject(ov.event, timeout)
if res != _winapi.WAIT_OBJECT_0:
raise IPCException(f"Bad result from I/O wait: {res}")
except BaseException:
ov.cancel()
raise
_, err = ov.GetOverlappedResult(True)
more = ov.getbuffer()
if more:
self.buffer.extend(more)
bdata = self.frame_from_buffer()
if bdata is not None:
break
if err == 0:
# we are done!
break
elif err == _winapi.ERROR_MORE_DATA:
# read again
continue
elif err == _winapi.ERROR_OPERATION_ABORTED:
raise IPCException("ReadFile operation aborted.")
else:
while True:
# Check if we already have a message in the buffer before
# receiving any more data from the socket.
bdata = self.frame_from_buffer()
if bdata is not None:
break
# Receive more data into the buffer.
more = self.connection.recv(size)
if not more:
# Connection closed
break
self.buffer.extend(more)
if not bdata:
# Socket was empty and we didn't get any frame.
# This should only happen if the socket was closed.
return ""
return codecs.decode(bdata, "base64").decode("utf8")
def write(self, data: str) -> None:
"""Write to an IPC connection."""
        # Frame the data by base64-encoding it and separating frames with a space.
encoded_data = codecs.encode(data.encode("utf8"), "base64") + b" "
if sys.platform == "win32":
try:
ov, err = _winapi.WriteFile(self.connection, encoded_data, overlapped=True)
try:
if err == _winapi.ERROR_IO_PENDING:
timeout = int(self.timeout * 1000) if self.timeout else _winapi.INFINITE
res = _winapi.WaitForSingleObject(ov.event, timeout)
if res != _winapi.WAIT_OBJECT_0:
raise IPCException(f"Bad result from I/O wait: {res}")
elif err != 0:
raise IPCException(f"Failed writing to pipe with error: {err}")
except BaseException:
ov.cancel()
raise
bytes_written, err = ov.GetOverlappedResult(True)
assert err == 0, err
assert bytes_written == len(encoded_data)
except OSError as e:
raise IPCException(f"Failed to write with error: {e.winerror}") from e
else:
self.connection.sendall(encoded_data)
def close(self) -> None:
if sys.platform == "win32":
if self.connection != _winapi.NULL:
_winapi.CloseHandle(self.connection)
else:
self.connection.close()
class IPCClient(IPCBase):
"""The client side of an IPC connection."""
def __init__(self, name: str, timeout: float | None) -> None:
super().__init__(name, timeout)
if sys.platform == "win32":
timeout = int(self.timeout * 1000) if self.timeout else _winapi.NMPWAIT_WAIT_FOREVER
try:
_winapi.WaitNamedPipe(self.name, timeout)
except FileNotFoundError as e:
raise IPCException(f"The NamedPipe at {self.name} was not found.") from e
except OSError as e:
if e.winerror == _winapi.ERROR_SEM_TIMEOUT:
raise IPCException("Timed out waiting for connection.") from e
else:
raise
try:
self.connection = _winapi.CreateFile(
self.name,
_winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
0,
_winapi.NULL,
_winapi.OPEN_EXISTING,
_winapi.FILE_FLAG_OVERLAPPED,
_winapi.NULL,
)
except OSError as e:
if e.winerror == _winapi.ERROR_PIPE_BUSY:
raise IPCException("The connection is busy.") from e
else:
raise
_winapi.SetNamedPipeHandleState(
self.connection, _winapi.PIPE_READMODE_MESSAGE, None, None
)
else:
self.connection = socket.socket(socket.AF_UNIX)
self.connection.settimeout(timeout)
self.connection.connect(name)
def __enter__(self) -> IPCClient:
return self
def __exit__(
self,
exc_ty: type[BaseException] | None = None,
exc_val: BaseException | None = None,
exc_tb: TracebackType | None = None,
) -> None:
self.close()
class IPCServer(IPCBase):
BUFFER_SIZE: Final = 2**16
def __init__(self, name: str, timeout: float | None = None) -> None:
if sys.platform == "win32":
name = r"\\.\pipe\{}-{}.pipe".format(
name, base64.urlsafe_b64encode(os.urandom(6)).decode()
)
else:
name = f"{name}.sock"
super().__init__(name, timeout)
if sys.platform == "win32":
self.connection = _winapi.CreateNamedPipe(
self.name,
_winapi.PIPE_ACCESS_DUPLEX
| _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
| _winapi.FILE_FLAG_OVERLAPPED,
_winapi.PIPE_READMODE_MESSAGE
| _winapi.PIPE_TYPE_MESSAGE
| _winapi.PIPE_WAIT
| 0x8, # PIPE_REJECT_REMOTE_CLIENTS
1, # one instance
self.BUFFER_SIZE,
self.BUFFER_SIZE,
_winapi.NMPWAIT_WAIT_FOREVER,
0, # Use default security descriptor
)
if self.connection == -1: # INVALID_HANDLE_VALUE
err = _winapi.GetLastError()
raise IPCException(f"Invalid handle to pipe: {err}")
else:
self.sock_directory = tempfile.mkdtemp()
sockfile = os.path.join(self.sock_directory, self.name)
self.sock = socket.socket(socket.AF_UNIX)
self.sock.bind(sockfile)
self.sock.listen(1)
if timeout is not None:
self.sock.settimeout(timeout)
def __enter__(self) -> IPCServer:
if sys.platform == "win32":
# NOTE: It is theoretically possible that this will hang forever if the
# client never connects, though this can be "solved" by killing the server
try:
ov = _winapi.ConnectNamedPipe(self.connection, overlapped=True)
except OSError as e:
# Don't raise if the client already exists, or the client already connected
if e.winerror not in (_winapi.ERROR_PIPE_CONNECTED, _winapi.ERROR_NO_DATA):
raise
else:
try:
timeout = int(self.timeout * 1000) if self.timeout else _winapi.INFINITE
res = _winapi.WaitForSingleObject(ov.event, timeout)
assert res == _winapi.WAIT_OBJECT_0
except BaseException:
ov.cancel()
_winapi.CloseHandle(self.connection)
raise
_, err = ov.GetOverlappedResult(True)
assert err == 0
else:
try:
self.connection, _ = self.sock.accept()
except socket.timeout as e:
raise IPCException("The socket timed out") from e
return self
def __exit__(
self,
exc_ty: type[BaseException] | None = None,
exc_val: BaseException | None = None,
exc_tb: TracebackType | None = None,
) -> None:
if sys.platform == "win32":
try:
# Wait for the client to finish reading the last write before disconnecting
if not FlushFileBuffers(self.connection):
raise IPCException(
"Failed to flush NamedPipe buffer, maybe the client hung up?"
)
finally:
DisconnectNamedPipe(self.connection)
else:
self.close()
def cleanup(self) -> None:
if sys.platform == "win32":
self.close()
else:
shutil.rmtree(self.sock_directory)
@property
def connection_name(self) -> str:
if sys.platform == "win32":
return self.name
else:
name = self.sock.getsockname()
assert isinstance(name, str)
return name
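# A minimal usage sketch (not part of mypy) pairing the IPCServer and IPCClient
# classes above: the server blocks in __enter__ until a client connects, and each
# read() returns exactly one framed message. The connection name "mypy-demo" and
# this helper function are hypothetical.
def _demo_ipc_roundtrip() -> None:
    import threading

    server = IPCServer("mypy-demo", timeout=5)

    def serve() -> None:
        # Echo a single message back in upper case; __exit__ then closes the pipe.
        with server as conn:
            conn.write(conn.read().upper())

    t = threading.Thread(target=serve)
    t.start()
    with IPCClient(server.connection_name, timeout=5) as client:
        client.write("ping")
        assert client.read() == "PING"
    t.join()
    server.cleanup()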
| algorandfoundation/puya | src/puyapy/_vendor/mypy/ipc.py | Python | NOASSERTION | 11,702 |
"""Calculation of the least upper bound types (joins)."""
from __future__ import annotations
from typing import Sequence, overload
import mypy.typeops
from mypy.expandtype import expand_type
from mypy.maptype import map_instance_to_supertype
from mypy.nodes import CONTRAVARIANT, COVARIANT, INVARIANT, VARIANCE_NOT_READY
from mypy.state import state
from mypy.subtypes import (
SubtypeContext,
find_member,
is_equivalent,
is_proper_subtype,
is_protocol_implementation,
is_subtype,
)
from mypy.types import (
AnyType,
CallableType,
DeletedType,
ErasedType,
FunctionLike,
Instance,
LiteralType,
NoneType,
Overloaded,
Parameters,
ParamSpecType,
PartialType,
ProperType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeType,
TypeVarId,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
TypeVisitor,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
find_unpack_in_list,
get_proper_type,
get_proper_types,
split_with_prefix_and_suffix,
)
class InstanceJoiner:
def __init__(self) -> None:
self.seen_instances: list[tuple[Instance, Instance]] = []
def join_instances(self, t: Instance, s: Instance) -> ProperType:
if (t, s) in self.seen_instances or (s, t) in self.seen_instances:
return object_from_instance(t)
self.seen_instances.append((t, s))
# Calculate the join of two instance types
if t.type == s.type:
# Simplest case: join two types with the same base type (but
# potentially different arguments).
# Combine type arguments.
args: list[Type] = []
# N.B: We use zip instead of indexing because the lengths might have
# mismatches during daemon reprocessing.
if t.type.has_type_var_tuple_type:
# We handle joins of variadic instances by simply creating correct mapping
# for type arguments and compute the individual joins same as for regular
# instances. All the heavy lifting is done in the join of tuple types.
assert s.type.type_var_tuple_prefix is not None
assert s.type.type_var_tuple_suffix is not None
prefix = s.type.type_var_tuple_prefix
suffix = s.type.type_var_tuple_suffix
tvt = s.type.defn.type_vars[prefix]
assert isinstance(tvt, TypeVarTupleType)
fallback = tvt.tuple_fallback
s_prefix, s_middle, s_suffix = split_with_prefix_and_suffix(s.args, prefix, suffix)
t_prefix, t_middle, t_suffix = split_with_prefix_and_suffix(t.args, prefix, suffix)
s_args = s_prefix + (TupleType(list(s_middle), fallback),) + s_suffix
t_args = t_prefix + (TupleType(list(t_middle), fallback),) + t_suffix
else:
t_args = t.args
s_args = s.args
for ta, sa, type_var in zip(t_args, s_args, t.type.defn.type_vars):
ta_proper = get_proper_type(ta)
sa_proper = get_proper_type(sa)
new_type: Type | None = None
if isinstance(ta_proper, AnyType):
new_type = AnyType(TypeOfAny.from_another_any, ta_proper)
elif isinstance(sa_proper, AnyType):
new_type = AnyType(TypeOfAny.from_another_any, sa_proper)
elif isinstance(type_var, TypeVarType):
if type_var.variance in (COVARIANT, VARIANCE_NOT_READY):
new_type = join_types(ta, sa, self)
if len(type_var.values) != 0 and new_type not in type_var.values:
self.seen_instances.pop()
return object_from_instance(t)
if not is_subtype(new_type, type_var.upper_bound):
self.seen_instances.pop()
return object_from_instance(t)
# TODO: contravariant case should use meet but pass seen instances as
# an argument to keep track of recursive checks.
elif type_var.variance in (INVARIANT, CONTRAVARIANT):
if isinstance(ta_proper, UninhabitedType) and ta_proper.ambiguous:
new_type = sa
elif isinstance(sa_proper, UninhabitedType) and sa_proper.ambiguous:
new_type = ta
elif not is_equivalent(ta, sa):
self.seen_instances.pop()
return object_from_instance(t)
else:
# If the types are different but equivalent, then an Any is involved
# so using a join in the contravariant case is also OK.
new_type = join_types(ta, sa, self)
elif isinstance(type_var, TypeVarTupleType):
new_type = get_proper_type(join_types(ta, sa, self))
# Put the joined arguments back into instance in the normal form:
# a) Tuple[X, Y, Z] -> [X, Y, Z]
# b) tuple[X, ...] -> [*tuple[X, ...]]
if isinstance(new_type, Instance):
assert new_type.type.fullname == "builtins.tuple"
new_type = UnpackType(new_type)
else:
assert isinstance(new_type, TupleType)
args.extend(new_type.items)
continue
else:
# ParamSpec type variables behave the same, independent of variance
if not is_equivalent(ta, sa):
return get_proper_type(type_var.upper_bound)
new_type = join_types(ta, sa, self)
assert new_type is not None
args.append(new_type)
result: ProperType = Instance(t.type, args)
elif t.type.bases and is_proper_subtype(
t, s, subtype_context=SubtypeContext(ignore_type_params=True)
):
result = self.join_instances_via_supertype(t, s)
else:
# Now t is not a subtype of s, and t != s. Now s could be a subtype
# of t; alternatively, we need to find a common supertype. This works
            # in both cases.
result = self.join_instances_via_supertype(s, t)
self.seen_instances.pop()
return result
def join_instances_via_supertype(self, t: Instance, s: Instance) -> ProperType:
# Give preference to joins via duck typing relationship, so that
# join(int, float) == float, for example.
for p in t.type._promote:
if is_subtype(p, s):
return join_types(p, s, self)
for p in s.type._promote:
if is_subtype(p, t):
return join_types(t, p, self)
# Compute the "best" supertype of t when joined with s.
# The definition of "best" may evolve; for now it is the one with
# the longest MRO. Ties are broken by using the earlier base.
best: ProperType | None = None
for base in t.type.bases:
mapped = map_instance_to_supertype(t, base.type)
res = self.join_instances(mapped, s)
if best is None or is_better(res, best):
best = res
assert best is not None
for promote in t.type._promote:
if isinstance(promote, Instance):
res = self.join_instances(promote, s)
if is_better(res, best):
best = res
return best
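# A self-contained toy illustration (not mypy's actual algorithm) of the
# least-upper-bound idea behind join_instances_via_supertype above: walk one
# class's MRO from most to least specific and take the first entry that is also
# a supertype of the other operand; object is always a valid fallback.
def _toy_join(a: type, b: type) -> type:
    for base in a.__mro__:
        if issubclass(b, base):
            return base
    return object

# e.g. _toy_join(bool, int) is int, and _toy_join(int, str) is object.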
def join_simple(declaration: Type | None, s: Type, t: Type) -> ProperType:
"""Return a simple least upper bound given the declared type.
    This function should only be used by the binder, and should not recurse.
For all other uses, use `join_types()`.
"""
declaration = get_proper_type(declaration)
s = get_proper_type(s)
t = get_proper_type(t)
if (s.can_be_true, s.can_be_false) != (t.can_be_true, t.can_be_false):
# if types are restricted in different ways, use the more general versions
s = mypy.typeops.true_or_false(s)
t = mypy.typeops.true_or_false(t)
if isinstance(s, AnyType):
return s
if isinstance(s, ErasedType):
return t
if is_proper_subtype(s, t, ignore_promotions=True):
return t
if is_proper_subtype(t, s, ignore_promotions=True):
return s
if isinstance(declaration, UnionType):
return mypy.typeops.make_simplified_union([s, t])
if isinstance(s, NoneType) and not isinstance(t, NoneType):
s, t = t, s
if isinstance(s, UninhabitedType) and not isinstance(t, UninhabitedType):
s, t = t, s
# Meets/joins require callable type normalization.
s, t = normalize_callables(s, t)
if isinstance(s, UnionType) and not isinstance(t, UnionType):
s, t = t, s
value = t.accept(TypeJoinVisitor(s))
if declaration is None or is_subtype(value, declaration):
return value
return declaration
def trivial_join(s: Type, t: Type) -> Type:
"""Return one of types (expanded) if it is a supertype of other, otherwise top type."""
if is_subtype(s, t):
return t
elif is_subtype(t, s):
return s
else:
return object_or_any_from_type(get_proper_type(t))
@overload
def join_types(
s: ProperType, t: ProperType, instance_joiner: InstanceJoiner | None = None
) -> ProperType: ...
@overload
def join_types(s: Type, t: Type, instance_joiner: InstanceJoiner | None = None) -> Type: ...
def join_types(s: Type, t: Type, instance_joiner: InstanceJoiner | None = None) -> Type:
"""Return the least upper bound of s and t.
For example, the join of 'int' and 'object' is 'object'.
"""
if mypy.typeops.is_recursive_pair(s, t):
# This case can trigger an infinite recursion, general support for this will be
# tricky so we use a trivial join (like for protocols).
return trivial_join(s, t)
s = get_proper_type(s)
t = get_proper_type(t)
if (s.can_be_true, s.can_be_false) != (t.can_be_true, t.can_be_false):
# if types are restricted in different ways, use the more general versions
s = mypy.typeops.true_or_false(s)
t = mypy.typeops.true_or_false(t)
if isinstance(s, UnionType) and not isinstance(t, UnionType):
s, t = t, s
if isinstance(s, AnyType):
return s
if isinstance(s, ErasedType):
return t
if isinstance(s, NoneType) and not isinstance(t, NoneType):
s, t = t, s
if isinstance(s, UninhabitedType) and not isinstance(t, UninhabitedType):
s, t = t, s
# Meets/joins require callable type normalization.
s, t = normalize_callables(s, t)
# Use a visitor to handle non-trivial cases.
return t.accept(TypeJoinVisitor(s, instance_joiner))
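# Informal examples of joins computed by this module (restating the docstring
# and comments above, not executable as written):
#   join(int, object) == object   # closest common supertype
#   join(int, float)  == float    # via the _promote duck-typing preference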
class TypeJoinVisitor(TypeVisitor[ProperType]):
"""Implementation of the least upper bound algorithm.
Attributes:
s: The other (left) type operand.
"""
def __init__(self, s: ProperType, instance_joiner: InstanceJoiner | None = None) -> None:
self.s = s
self.instance_joiner = instance_joiner
def visit_unbound_type(self, t: UnboundType) -> ProperType:
return AnyType(TypeOfAny.special_form)
def visit_union_type(self, t: UnionType) -> ProperType:
if is_proper_subtype(self.s, t):
return t
else:
return mypy.typeops.make_simplified_union([self.s, t])
def visit_any(self, t: AnyType) -> ProperType:
return t
def visit_none_type(self, t: NoneType) -> ProperType:
if state.strict_optional:
if isinstance(self.s, (NoneType, UninhabitedType)):
return t
elif isinstance(self.s, UnboundType):
return AnyType(TypeOfAny.special_form)
else:
return mypy.typeops.make_simplified_union([self.s, t])
else:
return self.s
def visit_uninhabited_type(self, t: UninhabitedType) -> ProperType:
return self.s
def visit_deleted_type(self, t: DeletedType) -> ProperType:
return self.s
def visit_erased_type(self, t: ErasedType) -> ProperType:
return self.s
def visit_type_var(self, t: TypeVarType) -> ProperType:
if isinstance(self.s, TypeVarType) and self.s.id == t.id:
return self.s
else:
return self.default(self.s)
def visit_param_spec(self, t: ParamSpecType) -> ProperType:
if self.s == t:
return t
return self.default(self.s)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> ProperType:
if self.s == t:
return t
return self.default(self.s)
def visit_unpack_type(self, t: UnpackType) -> UnpackType:
raise NotImplementedError
def visit_parameters(self, t: Parameters) -> ProperType:
if isinstance(self.s, Parameters):
if len(t.arg_types) != len(self.s.arg_types):
return self.default(self.s)
from mypy.meet import meet_types
return t.copy_modified(
arg_types=[
meet_types(s_a, t_a) for s_a, t_a in zip(self.s.arg_types, t.arg_types)
],
arg_names=combine_arg_names(self.s, t),
)
else:
return self.default(self.s)
def visit_instance(self, t: Instance) -> ProperType:
if isinstance(self.s, Instance):
if self.instance_joiner is None:
self.instance_joiner = InstanceJoiner()
nominal = self.instance_joiner.join_instances(t, self.s)
structural: Instance | None = None
if t.type.is_protocol and is_protocol_implementation(self.s, t):
structural = t
elif self.s.type.is_protocol and is_protocol_implementation(t, self.s):
structural = self.s
# Structural join is preferred in the case where we have found both
            # structural and nominal and they have the same MRO length (see two comments
# in join_instances_via_supertype). Otherwise, just return the nominal join.
if not structural or is_better(nominal, structural):
return nominal
return structural
elif isinstance(self.s, FunctionLike):
if t.type.is_protocol:
call = unpack_callback_protocol(t)
if call:
return join_types(call, self.s)
return join_types(t, self.s.fallback)
elif isinstance(self.s, TypeType):
return join_types(t, self.s)
elif isinstance(self.s, TypedDictType):
return join_types(t, self.s)
elif isinstance(self.s, TupleType):
return join_types(t, self.s)
elif isinstance(self.s, LiteralType):
return join_types(t, self.s)
else:
return self.default(self.s)
def visit_callable_type(self, t: CallableType) -> ProperType:
if isinstance(self.s, CallableType) and is_similar_callables(t, self.s):
if is_equivalent(t, self.s):
return combine_similar_callables(t, self.s)
result = join_similar_callables(t, self.s)
# We set the from_type_type flag to suppress error when a collection of
# concrete class objects gets inferred as their common abstract superclass.
if not (
(t.is_type_obj() and t.type_object().is_abstract)
or (self.s.is_type_obj() and self.s.type_object().is_abstract)
):
result.from_type_type = True
if any(
isinstance(tp, (NoneType, UninhabitedType))
for tp in get_proper_types(result.arg_types)
):
# We don't want to return unusable Callable, attempt fallback instead.
return join_types(t.fallback, self.s)
return result
elif isinstance(self.s, Overloaded):
            # Switch the order of arguments so that we'll get to visit_overloaded.
return join_types(t, self.s)
elif isinstance(self.s, Instance) and self.s.type.is_protocol:
call = unpack_callback_protocol(self.s)
if call:
return join_types(t, call)
return join_types(t.fallback, self.s)
def visit_overloaded(self, t: Overloaded) -> ProperType:
# This is more complex than most other cases. Here are some
# examples that illustrate how this works.
#
# First let's define a concise notation:
# - Cn are callable types (for n in 1, 2, ...)
# - Ov(C1, C2, ...) is an overloaded type with items C1, C2, ...
# - Callable[[T, ...], S] is written as [T, ...] -> S.
#
# We want some basic properties to hold (assume Cn are all
# unrelated via Any-similarity):
#
# join(Ov(C1, C2), C1) == C1
# join(Ov(C1, C2), Ov(C1, C2)) == Ov(C1, C2)
# join(Ov(C1, C2), Ov(C1, C3)) == C1
        #     join(Ov(C1, C2), C3) == join of fallback types
#
# The presence of Any types makes things more interesting. The join is the
# most general type we can get with respect to Any:
#
# join(Ov([int] -> int, [str] -> str), [Any] -> str) == Any -> str
#
# We could use a simplification step that removes redundancies, but that's not
# implemented right now. Consider this example, where we get a redundancy:
#
# join(Ov([int, Any] -> Any, [str, Any] -> Any), [Any, int] -> Any) ==
# Ov([Any, int] -> Any, [Any, int] -> Any)
#
# TODO: Consider more cases of callable subtyping.
result: list[CallableType] = []
s = self.s
if isinstance(s, FunctionLike):
# The interesting case where both types are function types.
for t_item in t.items:
for s_item in s.items:
if is_similar_callables(t_item, s_item):
if is_equivalent(t_item, s_item):
result.append(combine_similar_callables(t_item, s_item))
elif is_subtype(t_item, s_item):
result.append(s_item)
if result:
# TODO: Simplify redundancies from the result.
if len(result) == 1:
return result[0]
else:
return Overloaded(result)
return join_types(t.fallback, s.fallback)
elif isinstance(s, Instance) and s.type.is_protocol:
call = unpack_callback_protocol(s)
if call:
return join_types(t, call)
return join_types(t.fallback, s)
def join_tuples(self, s: TupleType, t: TupleType) -> list[Type] | None:
"""Join two tuple types while handling variadic entries.
        This is surprisingly tricky, and we don't handle some corner cases.
Most of the trickiness comes from the variadic tuple items like *tuple[X, ...]
since they can have arbitrary partial overlaps (while *Ts can't be split).
"""
s_unpack_index = find_unpack_in_list(s.items)
t_unpack_index = find_unpack_in_list(t.items)
if s_unpack_index is None and t_unpack_index is None:
if s.length() == t.length():
items: list[Type] = []
for i in range(t.length()):
items.append(join_types(t.items[i], s.items[i]))
return items
return None
if s_unpack_index is not None and t_unpack_index is not None:
            # The most complex case: both tuples have an unpack item.
s_unpack = s.items[s_unpack_index]
assert isinstance(s_unpack, UnpackType)
s_unpacked = get_proper_type(s_unpack.type)
t_unpack = t.items[t_unpack_index]
assert isinstance(t_unpack, UnpackType)
t_unpacked = get_proper_type(t_unpack.type)
if s.length() == t.length() and s_unpack_index == t_unpack_index:
# We can handle a case where arity is perfectly aligned, e.g.
# join(Tuple[X1, *tuple[Y1, ...], Z1], Tuple[X2, *tuple[Y2, ...], Z2]).
# We can essentially perform the join elementwise.
prefix_len = t_unpack_index
suffix_len = t.length() - t_unpack_index - 1
items = []
for si, ti in zip(s.items[:prefix_len], t.items[:prefix_len]):
items.append(join_types(si, ti))
joined = join_types(s_unpacked, t_unpacked)
if isinstance(joined, TypeVarTupleType):
items.append(UnpackType(joined))
elif isinstance(joined, Instance) and joined.type.fullname == "builtins.tuple":
items.append(UnpackType(joined))
else:
if isinstance(t_unpacked, Instance):
assert t_unpacked.type.fullname == "builtins.tuple"
tuple_instance = t_unpacked
else:
assert isinstance(t_unpacked, TypeVarTupleType)
tuple_instance = t_unpacked.tuple_fallback
items.append(
UnpackType(
tuple_instance.copy_modified(
args=[object_from_instance(tuple_instance)]
)
)
)
if suffix_len:
for si, ti in zip(s.items[-suffix_len:], t.items[-suffix_len:]):
items.append(join_types(si, ti))
return items
if s.length() == 1 or t.length() == 1:
# Another case we can handle is when one of tuple is purely variadic
# (i.e. a non-normalized form of tuple[X, ...]), in this case the join
# will be again purely variadic.
if not (isinstance(s_unpacked, Instance) and isinstance(t_unpacked, Instance)):
return None
assert s_unpacked.type.fullname == "builtins.tuple"
assert t_unpacked.type.fullname == "builtins.tuple"
mid_joined = join_types(s_unpacked.args[0], t_unpacked.args[0])
t_other = [a for i, a in enumerate(t.items) if i != t_unpack_index]
s_other = [a for i, a in enumerate(s.items) if i != s_unpack_index]
other_joined = join_type_list(s_other + t_other)
mid_joined = join_types(mid_joined, other_joined)
return [UnpackType(s_unpacked.copy_modified(args=[mid_joined]))]
            # TODO: are there other cases we can handle (e.g. both prefix/suffix are shorter)?
return None
if s_unpack_index is not None:
variadic = s
unpack_index = s_unpack_index
fixed = t
else:
assert t_unpack_index is not None
variadic = t
unpack_index = t_unpack_index
fixed = s
# Case where one tuple has variadic item and the other one doesn't. The join will
# be variadic, since fixed tuple is a subtype of variadic, but not vice versa.
unpack = variadic.items[unpack_index]
assert isinstance(unpack, UnpackType)
unpacked = get_proper_type(unpack.type)
if not isinstance(unpacked, Instance):
return None
if fixed.length() < variadic.length() - 1:
# There are no non-trivial types that are supertype of both.
return None
prefix_len = unpack_index
suffix_len = variadic.length() - prefix_len - 1
prefix, middle, suffix = split_with_prefix_and_suffix(
tuple(fixed.items), prefix_len, suffix_len
)
items = []
for fi, vi in zip(prefix, variadic.items[:prefix_len]):
items.append(join_types(fi, vi))
mid_joined = join_type_list(list(middle))
mid_joined = join_types(mid_joined, unpacked.args[0])
items.append(UnpackType(unpacked.copy_modified(args=[mid_joined])))
if suffix_len:
for fi, vi in zip(suffix, variadic.items[-suffix_len:]):
items.append(join_types(fi, vi))
return items
def visit_tuple_type(self, t: TupleType) -> ProperType:
# When given two fixed-length tuples:
# * If they have the same length, join their subtypes item-wise:
# Tuple[int, bool] + Tuple[bool, bool] becomes Tuple[int, bool]
# * If lengths do not match, return a variadic tuple:
# Tuple[bool, int] + Tuple[bool] becomes Tuple[int, ...]
#
# Otherwise, `t` is a fixed-length tuple but `self.s` is NOT:
# * Joining with a variadic tuple returns variadic tuple:
# Tuple[int, bool] + Tuple[bool, ...] becomes Tuple[int, ...]
# * Joining with any Sequence also returns a Sequence:
# Tuple[int, bool] + List[bool] becomes Sequence[int]
if isinstance(self.s, TupleType):
if self.instance_joiner is None:
self.instance_joiner = InstanceJoiner()
fallback = self.instance_joiner.join_instances(
mypy.typeops.tuple_fallback(self.s), mypy.typeops.tuple_fallback(t)
)
assert isinstance(fallback, Instance)
items = self.join_tuples(self.s, t)
if items is not None:
return TupleType(items, fallback)
else:
# TODO: should this be a default fallback behaviour like for meet?
if is_proper_subtype(self.s, t):
return t
if is_proper_subtype(t, self.s):
return self.s
return fallback
else:
return join_types(self.s, mypy.typeops.tuple_fallback(t))
def visit_typeddict_type(self, t: TypedDictType) -> ProperType:
if isinstance(self.s, TypedDictType):
items = {
item_name: s_item_type
for (item_name, s_item_type, t_item_type) in self.s.zip(t)
if (
is_equivalent(s_item_type, t_item_type)
and (item_name in t.required_keys) == (item_name in self.s.required_keys)
)
}
fallback = self.s.create_anonymous_fallback()
all_keys = set(items.keys())
# We need to filter by items.keys() since some required keys present in both t and
# self.s might be missing from the join if the types are incompatible.
required_keys = all_keys & t.required_keys & self.s.required_keys
# If one type has a key as readonly, we mark it as readonly for both:
            readonly_keys = (t.readonly_keys | self.s.readonly_keys) & all_keys
return TypedDictType(items, required_keys, readonly_keys, fallback)
elif isinstance(self.s, Instance):
return join_types(self.s, t.fallback)
else:
return self.default(self.s)
def visit_literal_type(self, t: LiteralType) -> ProperType:
if isinstance(self.s, LiteralType):
if t == self.s:
return t
if self.s.fallback.type.is_enum and t.fallback.type.is_enum:
return mypy.typeops.make_simplified_union([self.s, t])
return join_types(self.s.fallback, t.fallback)
else:
return join_types(self.s, t.fallback)
def visit_partial_type(self, t: PartialType) -> ProperType:
# We only have partial information so we can't decide the join result. We should
# never get here.
assert False, "Internal error"
def visit_type_type(self, t: TypeType) -> ProperType:
if isinstance(self.s, TypeType):
return TypeType.make_normalized(join_types(t.item, self.s.item), line=t.line)
elif isinstance(self.s, Instance) and self.s.type.fullname == "builtins.type":
return self.s
else:
return self.default(self.s)
def visit_type_alias_type(self, t: TypeAliasType) -> ProperType:
assert False, f"This should be never called, got {t}"
def default(self, typ: Type) -> ProperType:
typ = get_proper_type(typ)
if isinstance(typ, Instance):
return object_from_instance(typ)
elif isinstance(typ, UnboundType):
return AnyType(TypeOfAny.special_form)
elif isinstance(typ, TupleType):
return self.default(mypy.typeops.tuple_fallback(typ))
elif isinstance(typ, TypedDictType):
return self.default(typ.fallback)
elif isinstance(typ, FunctionLike):
return self.default(typ.fallback)
elif isinstance(typ, TypeVarType):
return self.default(typ.upper_bound)
elif isinstance(typ, ParamSpecType):
return self.default(typ.upper_bound)
else:
return AnyType(TypeOfAny.special_form)
def is_better(t: Type, s: Type) -> bool:
# Given two possible results from join_instances_via_supertype(),
# indicate whether t is the better one.
t = get_proper_type(t)
s = get_proper_type(s)
if isinstance(t, Instance):
if not isinstance(s, Instance):
return True
# Use len(mro) as a proxy for the better choice.
if len(t.type.mro) > len(s.type.mro):
return True
return False
def normalize_callables(s: ProperType, t: ProperType) -> tuple[ProperType, ProperType]:
if isinstance(s, (CallableType, Overloaded)):
s = s.with_unpacked_kwargs()
if isinstance(t, (CallableType, Overloaded)):
t = t.with_unpacked_kwargs()
return s, t
def is_similar_callables(t: CallableType, s: CallableType) -> bool:
"""Return True if t and s have identical numbers of
arguments, default arguments and varargs.
"""
return (
len(t.arg_types) == len(s.arg_types)
and t.min_args == s.min_args
and t.is_var_arg == s.is_var_arg
)
def update_callable_ids(c: CallableType, ids: list[TypeVarId]) -> CallableType:
tv_map = {}
tvs = []
for tv, new_id in zip(c.variables, ids):
new_tv = tv.copy_modified(id=new_id)
tvs.append(new_tv)
tv_map[tv.id] = new_tv
return expand_type(c, tv_map).copy_modified(variables=tvs)
def match_generic_callables(t: CallableType, s: CallableType) -> tuple[CallableType, CallableType]:
    # The case where we combine/join/meet similar callables and both are generic
# requires special care. A more principled solution may involve unify_generic_callable(),
# but it would have two problems:
# * This adds risk of infinite recursion: e.g. join -> unification -> solver -> join
# * Using unification is an incorrect thing for meets, as it "widens" the types
# Finally, this effectively falls back to an old behaviour before namespaces were added to
# type variables, and it worked relatively well.
max_len = max(len(t.variables), len(s.variables))
min_len = min(len(t.variables), len(s.variables))
if min_len == 0:
return t, s
new_ids = [TypeVarId.new(meta_level=0) for _ in range(max_len)]
# Note: this relies on variables being in order they appear in function definition.
return update_callable_ids(t, new_ids), update_callable_ids(s, new_ids)
def join_similar_callables(t: CallableType, s: CallableType) -> CallableType:
t, s = match_generic_callables(t, s)
arg_types: list[Type] = []
for i in range(len(t.arg_types)):
arg_types.append(safe_meet(t.arg_types[i], s.arg_types[i]))
# TODO in combine_similar_callables also applies here (names and kinds; user metaclasses)
# The fallback type can be either 'function', 'type', or some user-provided metaclass.
# The result should always use 'function' as a fallback if either operands are using it.
if t.fallback.type.fullname == "builtins.function":
fallback = t.fallback
else:
fallback = s.fallback
return t.copy_modified(
arg_types=arg_types,
arg_names=combine_arg_names(t, s),
ret_type=join_types(t.ret_type, s.ret_type),
fallback=fallback,
name=None,
)
def safe_join(t: Type, s: Type) -> Type:
# This is a temporary solution to prevent crashes in combine_similar_callables() etc.,
    # until the relevant TODOs on handling arg_kinds are addressed there.
if not isinstance(t, UnpackType) and not isinstance(s, UnpackType):
return join_types(t, s)
if isinstance(t, UnpackType) and isinstance(s, UnpackType):
return UnpackType(join_types(t.type, s.type))
return object_or_any_from_type(get_proper_type(t))
def safe_meet(t: Type, s: Type) -> Type:
# Similar to above but for meet_types().
from mypy.meet import meet_types
if not isinstance(t, UnpackType) and not isinstance(s, UnpackType):
return meet_types(t, s)
if isinstance(t, UnpackType) and isinstance(s, UnpackType):
unpacked = get_proper_type(t.type)
if isinstance(unpacked, TypeVarTupleType):
fallback_type = unpacked.tuple_fallback.type
elif isinstance(unpacked, TupleType):
fallback_type = unpacked.partial_fallback.type
else:
assert isinstance(unpacked, Instance) and unpacked.type.fullname == "builtins.tuple"
fallback_type = unpacked.type
res = meet_types(t.type, s.type)
if isinstance(res, UninhabitedType):
res = Instance(fallback_type, [res])
return UnpackType(res)
return UninhabitedType()
def combine_similar_callables(t: CallableType, s: CallableType) -> CallableType:
t, s = match_generic_callables(t, s)
arg_types: list[Type] = []
for i in range(len(t.arg_types)):
arg_types.append(safe_join(t.arg_types[i], s.arg_types[i]))
# TODO kinds and argument names
# TODO what should happen if one fallback is 'type' and the other is a user-provided metaclass?
# The fallback type can be either 'function', 'type', or some user-provided metaclass.
# The result should always use 'function' as a fallback if either operands are using it.
if t.fallback.type.fullname == "builtins.function":
fallback = t.fallback
else:
fallback = s.fallback
return t.copy_modified(
arg_types=arg_types,
arg_names=combine_arg_names(t, s),
ret_type=join_types(t.ret_type, s.ret_type),
fallback=fallback,
name=None,
)
def combine_arg_names(
t: CallableType | Parameters, s: CallableType | Parameters
) -> list[str | None]:
"""Produces a list of argument names compatible with both callables.
For example, suppose 't' and 's' have the following signatures:
- t: (a: int, b: str, X: str) -> None
- s: (a: int, b: str, Y: str) -> None
This function would return ["a", "b", None]. This information
is then used above to compute the join of t and s, which results
in a signature of (a: int, b: str, str) -> None.
Note that the third argument's name is omitted and 't' and 's'
are both valid subtypes of this inferred signature.
    Precondition: is_similar_callables(t, s) is true.
"""
num_args = len(t.arg_types)
new_names = []
for i in range(num_args):
t_name = t.arg_names[i]
s_name = s.arg_names[i]
if t_name == s_name or t.arg_kinds[i].is_named() or s.arg_kinds[i].is_named():
new_names.append(t_name)
else:
new_names.append(None)
return new_names
def object_from_instance(instance: Instance) -> Instance:
"""Construct the type 'builtins.object' from an instance type."""
# Use the fact that 'object' is always the last class in the mro.
res = Instance(instance.type.mro[-1], [])
return res
def object_or_any_from_type(typ: ProperType) -> ProperType:
# Similar to object_from_instance() but tries hard for all types.
# TODO: find a better way to get object, or make this more reliable.
if isinstance(typ, Instance):
return object_from_instance(typ)
elif isinstance(typ, (CallableType, TypedDictType, LiteralType)):
return object_from_instance(typ.fallback)
elif isinstance(typ, TupleType):
return object_from_instance(typ.partial_fallback)
elif isinstance(typ, TypeType):
return object_or_any_from_type(typ.item)
elif isinstance(typ, TypeVarLikeType) and isinstance(typ.upper_bound, ProperType):
return object_or_any_from_type(typ.upper_bound)
elif isinstance(typ, UnionType):
for item in typ.items:
if isinstance(item, ProperType):
candidate = object_or_any_from_type(item)
if isinstance(candidate, Instance):
return candidate
elif isinstance(typ, UnpackType):
object_or_any_from_type(get_proper_type(typ.type))
return AnyType(TypeOfAny.implementation_artifact)
def join_type_list(types: Sequence[Type]) -> Type:
if not types:
# This is a little arbitrary but reasonable. Any empty tuple should be compatible
# with all variable length tuples, and this makes it possible.
return UninhabitedType()
joined = types[0]
for t in types[1:]:
joined = join_types(joined, t)
return joined
def unpack_callback_protocol(t: Instance) -> ProperType | None:
assert t.type.is_protocol
if t.type.protocol_members == ["__call__"]:
return get_proper_type(find_member("__call__", t, t, is_operator=True))
return None
| algorandfoundation/puya | src/puyapy/_vendor/mypy/join.py | Python | NOASSERTION | 38,174 |
from __future__ import annotations
from typing import Any, Final, Iterable, Optional, Tuple
from typing_extensions import TypeAlias as _TypeAlias
from mypy.nodes import (
LITERAL_NO,
LITERAL_TYPE,
LITERAL_YES,
AssertTypeExpr,
AssignmentExpr,
AwaitExpr,
BytesExpr,
CallExpr,
CastExpr,
ComparisonExpr,
ComplexExpr,
ConditionalExpr,
DictExpr,
DictionaryComprehension,
EllipsisExpr,
EnumCallExpr,
Expression,
FloatExpr,
GeneratorExpr,
IndexExpr,
IntExpr,
LambdaExpr,
ListComprehension,
ListExpr,
MemberExpr,
NamedTupleExpr,
NameExpr,
NewTypeExpr,
OpExpr,
ParamSpecExpr,
PromoteExpr,
RevealExpr,
SetComprehension,
SetExpr,
SliceExpr,
StarExpr,
StrExpr,
SuperExpr,
TempNode,
TupleExpr,
TypeAliasExpr,
TypeApplication,
TypedDictExpr,
TypeVarExpr,
TypeVarTupleExpr,
UnaryExpr,
Var,
YieldExpr,
YieldFromExpr,
)
from mypy.visitor import ExpressionVisitor
# [Note Literals and literal_hash]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Mypy uses the term "literal" to refer to any expression built out of
# the following:
#
# * Plain literal expressions, like `1` (integer, float, string, etc.)
#
# * Compound literal expressions, like `(lit1, lit2)` (list, dict,
# set, or tuple)
#
# * Operator expressions, like `lit1 + lit2`
#
# * Variable references, like `x`
#
# * Member references, like `lit.m`
#
# * Index expressions, like `lit[0]`
#
# A typical "literal" looks like `x[(i,j+1)].m`.
#
# An expression that is a literal has a `literal_hash`, with the
# following properties.
#
# * `literal_hash` is a Key: a tuple containing basic data types and
# possibly other Keys. So it can be used as a key in a dictionary
# that will be compared by value (as opposed to the Node itself,
# which is compared by identity).
#
# * Two expressions have equal `literal_hash`es if and only if they
# are syntactically equal expressions. (NB: Actually, we also
# identify as equal expressions like `3` and `3.0`; is this a good
# idea?)
#
# * The elements of `literal_hash` that are tuples are exactly the
# subexpressions of the original expression (e.g. the base and index
# of an index expression, or the operands of an operator expression).
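# A minimal sketch (not part of mypy) of the Key shape described in the note
# above: a nested tuple of basic values that compares by value, so syntactically
# equal expressions map to equal keys. Real keys store the Var node itself
# rather than its name; the string "x" below is a simplification.
_example_key_x = ("Var", "x")  # x
_example_key_x0 = ("Index", _example_key_x, ("Literal", 0))  # x[0]
_example_key_x0_m = ("Member", _example_key_x0, "m")  # x[0].m
assert _example_key_x0_m == ("Member", ("Index", ("Var", "x"), ("Literal", 0)), "m")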
def literal(e: Expression) -> int:
if isinstance(e, ComparisonExpr):
return min(literal(o) for o in e.operands)
elif isinstance(e, OpExpr):
return min(literal(e.left), literal(e.right))
elif isinstance(e, (MemberExpr, UnaryExpr, StarExpr)):
return literal(e.expr)
elif isinstance(e, AssignmentExpr):
return literal(e.target)
elif isinstance(e, IndexExpr):
if literal(e.index) == LITERAL_YES:
return literal(e.base)
else:
return LITERAL_NO
elif isinstance(e, NameExpr):
if isinstance(e.node, Var) and e.node.is_final and e.node.final_value is not None:
return LITERAL_YES
return LITERAL_TYPE
if isinstance(e, (IntExpr, FloatExpr, ComplexExpr, StrExpr, BytesExpr)):
return LITERAL_YES
if literal_hash(e):
return LITERAL_YES
return LITERAL_NO
Key: _TypeAlias = Tuple[Any, ...]
def subkeys(key: Key) -> Iterable[Key]:
return [elt for elt in key if isinstance(elt, tuple)]
def literal_hash(e: Expression) -> Key | None:
return e.accept(_hasher)
def extract_var_from_literal_hash(key: Key) -> Var | None:
"""If key refers to a Var node, return it.
Return None otherwise.
"""
if len(key) == 2 and key[0] == "Var" and isinstance(key[1], Var):
return key[1]
return None
class _Hasher(ExpressionVisitor[Optional[Key]]):
def visit_int_expr(self, e: IntExpr) -> Key:
return ("Literal", e.value)
def visit_str_expr(self, e: StrExpr) -> Key:
return ("Literal", e.value)
def visit_bytes_expr(self, e: BytesExpr) -> Key:
return ("Literal", e.value)
def visit_float_expr(self, e: FloatExpr) -> Key:
return ("Literal", e.value)
def visit_complex_expr(self, e: ComplexExpr) -> Key:
return ("Literal", e.value)
def visit_star_expr(self, e: StarExpr) -> Key:
return ("Star", literal_hash(e.expr))
def visit_name_expr(self, e: NameExpr) -> Key:
if isinstance(e.node, Var) and e.node.is_final and e.node.final_value is not None:
return ("Literal", e.node.final_value)
# N.B: We use the node itself as the key, and not the name,
# because using the name causes issues when there is shadowing
# (for example, in list comprehensions).
return ("Var", e.node)
def visit_member_expr(self, e: MemberExpr) -> Key:
return ("Member", literal_hash(e.expr), e.name)
def visit_op_expr(self, e: OpExpr) -> Key:
return ("Binary", e.op, literal_hash(e.left), literal_hash(e.right))
def visit_comparison_expr(self, e: ComparisonExpr) -> Key:
rest: tuple[str | Key | None, ...] = tuple(e.operators)
rest += tuple(literal_hash(o) for o in e.operands)
return ("Comparison",) + rest
def visit_unary_expr(self, e: UnaryExpr) -> Key:
return ("Unary", e.op, literal_hash(e.expr))
def seq_expr(self, e: ListExpr | TupleExpr | SetExpr, name: str) -> Key | None:
if all(literal(x) == LITERAL_YES for x in e.items):
rest: tuple[Key | None, ...] = tuple(literal_hash(x) for x in e.items)
return (name,) + rest
return None
def visit_list_expr(self, e: ListExpr) -> Key | None:
return self.seq_expr(e, "List")
def visit_dict_expr(self, e: DictExpr) -> Key | None:
if all(a and literal(a) == literal(b) == LITERAL_YES for a, b in e.items):
rest: tuple[Key | None, ...] = tuple(
(literal_hash(a) if a else None, literal_hash(b)) for a, b in e.items
)
return ("Dict",) + rest
return None
def visit_tuple_expr(self, e: TupleExpr) -> Key | None:
return self.seq_expr(e, "Tuple")
def visit_set_expr(self, e: SetExpr) -> Key | None:
return self.seq_expr(e, "Set")
def visit_index_expr(self, e: IndexExpr) -> Key | None:
if literal(e.index) == LITERAL_YES:
return ("Index", literal_hash(e.base), literal_hash(e.index))
return None
def visit_assignment_expr(self, e: AssignmentExpr) -> Key | None:
return literal_hash(e.target)
def visit_call_expr(self, e: CallExpr) -> None:
return None
def visit_slice_expr(self, e: SliceExpr) -> None:
return None
def visit_cast_expr(self, e: CastExpr) -> None:
return None
def visit_assert_type_expr(self, e: AssertTypeExpr) -> None:
return None
def visit_conditional_expr(self, e: ConditionalExpr) -> None:
return None
def visit_ellipsis(self, e: EllipsisExpr) -> None:
return None
def visit_yield_from_expr(self, e: YieldFromExpr) -> None:
return None
def visit_yield_expr(self, e: YieldExpr) -> None:
return None
def visit_reveal_expr(self, e: RevealExpr) -> None:
return None
def visit_super_expr(self, e: SuperExpr) -> None:
return None
def visit_type_application(self, e: TypeApplication) -> None:
return None
def visit_lambda_expr(self, e: LambdaExpr) -> None:
return None
def visit_list_comprehension(self, e: ListComprehension) -> None:
return None
def visit_set_comprehension(self, e: SetComprehension) -> None:
return None
def visit_dictionary_comprehension(self, e: DictionaryComprehension) -> None:
return None
def visit_generator_expr(self, e: GeneratorExpr) -> None:
return None
def visit_type_var_expr(self, e: TypeVarExpr) -> None:
return None
def visit_paramspec_expr(self, e: ParamSpecExpr) -> None:
return None
def visit_type_var_tuple_expr(self, e: TypeVarTupleExpr) -> None:
return None
def visit_type_alias_expr(self, e: TypeAliasExpr) -> None:
return None
def visit_namedtuple_expr(self, e: NamedTupleExpr) -> None:
return None
def visit_enum_call_expr(self, e: EnumCallExpr) -> None:
return None
def visit_typeddict_expr(self, e: TypedDictExpr) -> None:
return None
def visit_newtype_expr(self, e: NewTypeExpr) -> None:
return None
def visit__promote_expr(self, e: PromoteExpr) -> None:
return None
def visit_await_expr(self, e: AwaitExpr) -> None:
return None
def visit_temp_node(self, e: TempNode) -> None:
return None
_hasher: Final = _Hasher()
| algorandfoundation/puya | src/puyapy/_vendor/mypy/literals.py | Python | NOASSERTION | 8,749 |
"""
This is a module for various lookup functions:
functions that will find a semantic node by its name.
"""
from __future__ import annotations
from mypy.nodes import MypyFile, SymbolTableNode, TypeInfo
# TODO: gradually move existing lookup functions to this module.
def lookup_fully_qualified(
name: str, modules: dict[str, MypyFile], *, raise_on_missing: bool = False
) -> SymbolTableNode | None:
"""Find a symbol using it fully qualified name.
The algorithm has two steps: first we try splitting the name on '.' to find
the module, then iteratively look for each next chunk after a '.' (e.g. for
nested classes).
    This function should *not* be used to find a module. Those should be looked
    up in the modules dictionary.
"""
head = name
rest = []
# 1. Find a module tree in modules dictionary.
while True:
if "." not in head:
if raise_on_missing:
assert "." in head, f"Cannot find module for {name}"
return None
head, tail = head.rsplit(".", maxsplit=1)
rest.append(tail)
mod = modules.get(head)
if mod is not None:
break
names = mod.names
# 2. Find the symbol in the module tree.
if not rest:
        # Looks like a module, don't use this to avoid confusion.
if raise_on_missing:
assert rest, f"Cannot find {name}, got a module symbol"
return None
while True:
key = rest.pop()
if key not in names:
if raise_on_missing:
assert key in names, f"Cannot find component {key!r} for {name!r}"
return None
stnode = names[key]
if not rest:
return stnode
node = stnode.node
# In fine-grained mode, could be a cross-reference to a deleted module
# or a Var made up for a missing module.
if not isinstance(node, TypeInfo):
if raise_on_missing:
assert node, f"Cannot find {name}"
return None
names = node.names
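# A minimal sketch (not part of mypy's API) of step 1 above: peel trailing
# components off a dotted name until a known module name is found. The helper
# and the names used in the example are hypothetical.
def _demo_split_module(name: str, known_modules: set[str]) -> tuple[str, list[str]]:
    head, rest = name, []
    while head not in known_modules:
        head, _, tail = head.rpartition(".")
        if not head:
            raise KeyError(name)
        rest.append(tail)
    return head, rest

# e.g. _demo_split_module("pkg.mod.Cls.attr", {"pkg.mod"}) == ("pkg.mod", ["attr", "Cls"])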
| algorandfoundation/puya | src/puyapy/_vendor/mypy/lookup.py | Python | NOASSERTION | 2,054 |
"""Mypy type checker command line tool."""
from __future__ import annotations
import argparse
import os
import subprocess
import sys
import time
from collections import defaultdict
from gettext import gettext
from typing import IO, Any, Final, NoReturn, Sequence, TextIO
from mypy import build, defaults, state, util
from mypy.config_parser import (
get_config_module_names,
parse_config_file,
parse_version,
validate_package_allow_list,
)
from mypy.error_formatter import OUTPUT_CHOICES
from mypy.errors import CompileError
from mypy.find_sources import InvalidSourceList, create_source_list
from mypy.fscache import FileSystemCache
from mypy.modulefinder import BuildSource, FindModuleCache, SearchPaths, get_search_dirs, mypy_path
from mypy.options import INCOMPLETE_FEATURES, BuildType, Options
from mypy.split_namespace import SplitNamespace
from mypy.version import __version__
orig_stat: Final = os.stat
MEM_PROFILE: Final = False # If True, dump memory profile
def stat_proxy(path: str) -> os.stat_result:
try:
st = orig_stat(path)
except OSError as err:
print(f"stat({path!r}) -> {err}")
raise
else:
print(
"stat(%r) -> (st_mode=%o, st_mtime=%d, st_size=%d)"
% (path, st.st_mode, st.st_mtime, st.st_size)
)
return st
def main(
*,
args: list[str] | None = None,
stdout: TextIO = sys.stdout,
stderr: TextIO = sys.stderr,
clean_exit: bool = False,
) -> None:
"""Main entry point to the type checker.
Args:
args: Custom command-line arguments. If not given, sys.argv[1:] will
be used.
clean_exit: Don't hard kill the process on exit. This allows catching
SystemExit.
"""
util.check_python_version("mypy")
t0 = time.time()
# To log stat() calls: os.stat = stat_proxy
sys.setrecursionlimit(2**14)
if args is None:
args = sys.argv[1:]
fscache = FileSystemCache()
sources, options = process_options(args, stdout=stdout, stderr=stderr, fscache=fscache)
if clean_exit:
options.fast_exit = False
formatter = util.FancyFormatter(
stdout, stderr, options.hide_error_codes, hide_success=bool(options.output)
)
if options.install_types and (stdout is not sys.stdout or stderr is not sys.stderr):
# Since --install-types performs user input, we want regular stdout and stderr.
fail("error: --install-types not supported in this mode of running mypy", stderr, options)
if options.non_interactive and not options.install_types:
fail("error: --non-interactive is only supported with --install-types", stderr, options)
if options.install_types and not options.incremental:
fail(
"error: --install-types not supported with incremental mode disabled", stderr, options
)
if options.install_types and options.python_executable is None:
fail(
"error: --install-types not supported without python executable or site packages",
stderr,
options,
)
if options.install_types and not sources:
install_types(formatter, options, non_interactive=options.non_interactive)
return
res, messages, blockers = run_build(sources, options, fscache, t0, stdout, stderr)
if options.non_interactive:
missing_pkgs = read_types_packages_to_install(options.cache_dir, after_run=True)
if missing_pkgs:
# Install missing type packages and rerun build.
install_types(formatter, options, after_run=True, non_interactive=True)
fscache.flush()
print()
res, messages, blockers = run_build(sources, options, fscache, t0, stdout, stderr)
show_messages(messages, stderr, formatter, options)
if MEM_PROFILE:
from mypy.memprofile import print_memory_profile
print_memory_profile()
code = 0
n_errors, n_notes, n_files = util.count_stats(messages)
if messages and n_notes < len(messages):
code = 2 if blockers else 1
if options.error_summary:
if n_errors:
summary = formatter.format_error(
n_errors, n_files, len(sources), blockers=blockers, use_color=options.color_output
)
stdout.write(summary + "\n")
# Only notes should also output success
elif not messages or n_notes == len(messages):
stdout.write(formatter.format_success(len(sources), options.color_output) + "\n")
stdout.flush()
if options.install_types and not options.non_interactive:
result = install_types(formatter, options, after_run=True, non_interactive=False)
if result:
print()
print("note: Run mypy again for up-to-date results with installed types")
code = 2
if options.fast_exit:
# Exit without freeing objects -- it's faster.
#
# NOTE: We don't flush all open files on exit (or run other destructors)!
util.hard_exit(code)
elif code:
sys.exit(code)
# HACK: keep res alive so that mypyc won't free it before the hard_exit
list([res]) # noqa: C410
def run_build(
sources: list[BuildSource],
options: Options,
fscache: FileSystemCache,
t0: float,
stdout: TextIO,
stderr: TextIO,
) -> tuple[build.BuildResult | None, list[str], bool]:
formatter = util.FancyFormatter(
stdout, stderr, options.hide_error_codes, hide_success=bool(options.output)
)
messages = []
messages_by_file = defaultdict(list)
def flush_errors(filename: str | None, new_messages: list[str], serious: bool) -> None:
if options.pretty:
new_messages = formatter.fit_in_terminal(new_messages)
messages.extend(new_messages)
if new_messages:
messages_by_file[filename].extend(new_messages)
if options.non_interactive:
# Collect messages and possibly show them later.
return
f = stderr if serious else stdout
show_messages(new_messages, f, formatter, options)
serious = False
blockers = False
res = None
try:
# Keep a dummy reference (res) for memory profiling afterwards, as otherwise
# the result could be freed.
res = build.build(sources, options, None, flush_errors, fscache, stdout, stderr)
except CompileError as e:
blockers = True
if not e.use_stdout:
serious = True
if (
options.warn_unused_configs
and options.unused_configs
and not options.incremental
and not options.non_interactive
):
print(
"Warning: unused section(s) in {}: {}".format(
options.config_file,
get_config_module_names(
options.config_file,
[
glob
for glob in options.per_module_options.keys()
if glob in options.unused_configs
],
),
),
file=stderr,
)
maybe_write_junit_xml(time.time() - t0, serious, messages, messages_by_file, options)
return res, messages, blockers
def show_messages(
messages: list[str], f: TextIO, formatter: util.FancyFormatter, options: Options
) -> None:
for msg in messages:
if options.color_output:
msg = formatter.colorize(msg)
f.write(msg + "\n")
f.flush()
# Make the help output a little less jarring.
class AugmentedHelpFormatter(argparse.RawDescriptionHelpFormatter):
def __init__(self, prog: str) -> None:
super().__init__(prog=prog, max_help_position=28)
def _fill_text(self, text: str, width: int, indent: str) -> str:
if "\n" in text:
# Assume we want to manually format the text
return super()._fill_text(text, width, indent)
else:
# Assume we want argparse to manage wrapping, indenting, and
# formatting the text for us.
return argparse.HelpFormatter._fill_text(self, text, width, indent)
# Define pairs of flag prefixes with inverse meaning.
flag_prefix_pairs: Final = [("allow", "disallow"), ("show", "hide")]
flag_prefix_map: Final[dict[str, str]] = {}
for a, b in flag_prefix_pairs:
flag_prefix_map[a] = b
flag_prefix_map[b] = a
def invert_flag_name(flag: str) -> str:
split = flag[2:].split("-", 1)
if len(split) == 2:
prefix, rest = split
if prefix in flag_prefix_map:
return f"--{flag_prefix_map[prefix]}-{rest}"
elif prefix == "no":
return f"--{rest}"
return f"--no-{flag[2:]}"
class PythonExecutableInferenceError(Exception):
"""Represents a failure to infer the version or executable while searching."""
def python_executable_prefix(v: str) -> list[str]:
if sys.platform == "win32":
# on Windows, all Python executables are named `python`. To handle this, there
# is the `py` launcher, which can be passed a version e.g. `py -3.8`, and it will
# execute an installed Python 3.8 interpreter. See also:
# https://docs.python.org/3/using/windows.html#python-launcher-for-windows
return ["py", f"-{v}"]
else:
return [f"python{v}"]
def _python_executable_from_version(python_version: tuple[int, int]) -> str:
if sys.version_info[:2] == python_version:
return sys.executable
str_ver = ".".join(map(str, python_version))
try:
sys_exe = (
subprocess.check_output(
python_executable_prefix(str_ver) + ["-c", "import sys; print(sys.executable)"],
stderr=subprocess.STDOUT,
)
.decode()
.strip()
)
return sys_exe
except (subprocess.CalledProcessError, FileNotFoundError) as e:
raise PythonExecutableInferenceError(
"failed to find a Python executable matching version {},"
" perhaps try --python-executable, or --no-site-packages?".format(python_version)
) from e
def infer_python_executable(options: Options, special_opts: argparse.Namespace) -> None:
"""Infer the Python executable from the given version.
This function mutates options based on special_opts to infer the correct Python executable
to use.
"""
# TODO: (ethanhs) Look at folding these checks and the site packages subprocess calls into
# one subprocess call for speed.
# Use the command line specified executable, or fall back to one set in the
# config file. If an executable is not specified, infer it from the version
# (unless no_executable is set)
python_executable = special_opts.python_executable or options.python_executable
if python_executable is None:
if not special_opts.no_executable and not options.no_site_packages:
python_executable = _python_executable_from_version(options.python_version)
options.python_executable = python_executable
HEADER: Final = """%(prog)s [-h] [-v] [-V] [more options; see below]
[-m MODULE] [-p PACKAGE] [-c PROGRAM_TEXT] [files ...]"""
DESCRIPTION: Final = """
Mypy is a program that will type check your Python code.
Pass in any files or folders you want to type check. Mypy will
recursively traverse any provided folders to find .py files:
$ mypy my_program.py my_src_folder
For more information on getting started, see:
- https://mypy.readthedocs.io/en/stable/getting_started.html
For more details on both running mypy and using the flags below, see:
- https://mypy.readthedocs.io/en/stable/running_mypy.html
- https://mypy.readthedocs.io/en/stable/command_line.html
You can also use a config file to configure mypy instead of using
command line flags. For more details, see:
- https://mypy.readthedocs.io/en/stable/config_file.html
"""
FOOTER: Final = """Environment variables:
Define MYPYPATH for additional module search path entries.
Define MYPY_CACHE_DIR to override configuration cache_dir path."""
class CapturableArgumentParser(argparse.ArgumentParser):
"""Override ArgumentParser methods that use sys.stdout/sys.stderr directly.
This is needed because hijacking sys.std* is not thread-safe,
yet output must be captured to properly support mypy.api.run.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.stdout = kwargs.pop("stdout", sys.stdout)
self.stderr = kwargs.pop("stderr", sys.stderr)
super().__init__(*args, **kwargs)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file: IO[str] | None = None) -> None:
if file is None:
file = self.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file: IO[str] | None = None) -> None:
if file is None:
file = self.stdout
self._print_message(self.format_help(), file)
def _print_message(self, message: str, file: IO[str] | None = None) -> None:
if message:
if file is None:
file = self.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status: int = 0, message: str | None = None) -> NoReturn:
if message:
self._print_message(message, self.stderr)
sys.exit(status)
def error(self, message: str) -> NoReturn:
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(self.stderr)
args = {"prog": self.prog, "message": message}
self.exit(2, gettext("%(prog)s: error: %(message)s\n") % args)
class CapturableVersionAction(argparse.Action):
"""Supplement CapturableArgumentParser to handle --version.
This is nearly identical to argparse._VersionAction except,
like CapturableArgumentParser, it allows output to be captured.
Another notable difference is that version is mandatory.
This allows removing a line in __call__ that falls back to parser.version
(which does not appear to exist).
"""
def __init__(
self,
option_strings: Sequence[str],
version: str,
dest: str = argparse.SUPPRESS,
default: str = argparse.SUPPRESS,
help: str = "show program's version number and exit",
stdout: IO[str] | None = None,
) -> None:
super().__init__(
option_strings=option_strings, dest=dest, default=default, nargs=0, help=help
)
self.version = version
self.stdout = stdout or sys.stdout
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None,
) -> NoReturn:
formatter = parser._get_formatter()
formatter.add_text(self.version)
parser._print_message(formatter.format_help(), self.stdout)
parser.exit()
def process_options(
args: list[str],
stdout: TextIO | None = None,
stderr: TextIO | None = None,
require_targets: bool = True,
server_options: bool = False,
fscache: FileSystemCache | None = None,
program: str = "mypy",
header: str = HEADER,
) -> tuple[list[BuildSource], Options]:
"""Parse command line arguments.
If a FileSystemCache is passed in, and package_root options are given,
call fscache.set_package_root() to set the cache's package root.
"""
stdout = stdout or sys.stdout
stderr = stderr or sys.stderr
parser = CapturableArgumentParser(
prog=program,
usage=header,
description=DESCRIPTION,
epilog=FOOTER,
fromfile_prefix_chars="@",
formatter_class=AugmentedHelpFormatter,
add_help=False,
stdout=stdout,
stderr=stderr,
)
strict_flag_names: list[str] = []
strict_flag_assignments: list[tuple[str, bool]] = []
def add_invertible_flag(
flag: str,
*,
inverse: str | None = None,
default: bool,
dest: str | None = None,
help: str,
strict_flag: bool = False,
group: argparse._ActionsContainer | None = None,
) -> None:
if inverse is None:
inverse = invert_flag_name(flag)
if group is None:
group = parser
if help is not argparse.SUPPRESS:
help += f" (inverse: {inverse})"
arg = group.add_argument(
flag, action="store_false" if default else "store_true", dest=dest, help=help
)
dest = arg.dest
group.add_argument(
inverse,
action="store_true" if default else "store_false",
dest=dest,
help=argparse.SUPPRESS,
)
if strict_flag:
assert dest is not None
strict_flag_names.append(flag)
strict_flag_assignments.append((dest, not default))
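    # Example (illustrative): a call such as
    #     add_invertible_flag("--warn-unused-configs", default=False, help="...")
    # registers "--warn-unused-configs" with action="store_true" plus a hidden
    # "--no-warn-unused-configs" with action="store_false", both writing to the
    # same dest, so whichever spelling appears last on the command line wins.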
# Unless otherwise specified, arguments will be parsed directly onto an
# Options object. Options that require further processing should have
# their `dest` prefixed with `special-opts:`, which will cause them to be
# parsed into the separate special_opts namespace object.
# Note: we have a style guide for formatting the mypy --help text. See
# https://github.com/python/mypy/wiki/Documentation-Conventions
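    # Example (illustrative): an argument declared with
    #     dest="special-opts:python_executable"
    # does not land on the Options object; when parse_args() is later called
    # with a SplitNamespace (see below), values whose dest carries the
    # "special-opts:" prefix are routed to the separate special_opts namespace
    # and read back as special_opts.python_executable.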
general_group = parser.add_argument_group(title="Optional arguments")
general_group.add_argument(
"-h", "--help", action="help", help="Show this help message and exit"
)
general_group.add_argument(
"-v", "--verbose", action="count", dest="verbosity", help="More verbose messages"
)
compilation_status = "no" if __file__.endswith(".py") else "yes"
general_group.add_argument(
"-V",
"--version",
action=CapturableVersionAction,
version="%(prog)s " + __version__ + f" (compiled: {compilation_status})",
help="Show program's version number and exit",
stdout=stdout,
)
general_group.add_argument(
"-O",
"--output",
metavar="FORMAT",
help="Set a custom output format",
choices=OUTPUT_CHOICES,
)
config_group = parser.add_argument_group(
title="Config file",
description="Use a config file instead of command line arguments. "
"This is useful if you are using many flags or want "
"to set different options per each module.",
)
config_group.add_argument(
"--config-file",
help="Configuration file, must have a [mypy] section "
"(defaults to {})".format(", ".join(defaults.CONFIG_FILES)),
)
add_invertible_flag(
"--warn-unused-configs",
default=False,
strict_flag=True,
help="Warn about unused '[mypy-<pattern>]' or '[[tool.mypy.overrides]]' "
"config sections",
group=config_group,
)
imports_group = parser.add_argument_group(
title="Import discovery", description="Configure how imports are discovered and followed."
)
add_invertible_flag(
"--no-namespace-packages",
dest="namespace_packages",
default=True,
help="Disable support for namespace packages (PEP 420, __init__.py-less)",
group=imports_group,
)
imports_group.add_argument(
"--ignore-missing-imports",
action="store_true",
help="Silently ignore imports of missing modules",
)
imports_group.add_argument(
"--follow-imports",
choices=["normal", "silent", "skip", "error"],
default="normal",
help="How to treat imports (default normal)",
)
imports_group.add_argument(
"--python-executable",
action="store",
metavar="EXECUTABLE",
help="Python executable used for finding PEP 561 compliant installed"
" packages and stubs",
dest="special-opts:python_executable",
)
imports_group.add_argument(
"--no-site-packages",
action="store_true",
dest="special-opts:no_executable",
help="Do not search for installed PEP 561 compliant packages",
)
imports_group.add_argument(
"--no-silence-site-packages",
action="store_true",
help="Do not silence errors in PEP 561 compliant installed packages",
)
platform_group = parser.add_argument_group(
title="Platform configuration",
description="Type check code assuming it will be run under certain "
"runtime conditions. By default, mypy assumes your code "
"will be run using the same operating system and Python "
"version you are using to run mypy itself.",
)
platform_group.add_argument(
"--python-version",
type=parse_version,
metavar="x.y",
help="Type check code assuming it will be running on Python x.y",
dest="special-opts:python_version",
)
platform_group.add_argument(
"--platform",
action="store",
metavar="PLATFORM",
help="Type check special-cased code for the given OS platform "
"(defaults to sys.platform)",
)
platform_group.add_argument(
"--always-true",
metavar="NAME",
action="append",
default=[],
help="Additional variable to be considered True (may be repeated)",
)
platform_group.add_argument(
"--always-false",
metavar="NAME",
action="append",
default=[],
help="Additional variable to be considered False (may be repeated)",
)
disallow_any_group = parser.add_argument_group(
title="Disallow dynamic typing",
description="Disallow the use of the dynamic 'Any' type under certain conditions.",
)
disallow_any_group.add_argument(
"--disallow-any-unimported",
default=False,
action="store_true",
help="Disallow Any types resulting from unfollowed imports",
)
disallow_any_group.add_argument(
"--disallow-any-expr",
default=False,
action="store_true",
help="Disallow all expressions that have type Any",
)
disallow_any_group.add_argument(
"--disallow-any-decorated",
default=False,
action="store_true",
help="Disallow functions that have Any in their signature "
"after decorator transformation",
)
disallow_any_group.add_argument(
"--disallow-any-explicit",
default=False,
action="store_true",
help="Disallow explicit Any in type positions",
)
add_invertible_flag(
"--disallow-any-generics",
default=False,
strict_flag=True,
help="Disallow usage of generic types that do not specify explicit type parameters",
group=disallow_any_group,
)
add_invertible_flag(
"--disallow-subclassing-any",
default=False,
strict_flag=True,
help="Disallow subclassing values of type 'Any' when defining classes",
group=disallow_any_group,
)
untyped_group = parser.add_argument_group(
title="Untyped definitions and calls",
description="Configure how untyped definitions and calls are handled. "
"Note: by default, mypy ignores any untyped function definitions "
"and assumes any calls to such functions have a return "
"type of 'Any'.",
)
add_invertible_flag(
"--disallow-untyped-calls",
default=False,
strict_flag=True,
help="Disallow calling functions without type annotations"
" from functions with type annotations",
group=untyped_group,
)
untyped_group.add_argument(
"--untyped-calls-exclude",
metavar="MODULE",
action="append",
default=[],
help="Disable --disallow-untyped-calls for functions/methods coming"
" from specific package, module, or class",
)
add_invertible_flag(
"--disallow-untyped-defs",
default=False,
strict_flag=True,
help="Disallow defining functions without type annotations"
" or with incomplete type annotations",
group=untyped_group,
)
add_invertible_flag(
"--disallow-incomplete-defs",
default=False,
strict_flag=True,
help="Disallow defining functions with incomplete type annotations "
"(while still allowing entirely unannotated definitions)",
group=untyped_group,
)
add_invertible_flag(
"--check-untyped-defs",
default=False,
strict_flag=True,
help="Type check the interior of functions without type annotations",
group=untyped_group,
)
add_invertible_flag(
"--disallow-untyped-decorators",
default=False,
strict_flag=True,
help="Disallow decorating typed functions with untyped decorators",
group=untyped_group,
)
none_group = parser.add_argument_group(
title="None and Optional handling",
description="Adjust how values of type 'None' are handled. For more context on "
"how mypy handles values of type 'None', see: "
"https://mypy.readthedocs.io/en/stable/kinds_of_types.html#no-strict-optional",
)
add_invertible_flag(
"--implicit-optional",
default=False,
help="Assume arguments with default values of None are Optional",
group=none_group,
)
none_group.add_argument("--strict-optional", action="store_true", help=argparse.SUPPRESS)
none_group.add_argument(
"--no-strict-optional",
action="store_false",
dest="strict_optional",
help="Disable strict Optional checks (inverse: --strict-optional)",
)
add_invertible_flag(
"--force-uppercase-builtins", default=False, help=argparse.SUPPRESS, group=none_group
)
add_invertible_flag(
"--force-union-syntax", default=False, help=argparse.SUPPRESS, group=none_group
)
lint_group = parser.add_argument_group(
title="Configuring warnings",
description="Detect code that is sound but redundant or problematic.",
)
add_invertible_flag(
"--warn-redundant-casts",
default=False,
strict_flag=True,
help="Warn about casting an expression to its inferred type",
group=lint_group,
)
add_invertible_flag(
"--warn-unused-ignores",
default=False,
strict_flag=True,
help="Warn about unneeded '# type: ignore' comments",
group=lint_group,
)
add_invertible_flag(
"--no-warn-no-return",
dest="warn_no_return",
default=True,
help="Do not warn about functions that end without returning",
group=lint_group,
)
add_invertible_flag(
"--warn-return-any",
default=False,
strict_flag=True,
help="Warn about returning values of type Any from non-Any typed functions",
group=lint_group,
)
add_invertible_flag(
"--warn-unreachable",
default=False,
strict_flag=False,
help="Warn about statements or expressions inferred to be unreachable",
group=lint_group,
)
# Note: this group is intentionally added here even though we don't add
# --strict to this group near the end.
#
# That way, this group will appear after the various strictness groups
# but before the remaining flags.
# We add `--strict` near the end so we don't accidentally miss any strictness
# flags that are added after this group.
strictness_group = parser.add_argument_group(title="Miscellaneous strictness flags")
add_invertible_flag(
"--allow-untyped-globals",
default=False,
strict_flag=False,
help="Suppress toplevel errors caused by missing annotations",
group=strictness_group,
)
add_invertible_flag(
"--allow-redefinition",
default=False,
strict_flag=False,
help="Allow unconditional variable redefinition with a new type",
group=strictness_group,
)
add_invertible_flag(
"--no-implicit-reexport",
default=True,
strict_flag=True,
dest="implicit_reexport",
help="Treat imports as private unless aliased",
group=strictness_group,
)
add_invertible_flag(
"--strict-equality",
default=False,
strict_flag=True,
help="Prohibit equality, identity, and container checks for non-overlapping types",
group=strictness_group,
)
add_invertible_flag(
"--extra-checks",
default=False,
strict_flag=True,
help="Enable additional checks that are technically correct but may be impractical "
"in real code. For example, this prohibits partial overlap in TypedDict updates, "
"and makes arguments prepended via Concatenate positional-only",
group=strictness_group,
)
strict_help = "Strict mode; enables the following flags: {}".format(
", ".join(strict_flag_names)
)
strictness_group.add_argument(
"--strict", action="store_true", dest="special-opts:strict", help=strict_help
)
strictness_group.add_argument(
"--disable-error-code",
metavar="NAME",
action="append",
default=[],
help="Disable a specific error code",
)
strictness_group.add_argument(
"--enable-error-code",
metavar="NAME",
action="append",
default=[],
help="Enable a specific error code",
)
error_group = parser.add_argument_group(
title="Configuring error messages",
description="Adjust the amount of detail shown in error messages.",
)
add_invertible_flag(
"--show-error-context",
default=False,
dest="show_error_context",
help='Precede errors with "note:" messages explaining context',
group=error_group,
)
add_invertible_flag(
"--show-column-numbers",
default=False,
help="Show column numbers in error messages",
group=error_group,
)
add_invertible_flag(
"--show-error-end",
default=False,
help="Show end line/end column numbers in error messages."
" This implies --show-column-numbers",
group=error_group,
)
add_invertible_flag(
"--hide-error-codes",
default=False,
help="Hide error codes in error messages",
group=error_group,
)
add_invertible_flag(
"--show-error-code-links",
default=False,
help="Show links to error code documentation",
group=error_group,
)
add_invertible_flag(
"--pretty",
default=False,
help="Use visually nicer output in error messages:"
" Use soft word wrap, show source code snippets,"
" and show error location markers",
group=error_group,
)
add_invertible_flag(
"--no-color-output",
dest="color_output",
default=True,
help="Do not colorize error messages",
group=error_group,
)
add_invertible_flag(
"--no-error-summary",
dest="error_summary",
default=True,
help="Do not show error stats summary",
group=error_group,
)
add_invertible_flag(
"--show-absolute-path",
default=False,
help="Show absolute paths to files",
group=error_group,
)
error_group.add_argument(
"--soft-error-limit",
default=defaults.MANY_ERRORS_THRESHOLD,
type=int,
dest="many_errors_threshold",
help=argparse.SUPPRESS,
)
incremental_group = parser.add_argument_group(
title="Incremental mode",
description="Adjust how mypy incrementally type checks and caches modules. "
"Mypy caches type information about modules into a cache to "
"let you speed up future invocations of mypy. Also see "
"mypy's daemon mode: "
"mypy.readthedocs.io/en/stable/mypy_daemon.html#mypy-daemon",
)
incremental_group.add_argument(
"-i", "--incremental", action="store_true", help=argparse.SUPPRESS
)
incremental_group.add_argument(
"--no-incremental",
action="store_false",
dest="incremental",
help="Disable module cache (inverse: --incremental)",
)
incremental_group.add_argument(
"--cache-dir",
action="store",
metavar="DIR",
help="Store module cache info in the given folder in incremental mode "
"(defaults to '{}')".format(defaults.CACHE_DIR),
)
add_invertible_flag(
"--sqlite-cache",
default=False,
help="Use a sqlite database to store the cache",
group=incremental_group,
)
incremental_group.add_argument(
"--cache-fine-grained",
action="store_true",
help="Include fine-grained dependency information in the cache for the mypy daemon",
)
incremental_group.add_argument(
"--skip-version-check",
action="store_true",
help="Allow using cache written by older mypy version",
)
incremental_group.add_argument(
"--skip-cache-mtime-checks",
action="store_true",
help="Skip cache internal consistency checks based on mtime",
)
internals_group = parser.add_argument_group(
title="Advanced options", description="Debug and customize mypy internals."
)
internals_group.add_argument("--pdb", action="store_true", help="Invoke pdb on fatal error")
internals_group.add_argument(
"--show-traceback", "--tb", action="store_true", help="Show traceback on fatal error"
)
internals_group.add_argument(
"--raise-exceptions", action="store_true", help="Raise exception on fatal error"
)
internals_group.add_argument(
"--custom-typing-module",
metavar="MODULE",
dest="custom_typing_module",
help="Use a custom typing module",
)
internals_group.add_argument(
"--old-type-inference",
action="store_true",
help="Disable new experimental type inference algorithm",
)
# Deprecated reverse variant of the above.
internals_group.add_argument(
"--new-type-inference", action="store_true", help=argparse.SUPPRESS
)
parser.add_argument(
"--enable-incomplete-feature",
action="append",
metavar="{" + ",".join(sorted(INCOMPLETE_FEATURES)) + "}",
help="Enable support of incomplete/experimental features for early preview",
)
internals_group.add_argument(
"--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR"
)
add_invertible_flag(
"--warn-incomplete-stub",
default=False,
help="Warn if missing type annotation in typeshed, only relevant with"
" --disallow-untyped-defs or --disallow-incomplete-defs enabled",
group=internals_group,
)
internals_group.add_argument(
"--shadow-file",
nargs=2,
metavar=("SOURCE_FILE", "SHADOW_FILE"),
dest="shadow_file",
action="append",
help="When encountering SOURCE_FILE, read and type check "
"the contents of SHADOW_FILE instead.",
)
internals_group.add_argument("--fast-exit", action="store_true", help=argparse.SUPPRESS)
internals_group.add_argument(
"--no-fast-exit", action="store_false", dest="fast_exit", help=argparse.SUPPRESS
)
# This flag is useful for mypy tests, where function bodies may be omitted. Plugin developers
# may want to use this as well in their tests.
add_invertible_flag(
"--allow-empty-bodies", default=False, help=argparse.SUPPRESS, group=internals_group
)
# This undocumented feature exports limited line-level dependency information.
internals_group.add_argument("--export-ref-info", action="store_true", help=argparse.SUPPRESS)
report_group = parser.add_argument_group(
title="Report generation", description="Generate a report in the specified format."
)
for report_type in sorted(defaults.REPORTER_NAMES):
if report_type not in {"memory-xml"}:
report_group.add_argument(
f"--{report_type.replace('_', '-')}-report",
metavar="DIR",
dest=f"special-opts:{report_type}_report",
)
other_group = parser.add_argument_group(title="Miscellaneous")
other_group.add_argument("--quickstart-file", help=argparse.SUPPRESS)
other_group.add_argument("--junit-xml", help="Write junit.xml to the given file")
    other_group.add_argument(
        "--junit-format",
        choices=["global", "per_file"],
        default="global",
        help="If --junit-xml is set, specifies format. "
        "global: single test with all errors; "
        "per_file: one test entry per file with failures",
    )
other_group.add_argument(
"--find-occurrences",
metavar="CLASS.MEMBER",
dest="special-opts:find_occurrences",
help="Print out all usages of a class member (experimental)",
)
other_group.add_argument(
"--scripts-are-modules",
action="store_true",
help="Script x becomes module x instead of __main__",
)
add_invertible_flag(
"--install-types",
default=False,
strict_flag=False,
help="Install detected missing library stub packages using pip",
group=other_group,
)
add_invertible_flag(
"--non-interactive",
default=False,
strict_flag=False,
help=(
"Install stubs without asking for confirmation and hide "
+ "errors, with --install-types"
),
group=other_group,
inverse="--interactive",
)
if server_options:
# TODO: This flag is superfluous; remove after a short transition (2018-03-16)
other_group.add_argument(
"--experimental",
action="store_true",
dest="fine_grained_incremental",
help="Enable fine-grained incremental mode",
)
other_group.add_argument(
"--use-fine-grained-cache",
action="store_true",
help="Use the cache in fine-grained incremental mode",
)
# hidden options
parser.add_argument(
"--stats", action="store_true", dest="dump_type_stats", help=argparse.SUPPRESS
)
parser.add_argument(
"--inferstats", action="store_true", dest="dump_inference_stats", help=argparse.SUPPRESS
)
parser.add_argument("--dump-build-stats", action="store_true", help=argparse.SUPPRESS)
# Dump timing stats for each processed file into the given output file
parser.add_argument("--timing-stats", dest="timing_stats", help=argparse.SUPPRESS)
# Dump per line type checking timing stats for each processed file into the given
# output file. Only total time spent in each top level expression will be shown.
# Times are show in microseconds.
parser.add_argument(
"--line-checking-stats", dest="line_checking_stats", help=argparse.SUPPRESS
)
# --debug-cache will disable any cache-related compressions/optimizations,
# which will make the cache writing process output pretty-printed JSON (which
# is easier to debug).
parser.add_argument("--debug-cache", action="store_true", help=argparse.SUPPRESS)
# --dump-deps will dump all fine-grained dependencies to stdout
parser.add_argument("--dump-deps", action="store_true", help=argparse.SUPPRESS)
# --dump-graph will dump the contents of the graph of SCCs and exit.
parser.add_argument("--dump-graph", action="store_true", help=argparse.SUPPRESS)
# --semantic-analysis-only does exactly that.
parser.add_argument("--semantic-analysis-only", action="store_true", help=argparse.SUPPRESS)
# Some tests use this to tell mypy that we are running a test.
parser.add_argument("--test-env", action="store_true", help=argparse.SUPPRESS)
# --local-partial-types disallows partial types spanning module top level and a function
# (implicitly defined in fine-grained incremental mode)
parser.add_argument("--local-partial-types", action="store_true", help=argparse.SUPPRESS)
# --logical-deps adds some more dependencies that are not semantically needed, but
# may be helpful to determine relative importance of classes and functions for overall
    # type precision in a code base. It also _removes_ some deps, so this flag should never
    # be used except for generating code stats. This also automatically enables --cache-fine-grained.
# NOTE: This is an experimental option that may be modified or removed at any time.
parser.add_argument("--logical-deps", action="store_true", help=argparse.SUPPRESS)
# --bazel changes some behaviors for use with Bazel (https://bazel.build).
parser.add_argument("--bazel", action="store_true", help=argparse.SUPPRESS)
# --package-root adds a directory below which directories are considered
# packages even without __init__.py. May be repeated.
parser.add_argument(
"--package-root", metavar="ROOT", action="append", default=[], help=argparse.SUPPRESS
)
# --cache-map FILE ... gives a mapping from source files to cache files.
# Each triple of arguments is a source file, a cache meta file, and a cache data file.
# Modules not mentioned in the file will go through cache_dir.
# Must be followed by another flag or by '--' (and then only file args may follow).
parser.add_argument(
"--cache-map", nargs="+", dest="special-opts:cache_map", help=argparse.SUPPRESS
)
# --debug-serialize will run tree.serialize() even if cache generation is disabled.
# Useful for mypy_primer to detect serialize errors earlier.
parser.add_argument("--debug-serialize", action="store_true", help=argparse.SUPPRESS)
parser.add_argument(
"--disable-bytearray-promotion", action="store_true", help=argparse.SUPPRESS
)
parser.add_argument(
"--disable-memoryview-promotion", action="store_true", help=argparse.SUPPRESS
)
# This flag is deprecated, it has been moved to --extra-checks
parser.add_argument("--strict-concatenate", action="store_true", help=argparse.SUPPRESS)
# options specifying code to check
code_group = parser.add_argument_group(
title="Running code",
description="Specify the code you want to type check. For more details, see "
"mypy.readthedocs.io/en/stable/running_mypy.html#running-mypy",
)
add_invertible_flag(
"--explicit-package-bases",
default=False,
help="Use current directory and MYPYPATH to determine module names of files passed",
group=code_group,
)
add_invertible_flag(
"--fast-module-lookup", default=False, help=argparse.SUPPRESS, group=code_group
)
code_group.add_argument(
"--exclude",
action="append",
metavar="PATTERN",
default=[],
help=(
"Regular expression to match file names, directory names or paths which mypy should "
"ignore while recursively discovering files to check, e.g. --exclude '/setup\\.py$'. "
"May be specified more than once, eg. --exclude a --exclude b"
),
)
code_group.add_argument(
"-m",
"--module",
action="append",
metavar="MODULE",
default=[],
dest="special-opts:modules",
help="Type-check module; can repeat for more modules",
)
code_group.add_argument(
"-p",
"--package",
action="append",
metavar="PACKAGE",
default=[],
dest="special-opts:packages",
help="Type-check package recursively; can be repeated",
)
code_group.add_argument(
"-c",
"--command",
action="append",
metavar="PROGRAM_TEXT",
dest="special-opts:command",
help="Type-check program passed in as string",
)
code_group.add_argument(
metavar="files",
nargs="*",
dest="special-opts:files",
help="Type-check given files or directories",
)
# Parse arguments once into a dummy namespace so we can get the
# filename for the config file and know if the user requested all strict options.
dummy = argparse.Namespace()
parser.parse_args(args, dummy)
config_file = dummy.config_file
# Don't explicitly test if "config_file is not None" for this check.
# This lets `--config-file=` (an empty string) be used to disable all config files.
if config_file and not os.path.exists(config_file):
parser.error(f"Cannot find config file '{config_file}'")
options = Options()
strict_option_set = False
def set_strict_flags() -> None:
nonlocal strict_option_set
strict_option_set = True
for dest, value in strict_flag_assignments:
setattr(options, dest, value)
# Parse config file first, so command line can override.
parse_config_file(options, set_strict_flags, config_file, stdout, stderr)
# Set strict flags before parsing (if strict mode enabled), so other command
# line options can override.
if getattr(dummy, "special-opts:strict"):
set_strict_flags()
# Override cache_dir if provided in the environment
environ_cache_dir = os.getenv("MYPY_CACHE_DIR", "")
if environ_cache_dir.strip():
options.cache_dir = environ_cache_dir
options.cache_dir = os.path.expanduser(options.cache_dir)
# Parse command line for real, using a split namespace.
special_opts = argparse.Namespace()
parser.parse_args(args, SplitNamespace(options, special_opts, "special-opts:"))
# The python_version is either the default, which can be overridden via a config file,
# or stored in special_opts and is passed via the command line.
options.python_version = special_opts.python_version or options.python_version
if options.python_version < (3,):
parser.error(
"Mypy no longer supports checking Python 2 code. "
"Consider pinning to mypy<0.980 if you need to check Python 2 code."
)
try:
infer_python_executable(options, special_opts)
except PythonExecutableInferenceError as e:
parser.error(str(e))
if special_opts.no_executable or options.no_site_packages:
options.python_executable = None
# Paths listed in the config file will be ignored if any paths, modules or packages
# are passed on the command line.
if not (special_opts.files or special_opts.packages or special_opts.modules):
if options.files:
special_opts.files = options.files
if options.packages:
special_opts.packages = options.packages
if options.modules:
special_opts.modules = options.modules
# Check for invalid argument combinations.
if require_targets:
code_methods = sum(
bool(c)
for c in [
special_opts.modules + special_opts.packages,
special_opts.command,
special_opts.files,
]
)
if code_methods == 0 and not options.install_types:
parser.error("Missing target module, package, files, or command.")
elif code_methods > 1:
parser.error("May only specify one of: module/package, files, or command.")
if options.explicit_package_bases and not options.namespace_packages:
parser.error(
"Can only use --explicit-package-bases with --namespace-packages, since otherwise "
"examining __init__.py's is sufficient to determine module names for files"
)
# Check for overlapping `--always-true` and `--always-false` flags.
overlap = set(options.always_true) & set(options.always_false)
if overlap:
parser.error(
"You can't make a variable always true and always false (%s)"
% ", ".join(sorted(overlap))
)
validate_package_allow_list(options.untyped_calls_exclude)
options.process_error_codes(error_callback=parser.error)
options.process_incomplete_features(error_callback=parser.error, warning_callback=print)
# Compute absolute path for custom typeshed (if present).
if options.custom_typeshed_dir is not None:
options.abs_custom_typeshed_dir = os.path.abspath(options.custom_typeshed_dir)
# Set build flags.
if special_opts.find_occurrences:
_find_occurrences = tuple(special_opts.find_occurrences.split("."))
if len(_find_occurrences) < 2:
parser.error("Can only find occurrences of class members.")
if len(_find_occurrences) != 2:
parser.error("Can only find occurrences of non-nested class members.")
state.find_occurrences = _find_occurrences
# Set reports.
for flag, val in vars(special_opts).items():
if flag.endswith("_report") and val is not None:
report_type = flag[:-7].replace("_", "-")
report_dir = val
options.report_dirs[report_type] = report_dir
# Process --package-root.
if options.package_root:
process_package_roots(fscache, parser, options)
# Process --cache-map.
if special_opts.cache_map:
if options.sqlite_cache:
parser.error("--cache-map is incompatible with --sqlite-cache")
process_cache_map(parser, special_opts, options)
# An explicitly specified cache_fine_grained implies local_partial_types
# (because otherwise the cache is not compatible with dmypy)
if options.cache_fine_grained:
options.local_partial_types = True
# Implicitly show column numbers if error location end is shown
if options.show_error_end:
options.show_column_numbers = True
# Let logical_deps imply cache_fine_grained (otherwise the former is useless).
if options.logical_deps:
options.cache_fine_grained = True
if options.new_type_inference:
print(
"Warning: --new-type-inference flag is deprecated;"
" new type inference algorithm is already enabled by default"
)
if options.strict_concatenate and not strict_option_set:
print("Warning: --strict-concatenate is deprecated; use --extra-checks instead")
# Set target.
if special_opts.modules + special_opts.packages:
options.build_type = BuildType.MODULE
sys_path, _ = get_search_dirs(options.python_executable)
search_paths = SearchPaths(
(os.getcwd(),), tuple(mypy_path() + options.mypy_path), tuple(sys_path), ()
)
targets = []
# TODO: use the same cache that the BuildManager will
cache = FindModuleCache(search_paths, fscache, options)
for p in special_opts.packages:
if os.sep in p or os.altsep and os.altsep in p:
fail(f"Package name '{p}' cannot have a slash in it.", stderr, options)
p_targets = cache.find_modules_recursive(p)
if not p_targets:
fail(f"Can't find package '{p}'", stderr, options)
targets.extend(p_targets)
for m in special_opts.modules:
targets.append(BuildSource(None, m, None))
return targets, options
elif special_opts.command:
options.build_type = BuildType.PROGRAM_TEXT
targets = [BuildSource(None, None, "\n".join(special_opts.command))]
return targets, options
else:
try:
targets = create_source_list(special_opts.files, options, fscache)
# Variable named e2 instead of e to work around mypyc bug #620
# which causes issues when using the same variable to catch
# exceptions of different types.
except InvalidSourceList as e2:
fail(str(e2), stderr, options)
return targets, options
def process_package_roots(
fscache: FileSystemCache | None, parser: argparse.ArgumentParser, options: Options
) -> None:
"""Validate and normalize package_root."""
if fscache is None:
parser.error("--package-root does not work here (no fscache)")
assert fscache is not None # Since mypy doesn't know parser.error() raises.
# Do some stuff with drive letters to make Windows happy (esp. tests).
current_drive, _ = os.path.splitdrive(os.getcwd())
dot = os.curdir
dotslash = os.curdir + os.sep
dotdotslash = os.pardir + os.sep
trivial_paths = {dot, dotslash}
package_root = []
for root in options.package_root:
if os.path.isabs(root):
parser.error(f"Package root cannot be absolute: {root!r}")
drive, root = os.path.splitdrive(root)
if drive and drive != current_drive:
parser.error(f"Package root must be on current drive: {drive + root!r}")
# Empty package root is always okay.
if root:
root = os.path.relpath(root) # Normalize the heck out of it.
if not root.endswith(os.sep):
root = root + os.sep
if root.startswith(dotdotslash):
parser.error(f"Package root cannot be above current directory: {root!r}")
if root in trivial_paths:
root = ""
package_root.append(root)
options.package_root = package_root
    # Pass the package root on to the filesystem cache.
fscache.set_package_root(package_root)
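# Example (illustrative, POSIX paths): given --package-root values "src", "."
# and "sub/pkg", the loop above keeps "src/" and "sub/pkg/" (normalized, with a
# trailing separator) and turns "." into "" (a trivial root) before the list is
# handed to fscache.set_package_root().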
def process_cache_map(
parser: argparse.ArgumentParser, special_opts: argparse.Namespace, options: Options
) -> None:
"""Validate cache_map and copy into options.cache_map."""
n = len(special_opts.cache_map)
if n % 3 != 0:
parser.error("--cache-map requires one or more triples (see source)")
for i in range(0, n, 3):
source, meta_file, data_file = special_opts.cache_map[i : i + 3]
if source in options.cache_map:
parser.error(f"Duplicate --cache-map source {source})")
if not source.endswith(".py") and not source.endswith(".pyi"):
parser.error(f"Invalid --cache-map source {source} (triple[0] must be *.py[i])")
if not meta_file.endswith(".meta.json"):
parser.error(
"Invalid --cache-map meta_file %s (triple[1] must be *.meta.json)" % meta_file
)
if not data_file.endswith(".data.json"):
parser.error(
"Invalid --cache-map data_file %s (triple[2] must be *.data.json)" % data_file
)
options.cache_map[source] = (meta_file, data_file)
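# Example (illustrative; the file names are hypothetical): a valid invocation
# supplies one or more triples, e.g.
#
#     --cache-map pkg/mod.py .cache/pkg/mod.meta.json .cache/pkg/mod.data.json --
#
# i.e. a *.py[i] source, a *.meta.json file and a *.data.json file; anything
# else is rejected by the checks above.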
def maybe_write_junit_xml(
td: float,
serious: bool,
all_messages: list[str],
messages_by_file: dict[str | None, list[str]],
options: Options,
) -> None:
if options.junit_xml:
py_version = f"{options.python_version[0]}_{options.python_version[1]}"
if options.junit_format == "global":
util.write_junit_xml(
td,
serious,
{None: all_messages} if all_messages else {},
options.junit_xml,
py_version,
options.platform,
)
else:
# per_file
util.write_junit_xml(
td, serious, messages_by_file, options.junit_xml, py_version, options.platform
)
def fail(msg: str, stderr: TextIO, options: Options) -> NoReturn:
"""Fail with a serious error."""
stderr.write(f"{msg}\n")
maybe_write_junit_xml(
0.0, serious=True, all_messages=[msg], messages_by_file={None: [msg]}, options=options
)
sys.exit(2)
def read_types_packages_to_install(cache_dir: str, after_run: bool) -> list[str]:
if not os.path.isdir(cache_dir):
if not after_run:
sys.stderr.write(
"error: Can't determine which types to install with no files to check "
+ "(and no cache from previous mypy run)\n"
)
else:
sys.stderr.write("error: --install-types failed (no mypy cache directory)\n")
sys.exit(2)
fnam = build.missing_stubs_file(cache_dir)
if not os.path.isfile(fnam):
# No missing stubs.
return []
with open(fnam) as f:
return [line.strip() for line in f]
def install_types(
formatter: util.FancyFormatter,
options: Options,
*,
after_run: bool = False,
non_interactive: bool = False,
) -> bool:
"""Install stub packages using pip if some missing stubs were detected."""
packages = read_types_packages_to_install(options.cache_dir, after_run)
if not packages:
# If there are no missing stubs, generate no output.
return False
if after_run and not non_interactive:
print()
print("Installing missing stub packages:")
assert options.python_executable, "Python executable required to install types"
cmd = [options.python_executable, "-m", "pip", "install"] + packages
print(formatter.style(" ".join(cmd), "none", bold=True))
print()
if not non_interactive:
x = input("Install? [yN] ")
if not x.strip() or not x.lower().startswith("y"):
print(formatter.style("mypy: Skipping installation", "red", bold=True))
sys.exit(2)
print()
subprocess.run(cmd)
return True
| algorandfoundation/puya | src/puyapy/_vendor/mypy/main.py | Python | NOASSERTION | 57,730 |
from __future__ import annotations
from mypy.expandtype import expand_type_by_instance
from mypy.nodes import TypeInfo
from mypy.types import AnyType, Instance, TupleType, TypeOfAny, has_type_vars
def map_instance_to_supertype(instance: Instance, superclass: TypeInfo) -> Instance:
"""Produce a supertype of `instance` that is an Instance
of `superclass`, mapping type arguments up the chain of bases.
If `superclass` is not a nominal superclass of `instance.type`,
then all type arguments are mapped to 'Any'.
"""
if instance.type == superclass:
# Fast path: `instance` already belongs to `superclass`.
return instance
if superclass.fullname == "builtins.tuple" and instance.type.tuple_type:
if has_type_vars(instance.type.tuple_type):
# We special case mapping generic tuple types to tuple base, because for
# such tuples fallback can't be calculated before applying type arguments.
alias = instance.type.special_alias
assert alias is not None
if not alias._is_recursive:
# Unfortunately we can't support this for generic recursive tuples.
# If we skip this special casing we will fall back to tuple[Any, ...].
tuple_type = expand_type_by_instance(instance.type.tuple_type, instance)
if isinstance(tuple_type, TupleType):
# Make the import here to avoid cyclic imports.
import mypy.typeops
return mypy.typeops.tuple_fallback(tuple_type)
elif isinstance(tuple_type, Instance):
# This can happen after normalizing variadic tuples.
return tuple_type
if not superclass.type_vars:
# Fast path: `superclass` has no type variables to map to.
return Instance(superclass, [])
return map_instance_to_supertypes(instance, superclass)[0]
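# Example (illustrative; `Sub`/`Base` are hypothetical names): for
#     class Base(Generic[T]): ...
#     class Sub(Base[int]): ...
# map_instance_to_supertype(<Instance of Sub>, <TypeInfo of Base>) walks the
# base classes and returns the Instance Base[int]; if Base were not a nominal
# superclass of Sub, the type arguments would fall back to Any (see
# map_instance_to_supertypes below).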
def map_instance_to_supertypes(instance: Instance, supertype: TypeInfo) -> list[Instance]:
# FIX: Currently we should only have one supertype per interface, so no
# need to return an array
result: list[Instance] = []
for path in class_derivation_paths(instance.type, supertype):
types = [instance]
for sup in path:
a: list[Instance] = []
for t in types:
a.extend(map_instance_to_direct_supertypes(t, sup))
types = a
result.extend(types)
if result:
return result
else:
# Nothing. Presumably due to an error. Construct a dummy using Any.
any_type = AnyType(TypeOfAny.from_error)
return [Instance(supertype, [any_type] * len(supertype.type_vars))]
def class_derivation_paths(typ: TypeInfo, supertype: TypeInfo) -> list[list[TypeInfo]]:
"""Return an array of non-empty paths of direct base classes from
type to supertype. Return [] if no such path could be found.
InterfaceImplementationPaths(A, B) == [[B]] if A inherits B
InterfaceImplementationPaths(A, C) == [[B, C]] if A inherits B and
B inherits C
"""
# FIX: Currently we might only ever have a single path, so this could be
# simplified
result: list[list[TypeInfo]] = []
for base in typ.bases:
btype = base.type
if btype == supertype:
result.append([btype])
else:
# Try constructing a longer path via the base class.
for path in class_derivation_paths(btype, supertype):
result.append([btype] + path)
return result
def map_instance_to_direct_supertypes(instance: Instance, supertype: TypeInfo) -> list[Instance]:
    # FIX: There should only be one supertype, always.
typ = instance.type
result: list[Instance] = []
for b in typ.bases:
if b.type == supertype:
t = expand_type_by_instance(b, instance)
assert isinstance(t, Instance)
result.append(t)
if result:
return result
else:
# Relationship with the supertype not specified explicitly. Use dynamic
# type arguments implicitly.
any_type = AnyType(TypeOfAny.unannotated)
return [Instance(supertype, [any_type] * len(supertype.type_vars))]
| algorandfoundation/puya | src/puyapy/_vendor/mypy/maptype.py | Python | NOASSERTION | 4,331 |
from __future__ import annotations
from typing import Callable
from mypy import join
from mypy.erasetype import erase_type
from mypy.maptype import map_instance_to_supertype
from mypy.state import state
from mypy.subtypes import (
are_parameters_compatible,
find_member,
is_callable_compatible,
is_equivalent,
is_proper_subtype,
is_same_type,
is_subtype,
)
from mypy.typeops import is_recursive_pair, make_simplified_union, tuple_fallback
from mypy.types import (
MYPYC_NATIVE_INT_NAMES,
TUPLE_LIKE_INSTANCE_NAMES,
AnyType,
CallableType,
DeletedType,
ErasedType,
FunctionLike,
Instance,
LiteralType,
NoneType,
Overloaded,
Parameters,
ParamSpecType,
PartialType,
ProperType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeGuardedType,
TypeOfAny,
TypeType,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
TypeVisitor,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
find_unpack_in_list,
get_proper_type,
get_proper_types,
split_with_prefix_and_suffix,
)
# TODO Describe this module.
def trivial_meet(s: Type, t: Type) -> ProperType:
"""Return one of types (expanded) if it is a subtype of other, otherwise bottom type."""
if is_subtype(s, t):
return get_proper_type(s)
elif is_subtype(t, s):
return get_proper_type(t)
else:
if state.strict_optional:
return UninhabitedType()
else:
return NoneType()
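# Example (illustrative): trivial_meet(bool, int) returns bool because bool is
# a subtype of int, while trivial_meet(int, str) returns UninhabitedType()
# under strict optional (or NoneType() otherwise), since neither side is a
# subtype of the other.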
def meet_types(s: Type, t: Type) -> ProperType:
"""Return the greatest lower bound of two types."""
if is_recursive_pair(s, t):
# This case can trigger an infinite recursion, general support for this will be
# tricky, so we use a trivial meet (like for protocols).
return trivial_meet(s, t)
s = get_proper_type(s)
t = get_proper_type(t)
if isinstance(s, Instance) and isinstance(t, Instance) and s.type == t.type:
        # Code in checker.py should merge any extra_attrs where possible, so we
        # should have only compatible extra_attrs here. We check this before
        # the below subtype check, so that extra_attrs will not get erased.
if (s.extra_attrs or t.extra_attrs) and is_same_type(s, t):
if s.extra_attrs and t.extra_attrs:
if len(s.extra_attrs.attrs) > len(t.extra_attrs.attrs):
# Return the one that has more precise information.
return s
return t
if s.extra_attrs:
return s
return t
if not isinstance(s, UnboundType) and not isinstance(t, UnboundType):
if is_proper_subtype(s, t, ignore_promotions=True):
return s
if is_proper_subtype(t, s, ignore_promotions=True):
return t
if isinstance(s, ErasedType):
return s
if isinstance(s, AnyType):
return t
if isinstance(s, UnionType) and not isinstance(t, UnionType):
s, t = t, s
# Meets/joins require callable type normalization.
s, t = join.normalize_callables(s, t)
return t.accept(TypeMeetVisitor(s))
def narrow_declared_type(declared: Type, narrowed: Type) -> Type:
"""Return the declared type narrowed down to another type."""
# TODO: check infinite recursion for aliases here.
if isinstance(narrowed, TypeGuardedType): # type: ignore[misc]
# A type guard forces the new type even if it doesn't overlap the old.
return narrowed.type_guard
original_declared = declared
original_narrowed = narrowed
declared = get_proper_type(declared)
narrowed = get_proper_type(narrowed)
if declared == narrowed:
return original_declared
if isinstance(declared, UnionType):
return make_simplified_union(
[
narrow_declared_type(x, narrowed)
for x in declared.relevant_items()
# This (ugly) special-casing is needed to support checking
# branches like this:
# x: Union[float, complex]
# if isinstance(x, int):
# ...
if (
is_overlapping_types(x, narrowed, ignore_promotions=True)
or is_subtype(narrowed, x, ignore_promotions=False)
)
]
)
if is_enum_overlapping_union(declared, narrowed):
return original_narrowed
elif not is_overlapping_types(declared, narrowed, prohibit_none_typevar_overlap=True):
if state.strict_optional:
return UninhabitedType()
else:
return NoneType()
elif isinstance(narrowed, UnionType):
return make_simplified_union(
[narrow_declared_type(declared, x) for x in narrowed.relevant_items()]
)
elif isinstance(narrowed, AnyType):
return original_narrowed
elif isinstance(narrowed, TypeVarType) and is_subtype(narrowed.upper_bound, declared):
return narrowed
elif isinstance(declared, TypeType) and isinstance(narrowed, TypeType):
return TypeType.make_normalized(narrow_declared_type(declared.item, narrowed.item))
elif (
isinstance(declared, TypeType)
and isinstance(narrowed, Instance)
and narrowed.type.is_metaclass()
):
# We'd need intersection types, so give up.
return original_declared
elif isinstance(declared, Instance):
if declared.type.alt_promote:
# Special case: low-level integer type can't be narrowed
return original_declared
if (
isinstance(narrowed, Instance)
and narrowed.type.alt_promote
and narrowed.type.alt_promote.type is declared.type
):
# Special case: 'int' can't be narrowed down to a native int type such as
# i64, since they have different runtime representations.
return original_declared
return meet_types(original_declared, original_narrowed)
elif isinstance(declared, (TupleType, TypeType, LiteralType)):
return meet_types(original_declared, original_narrowed)
elif isinstance(declared, TypedDictType) and isinstance(narrowed, Instance):
# Special case useful for selecting TypedDicts from unions using isinstance(x, dict).
if narrowed.type.fullname == "builtins.dict" and all(
isinstance(t, AnyType) for t in get_proper_types(narrowed.args)
):
return original_declared
return meet_types(original_declared, original_narrowed)
return original_narrowed
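# Example (illustrative): narrowing a declared Union[int, str] by `int` (e.g.
# after `isinstance(x, int)`) keeps only the overlapping items and yields int,
# while narrowing a declared `str` by `int` yields UninhabitedType() under
# strict optional because the two types do not overlap.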
def get_possible_variants(typ: Type) -> list[Type]:
"""This function takes any "Union-like" type and returns a list of the available "options".
Specifically, there are currently exactly three different types that can have
"variants" or are "union-like":
- Unions
- TypeVars with value restrictions
- Overloads
This function will return a list of each "option" present in those types.
If this function receives any other type, we return a list containing just that
original type. (E.g. pretend the type was contained within a singleton union).
The only current exceptions are regular TypeVars and ParamSpecs. For these "TypeVarLike"s,
we return a list containing that TypeVarLike's upper bound.
This function is useful primarily when checking to see if two types are overlapping:
the algorithm to check if two unions are overlapping is fundamentally the same as
the algorithm for checking if two overloads are overlapping.
Normalizing both kinds of types in the same way lets us reuse the same algorithm
for both.
"""
typ = get_proper_type(typ)
if isinstance(typ, TypeVarType):
if len(typ.values) > 0:
return typ.values
else:
return [typ.upper_bound]
elif isinstance(typ, ParamSpecType):
return [typ.upper_bound]
elif isinstance(typ, TypeVarTupleType):
return [typ.upper_bound]
elif isinstance(typ, UnionType):
return list(typ.items)
elif isinstance(typ, Overloaded):
# Note: doing 'return typ.items()' makes mypy
# infer a too-specific return type of List[CallableType]
return list(typ.items)
else:
return [typ]
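# Example (illustrative): for AnyStr = TypeVar("AnyStr", str, bytes) the result
# is [str, bytes]; for Union[int, None] it is [int, None]; for an unrestricted
# TypeVar T it is a single-element list holding T's upper bound (object by
# default).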
def is_enum_overlapping_union(x: ProperType, y: ProperType) -> bool:
"""Return True if x is an Enum, and y is an Union with at least one Literal from x"""
return (
isinstance(x, Instance)
and x.type.is_enum
and isinstance(y, UnionType)
and any(
isinstance(p, LiteralType) and x.type == p.fallback.type
for p in (get_proper_type(z) for z in y.relevant_items())
)
)
def is_literal_in_union(x: ProperType, y: ProperType) -> bool:
"""Return True if x is a Literal and y is an Union that includes x"""
return (
isinstance(x, LiteralType)
and isinstance(y, UnionType)
and any(x == get_proper_type(z) for z in y.items)
)
def is_object(t: ProperType) -> bool:
return isinstance(t, Instance) and t.type.fullname == "builtins.object"
def is_overlapping_types(
left: Type,
right: Type,
ignore_promotions: bool = False,
prohibit_none_typevar_overlap: bool = False,
overlap_for_overloads: bool = False,
seen_types: set[tuple[Type, Type]] | None = None,
) -> bool:
"""Can a value of type 'left' also be of type 'right' or vice-versa?
If 'ignore_promotions' is True, we ignore promotions while checking for overlaps.
If 'prohibit_none_typevar_overlap' is True, we disallow None from overlapping with
TypeVars (in both strict-optional and non-strict-optional mode).
If 'overlap_for_overloads' is True, we check for overlaps more strictly (to avoid false
positives), for example: None only overlaps with explicitly optional types, Any
doesn't overlap with anything except object, we don't ignore positional argument names.
"""
if isinstance(left, TypeGuardedType) or isinstance( # type: ignore[misc]
right, TypeGuardedType
):
# A type guard forces the new type even if it doesn't overlap the old.
return True
if seen_types is None:
seen_types = set()
if (left, right) in seen_types:
return True
if isinstance(left, TypeAliasType) and isinstance(right, TypeAliasType):
seen_types.add((left, right))
left, right = get_proper_types((left, right))
def _is_overlapping_types(left: Type, right: Type) -> bool:
"""Encode the kind of overlapping check to perform.
        This function mostly exists, so we don't have to repeat keyword arguments everywhere.
        """
"""
return is_overlapping_types(
left,
right,
ignore_promotions=ignore_promotions,
prohibit_none_typevar_overlap=prohibit_none_typevar_overlap,
overlap_for_overloads=overlap_for_overloads,
seen_types=seen_types.copy(),
)
# We should never encounter this type.
if isinstance(left, PartialType) or isinstance(right, PartialType):
assert False, "Unexpectedly encountered partial type"
# We should also never encounter these types, but it's possible a few
# have snuck through due to unrelated bugs. For now, we handle these
# in the same way we handle 'Any'.
#
# TODO: Replace these with an 'assert False' once we are more confident.
illegal_types = (UnboundType, ErasedType, DeletedType)
if isinstance(left, illegal_types) or isinstance(right, illegal_types):
return True
# When running under non-strict optional mode, simplify away types of
# the form 'Union[A, B, C, None]' into just 'Union[A, B, C]'.
if not state.strict_optional:
if isinstance(left, UnionType):
left = UnionType.make_union(left.relevant_items())
if isinstance(right, UnionType):
right = UnionType.make_union(right.relevant_items())
left, right = get_proper_types((left, right))
# 'Any' may or may not be overlapping with the other type
if isinstance(left, AnyType) or isinstance(right, AnyType):
return not overlap_for_overloads or is_object(left) or is_object(right)
# We check for complete overlaps next as a general-purpose failsafe.
# If this check fails, we start checking to see if there exists a
# *partial* overlap between types.
#
# These checks will also handle the NoneType and UninhabitedType cases for us.
    # Enums are sometimes expanded into a Union of Literals. When that happens,
    # we want to treat the two as overlapping -- and, crucially, to do so *fast*
    # in case the enum is large, so we check this before expanding variants
    # below to avoid O(n**2) behavior.
if (
is_enum_overlapping_union(left, right)
or is_enum_overlapping_union(right, left)
or is_literal_in_union(left, right)
or is_literal_in_union(right, left)
):
return True
def is_none_object_overlap(t1: Type, t2: Type) -> bool:
t1, t2 = get_proper_types((t1, t2))
return (
isinstance(t1, NoneType)
and isinstance(t2, Instance)
and t2.type.fullname == "builtins.object"
)
if overlap_for_overloads:
if is_none_object_overlap(left, right) or is_none_object_overlap(right, left):
return False
def _is_subtype(left: Type, right: Type) -> bool:
if overlap_for_overloads:
return is_proper_subtype(left, right, ignore_promotions=ignore_promotions)
else:
return is_subtype(left, right, ignore_promotions=ignore_promotions)
if _is_subtype(left, right) or _is_subtype(right, left):
return True
# See the docstring for 'get_possible_variants' for more info on what the
# following lines are doing.
left_possible = get_possible_variants(left)
right_possible = get_possible_variants(right)
# Now move on to checking multi-variant types like Unions. We also perform
# the same logic if either type happens to be a TypeVar/ParamSpec/TypeVarTuple.
#
# Handling the TypeVarLikes now lets us simulate having them bind to the corresponding
# type -- if we deferred these checks, the "return-early" logic of the other
# checks will prevent us from detecting certain overlaps.
#
# If both types are singleton variants (and are not TypeVarLikes), we've hit the base case:
# we skip these checks to avoid infinitely recursing.
def is_none_typevarlike_overlap(t1: Type, t2: Type) -> bool:
t1, t2 = get_proper_types((t1, t2))
return isinstance(t1, NoneType) and isinstance(t2, TypeVarLikeType)
if prohibit_none_typevar_overlap:
if is_none_typevarlike_overlap(left, right) or is_none_typevarlike_overlap(right, left):
return False
if (
len(left_possible) > 1
or len(right_possible) > 1
or isinstance(left, TypeVarLikeType)
or isinstance(right, TypeVarLikeType)
):
for l in left_possible:
for r in right_possible:
if _is_overlapping_types(l, r):
return True
return False
# Now that we've finished handling TypeVarLikes, we're free to end early
    # if one of the types is None and we're running in strict-optional mode.
# (None only overlaps with None in strict-optional mode).
#
# We must perform this check after the TypeVarLike checks because
# a TypeVar could be bound to None, for example.
if state.strict_optional and isinstance(left, NoneType) != isinstance(right, NoneType):
return False
# Next, we handle single-variant types that may be inherently partially overlapping:
#
# - TypedDicts
# - Tuples
#
# If we cannot identify a partial overlap and end early, we degrade these two types
# into their 'Instance' fallbacks.
if isinstance(left, TypedDictType) and isinstance(right, TypedDictType):
return are_typed_dicts_overlapping(left, right, _is_overlapping_types)
elif typed_dict_mapping_pair(left, right):
# Overlaps between TypedDicts and Mappings require dedicated logic.
return typed_dict_mapping_overlap(left, right, overlapping=_is_overlapping_types)
elif isinstance(left, TypedDictType):
left = left.fallback
elif isinstance(right, TypedDictType):
right = right.fallback
if is_tuple(left) and is_tuple(right):
return are_tuples_overlapping(left, right, _is_overlapping_types)
elif isinstance(left, TupleType):
left = tuple_fallback(left)
elif isinstance(right, TupleType):
right = tuple_fallback(right)
# Next, we handle single-variant types that cannot be inherently partially overlapping,
# but do require custom logic to inspect.
#
# As before, we degrade into 'Instance' whenever possible.
if isinstance(left, TypeType) and isinstance(right, TypeType):
return _is_overlapping_types(left.item, right.item)
def _type_object_overlap(left: Type, right: Type) -> bool:
"""Special cases for type object types overlaps."""
# TODO: these checks are a bit in gray area, adjust if they cause problems.
left, right = get_proper_types((left, right))
# 1. Type[C] vs Callable[..., C] overlap even if the latter is not class object.
if isinstance(left, TypeType) and isinstance(right, CallableType):
return _is_overlapping_types(left.item, right.ret_type)
# 2. Type[C] vs Meta, where Meta is a metaclass for C.
if isinstance(left, TypeType) and isinstance(right, Instance):
if isinstance(left.item, Instance):
left_meta = left.item.type.metaclass_type
if left_meta is not None:
return _is_overlapping_types(left_meta, right)
# builtins.type (default metaclass) overlaps with all metaclasses
return right.type.has_base("builtins.type")
elif isinstance(left.item, AnyType):
return right.type.has_base("builtins.type")
# 3. Callable[..., C] vs Meta is considered below, when we switch to fallbacks.
return False
if isinstance(left, TypeType) or isinstance(right, TypeType):
return _type_object_overlap(left, right) or _type_object_overlap(right, left)
if isinstance(left, Parameters) and isinstance(right, Parameters):
return are_parameters_compatible(
left,
right,
is_compat=_is_overlapping_types,
is_proper_subtype=False,
ignore_pos_arg_names=not overlap_for_overloads,
allow_partial_overlap=True,
)
    # Otherwise, a `Parameters` does not overlap with anything else.
if isinstance(left, Parameters) or isinstance(right, Parameters):
return False
if isinstance(left, CallableType) and isinstance(right, CallableType):
return is_callable_compatible(
left,
right,
is_compat=_is_overlapping_types,
is_proper_subtype=False,
ignore_pos_arg_names=not overlap_for_overloads,
allow_partial_overlap=True,
)
call = None
other = None
if isinstance(left, CallableType) and isinstance(right, Instance):
call = find_member("__call__", right, right, is_operator=True)
other = left
if isinstance(right, CallableType) and isinstance(left, Instance):
call = find_member("__call__", left, left, is_operator=True)
other = right
if isinstance(get_proper_type(call), FunctionLike):
assert call is not None and other is not None
return _is_overlapping_types(call, other)
if isinstance(left, CallableType):
left = left.fallback
if isinstance(right, CallableType):
right = right.fallback
if isinstance(left, LiteralType) and isinstance(right, LiteralType):
if left.value == right.value:
# If values are the same, we still need to check if fallbacks are overlapping,
# this is done below.
left = left.fallback
right = right.fallback
else:
return False
elif isinstance(left, LiteralType):
left = left.fallback
elif isinstance(right, LiteralType):
right = right.fallback
# Finally, we handle the case where left and right are instances.
if isinstance(left, Instance) and isinstance(right, Instance):
# First we need to handle promotions and structural compatibility for instances
# that came as fallbacks, so simply call is_subtype() to avoid code duplication.
if _is_subtype(left, right) or _is_subtype(right, left):
return True
if right.type.fullname == "builtins.int" and left.type.fullname in MYPYC_NATIVE_INT_NAMES:
return True
# Two unrelated types cannot be partially overlapping: they're disjoint.
if left.type.has_base(right.type.fullname):
left = map_instance_to_supertype(left, right.type)
elif right.type.has_base(left.type.fullname):
right = map_instance_to_supertype(right, left.type)
else:
return False
if len(left.args) == len(right.args):
# Note: we don't really care about variance here, since the overlapping check
# is symmetric and since we want to return 'True' even for partial overlaps.
#
# For example, suppose we have two types Wrapper[Parent] and Wrapper[Child].
# It doesn't matter whether Wrapper is covariant or contravariant since
# either way, one of the two types will overlap with the other.
#
# Similarly, if Wrapper was invariant, the two types could still be partially
# overlapping -- what if Wrapper[Parent] happened to contain only instances of
# specifically Child?
#
# Or, to use a more concrete example, List[Union[A, B]] and List[Union[B, C]]
# would be considered partially overlapping since it's possible for both lists
# to contain only instances of B at runtime.
if all(
_is_overlapping_types(left_arg, right_arg)
for left_arg, right_arg in zip(left.args, right.args)
):
return True
return False
# We ought to have handled every case by now: we conclude the
# two types are not overlapping, either completely or partially.
#
# Note: it's unclear however, whether returning False is the right thing
# to do when inferring reachability -- see https://github.com/python/mypy/issues/5529
assert type(left) != type(right), f"{type(left)} vs {type(right)}"
return False
def is_overlapping_erased_types(
left: Type, right: Type, *, ignore_promotions: bool = False
) -> bool:
"""The same as 'is_overlapping_erased_types', except the types are erased first."""
return is_overlapping_types(
erase_type(left),
erase_type(right),
ignore_promotions=ignore_promotions,
prohibit_none_typevar_overlap=True,
)
def are_typed_dicts_overlapping(
left: TypedDictType, right: TypedDictType, is_overlapping: Callable[[Type, Type], bool]
) -> bool:
"""Returns 'true' if left and right are overlapping TypeDictTypes."""
# All required keys in left are present and overlapping with something in right
for key in left.required_keys:
if key not in right.items:
return False
if not is_overlapping(left.items[key], right.items[key]):
return False
# Repeat check in the other direction
for key in right.required_keys:
if key not in left.items:
return False
if not is_overlapping(left.items[key], right.items[key]):
return False
# The presence of any additional optional keys does not affect whether the two
# TypedDicts are partially overlapping: the dicts would be overlapping if the
# keys happened to be missing.
return True
def are_tuples_overlapping(
left: Type, right: Type, is_overlapping: Callable[[Type, Type], bool]
) -> bool:
"""Returns true if left and right are overlapping tuples."""
left, right = get_proper_types((left, right))
left = adjust_tuple(left, right) or left
right = adjust_tuple(right, left) or right
assert isinstance(left, TupleType), f"Type {left} is not a tuple"
assert isinstance(right, TupleType), f"Type {right} is not a tuple"
    # This algorithm works well if only one tuple is variadic; if both are
    # variadic, we may get rare false negatives for overlapping prefix/suffix.
# Also, this ignores empty unpack case, but it is probably consistent with
# how we handle e.g. empty lists in overload overlaps.
# TODO: write a more robust algorithm for cases where both types are variadic.
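    # For instance (illustration): tuple[int, *tuple[int, ...]] vs tuple[int, int]
    # overlaps -- the variadic side is expanded below to two items and the
    # pairwise check succeeds.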
left_unpack = find_unpack_in_list(left.items)
right_unpack = find_unpack_in_list(right.items)
if left_unpack is not None:
left = expand_tuple_if_possible(left, len(right.items))
if right_unpack is not None:
right = expand_tuple_if_possible(right, len(left.items))
if len(left.items) != len(right.items):
return False
return all(is_overlapping(l, r) for l, r in zip(left.items, right.items))
def expand_tuple_if_possible(tup: TupleType, target: int) -> TupleType:
if len(tup.items) > target + 1:
return tup
extra = target + 1 - len(tup.items)
new_items = []
for it in tup.items:
if not isinstance(it, UnpackType):
new_items.append(it)
continue
unpacked = get_proper_type(it.type)
if isinstance(unpacked, TypeVarTupleType):
instance = unpacked.tuple_fallback
else:
# Nested non-variadic tuples should be normalized at this point.
assert isinstance(unpacked, Instance)
instance = unpacked
assert instance.type.fullname == "builtins.tuple"
new_items.extend([instance.args[0]] * extra)
return tup.copy_modified(items=new_items)
def adjust_tuple(left: ProperType, r: ProperType) -> TupleType | None:
"""Find out if `left` is a Tuple[A, ...], and adjust its length to `right`"""
if isinstance(left, Instance) and left.type.fullname == "builtins.tuple":
n = r.length() if isinstance(r, TupleType) else 1
return TupleType([left.args[0]] * n, left)
return None
def is_tuple(typ: Type) -> bool:
typ = get_proper_type(typ)
return isinstance(typ, TupleType) or (
isinstance(typ, Instance) and typ.type.fullname == "builtins.tuple"
)
class TypeMeetVisitor(TypeVisitor[ProperType]):
def __init__(self, s: ProperType) -> None:
self.s = s
def visit_unbound_type(self, t: UnboundType) -> ProperType:
if isinstance(self.s, NoneType):
if state.strict_optional:
return AnyType(TypeOfAny.special_form)
else:
return self.s
elif isinstance(self.s, UninhabitedType):
return self.s
else:
return AnyType(TypeOfAny.special_form)
def visit_any(self, t: AnyType) -> ProperType:
return self.s
def visit_union_type(self, t: UnionType) -> ProperType:
if isinstance(self.s, UnionType):
meets: list[Type] = []
for x in t.items:
for y in self.s.items:
meets.append(meet_types(x, y))
else:
meets = [meet_types(x, self.s) for x in t.items]
return make_simplified_union(meets)
def visit_none_type(self, t: NoneType) -> ProperType:
if state.strict_optional:
if isinstance(self.s, NoneType) or (
isinstance(self.s, Instance) and self.s.type.fullname == "builtins.object"
):
return t
else:
return UninhabitedType()
else:
return t
def visit_uninhabited_type(self, t: UninhabitedType) -> ProperType:
return t
def visit_deleted_type(self, t: DeletedType) -> ProperType:
if isinstance(self.s, NoneType):
if state.strict_optional:
return t
else:
return self.s
elif isinstance(self.s, UninhabitedType):
return self.s
else:
return t
def visit_erased_type(self, t: ErasedType) -> ProperType:
return self.s
def visit_type_var(self, t: TypeVarType) -> ProperType:
if isinstance(self.s, TypeVarType) and self.s.id == t.id:
return self.s
else:
return self.default(self.s)
def visit_param_spec(self, t: ParamSpecType) -> ProperType:
if self.s == t:
return self.s
else:
return self.default(self.s)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> ProperType:
if isinstance(self.s, TypeVarTupleType) and self.s.id == t.id:
return self.s if self.s.min_len > t.min_len else t
else:
return self.default(self.s)
def visit_unpack_type(self, t: UnpackType) -> ProperType:
raise NotImplementedError
def visit_parameters(self, t: Parameters) -> ProperType:
if isinstance(self.s, Parameters):
if len(t.arg_types) != len(self.s.arg_types):
return self.default(self.s)
from mypy.join import join_types
return t.copy_modified(
arg_types=[join_types(s_a, t_a) for s_a, t_a in zip(self.s.arg_types, t.arg_types)]
)
else:
return self.default(self.s)
def visit_instance(self, t: Instance) -> ProperType:
if isinstance(self.s, Instance):
if t.type == self.s.type:
if is_subtype(t, self.s) or is_subtype(self.s, t):
# Combine type arguments. We could have used join below
# equivalently.
args: list[Type] = []
# N.B: We use zip instead of indexing because the lengths might have
# mismatches during daemon reprocessing.
if t.type.has_type_var_tuple_type:
# We handle meet of variadic instances by simply creating correct mapping
# for type arguments and compute the individual meets same as for regular
# instances. All the heavy lifting is done in the meet of tuple types.
s = self.s
assert s.type.type_var_tuple_prefix is not None
assert s.type.type_var_tuple_suffix is not None
prefix = s.type.type_var_tuple_prefix
suffix = s.type.type_var_tuple_suffix
tvt = s.type.defn.type_vars[prefix]
assert isinstance(tvt, TypeVarTupleType)
fallback = tvt.tuple_fallback
s_prefix, s_middle, s_suffix = split_with_prefix_and_suffix(
s.args, prefix, suffix
)
t_prefix, t_middle, t_suffix = split_with_prefix_and_suffix(
t.args, prefix, suffix
)
s_args = s_prefix + (TupleType(list(s_middle), fallback),) + s_suffix
t_args = t_prefix + (TupleType(list(t_middle), fallback),) + t_suffix
else:
t_args = t.args
s_args = self.s.args
for ta, sa, tv in zip(t_args, s_args, t.type.defn.type_vars):
meet = self.meet(ta, sa)
if isinstance(tv, TypeVarTupleType):
# Correctly unpack possible outcomes of meets of tuples: it can be
# either another tuple type or Never (normalized as *tuple[Never, ...])
if isinstance(meet, TupleType):
args.extend(meet.items)
continue
else:
assert isinstance(meet, UninhabitedType)
meet = UnpackType(tv.tuple_fallback.copy_modified(args=[meet]))
args.append(meet)
return Instance(t.type, args)
else:
if state.strict_optional:
return UninhabitedType()
else:
return NoneType()
else:
alt_promote = t.type.alt_promote
if alt_promote and alt_promote.type is self.s.type:
return t
alt_promote = self.s.type.alt_promote
if alt_promote and alt_promote.type is t.type:
return self.s
if is_subtype(t, self.s):
return t
elif is_subtype(self.s, t):
# See also above comment.
return self.s
else:
if state.strict_optional:
return UninhabitedType()
else:
return NoneType()
elif isinstance(self.s, FunctionLike) and t.type.is_protocol:
call = join.unpack_callback_protocol(t)
if call:
return meet_types(call, self.s)
elif isinstance(self.s, FunctionLike) and self.s.is_type_obj() and t.type.is_metaclass():
if is_subtype(self.s.fallback, t):
return self.s
return self.default(self.s)
elif isinstance(self.s, TypeType):
return meet_types(t, self.s)
elif isinstance(self.s, TupleType):
return meet_types(t, self.s)
elif isinstance(self.s, LiteralType):
return meet_types(t, self.s)
elif isinstance(self.s, TypedDictType):
return meet_types(t, self.s)
return self.default(self.s)
def visit_callable_type(self, t: CallableType) -> ProperType:
if isinstance(self.s, CallableType) and join.is_similar_callables(t, self.s):
if is_equivalent(t, self.s):
return join.combine_similar_callables(t, self.s)
result = meet_similar_callables(t, self.s)
# We set the from_type_type flag to suppress error when a collection of
# concrete class objects gets inferred as their common abstract superclass.
if not (
(t.is_type_obj() and t.type_object().is_abstract)
or (self.s.is_type_obj() and self.s.type_object().is_abstract)
):
result.from_type_type = True
if isinstance(get_proper_type(result.ret_type), UninhabitedType):
# Return a plain None or <uninhabited> instead of a weird function.
return self.default(self.s)
return result
elif isinstance(self.s, TypeType) and t.is_type_obj() and not t.is_generic():
# In this case we are able to potentially produce a better meet.
res = meet_types(self.s.item, t.ret_type)
if not isinstance(res, (NoneType, UninhabitedType)):
return TypeType.make_normalized(res)
return self.default(self.s)
elif isinstance(self.s, Instance) and self.s.type.is_protocol:
call = join.unpack_callback_protocol(self.s)
if call:
return meet_types(t, call)
return self.default(self.s)
def visit_overloaded(self, t: Overloaded) -> ProperType:
# TODO: Implement a better algorithm that covers at least the same cases
# as TypeJoinVisitor.visit_overloaded().
s = self.s
if isinstance(s, FunctionLike):
if s.items == t.items:
return Overloaded(t.items)
elif is_subtype(s, t):
return s
elif is_subtype(t, s):
return t
else:
return meet_types(t.fallback, s.fallback)
elif isinstance(self.s, Instance) and self.s.type.is_protocol:
call = join.unpack_callback_protocol(self.s)
if call:
return meet_types(t, call)
return meet_types(t.fallback, s)
def meet_tuples(self, s: TupleType, t: TupleType) -> list[Type] | None:
"""Meet two tuple types while handling variadic entries.
        This is surprisingly tricky, and we don't handle some corner cases.
        Most of the trickiness comes from the variadic tuple items like *tuple[X, ...],
        since they can have arbitrary partial overlaps (while *Ts can't be split). This
        function is roughly a mirror of join_tuples() w.r.t. the fact that fixed
        tuples are subtypes of variadic ones but not vice versa.
"""
s_unpack_index = find_unpack_in_list(s.items)
t_unpack_index = find_unpack_in_list(t.items)
if s_unpack_index is None and t_unpack_index is None:
if s.length() == t.length():
items: list[Type] = []
for i in range(t.length()):
items.append(self.meet(t.items[i], s.items[i]))
return items
return None
if s_unpack_index is not None and t_unpack_index is not None:
# The only simple case we can handle if both tuples are variadic
# is when their structure fully matches. Other cases are tricky because
# a variadic item is effectively a union of tuples of all length, thus
# potentially causing overlap between a suffix in `s` and a prefix
# in `t` (see how this is handled in is_subtype() for details).
# TODO: handle more cases (like when both prefix/suffix are shorter in s or t).
if s.length() == t.length() and s_unpack_index == t_unpack_index:
unpack_index = s_unpack_index
s_unpack = s.items[unpack_index]
assert isinstance(s_unpack, UnpackType)
s_unpacked = get_proper_type(s_unpack.type)
t_unpack = t.items[unpack_index]
assert isinstance(t_unpack, UnpackType)
t_unpacked = get_proper_type(t_unpack.type)
if not (isinstance(s_unpacked, Instance) and isinstance(t_unpacked, Instance)):
return None
meet = self.meet(s_unpacked, t_unpacked)
if not isinstance(meet, Instance):
return None
m_prefix: list[Type] = []
for si, ti in zip(s.items[:unpack_index], t.items[:unpack_index]):
m_prefix.append(meet_types(si, ti))
m_suffix: list[Type] = []
for si, ti in zip(s.items[unpack_index + 1 :], t.items[unpack_index + 1 :]):
m_suffix.append(meet_types(si, ti))
return m_prefix + [UnpackType(meet)] + m_suffix
return None
if s_unpack_index is not None:
variadic = s
unpack_index = s_unpack_index
fixed = t
else:
assert t_unpack_index is not None
variadic = t
unpack_index = t_unpack_index
fixed = s
        # If one tuple is variadic and the other is fixed, the meet will be fixed.
unpack = variadic.items[unpack_index]
assert isinstance(unpack, UnpackType)
unpacked = get_proper_type(unpack.type)
if not isinstance(unpacked, Instance):
return None
if fixed.length() < variadic.length() - 1:
return None
prefix_len = unpack_index
suffix_len = variadic.length() - prefix_len - 1
prefix, middle, suffix = split_with_prefix_and_suffix(
tuple(fixed.items), prefix_len, suffix_len
)
items = []
for fi, vi in zip(prefix, variadic.items[:prefix_len]):
items.append(self.meet(fi, vi))
for mi in middle:
items.append(self.meet(mi, unpacked.args[0]))
if suffix_len:
for fi, vi in zip(suffix, variadic.items[-suffix_len:]):
items.append(self.meet(fi, vi))
return items
def visit_tuple_type(self, t: TupleType) -> ProperType:
if isinstance(self.s, TupleType):
items = self.meet_tuples(self.s, t)
if items is None:
return self.default(self.s)
# TODO: What if the fallbacks are different?
return TupleType(items, tuple_fallback(t))
elif isinstance(self.s, Instance):
# meet(Tuple[t1, t2, <...>], Tuple[s, ...]) == Tuple[meet(t1, s), meet(t2, s), <...>].
if self.s.type.fullname in TUPLE_LIKE_INSTANCE_NAMES and self.s.args:
return t.copy_modified(items=[meet_types(it, self.s.args[0]) for it in t.items])
elif is_proper_subtype(t, self.s):
# A named tuple that inherits from a normal class
return t
elif self.s.type.has_type_var_tuple_type and is_subtype(t, self.s):
# This is a bit ad-hoc but more principled handling is tricky, and this
# special case is important for type narrowing in binder to work.
return t
return self.default(self.s)
def visit_typeddict_type(self, t: TypedDictType) -> ProperType:
if isinstance(self.s, TypedDictType):
for name, l, r in self.s.zip(t):
if not is_equivalent(l, r) or (name in t.required_keys) != (
name in self.s.required_keys
):
return self.default(self.s)
item_list: list[tuple[str, Type]] = []
for item_name, s_item_type, t_item_type in self.s.zipall(t):
if s_item_type is not None:
item_list.append((item_name, s_item_type))
else:
# at least one of s_item_type and t_item_type is not None
assert t_item_type is not None
item_list.append((item_name, t_item_type))
items = dict(item_list)
fallback = self.s.create_anonymous_fallback()
required_keys = t.required_keys | self.s.required_keys
readonly_keys = t.readonly_keys | self.s.readonly_keys
return TypedDictType(items, required_keys, readonly_keys, fallback)
elif isinstance(self.s, Instance) and is_subtype(t, self.s):
return t
else:
return self.default(self.s)
def visit_literal_type(self, t: LiteralType) -> ProperType:
if isinstance(self.s, LiteralType) and self.s == t:
return t
elif isinstance(self.s, Instance) and is_subtype(t.fallback, self.s):
return t
else:
return self.default(self.s)
def visit_partial_type(self, t: PartialType) -> ProperType:
# We can't determine the meet of partial types. We should never get here.
assert False, "Internal error"
def visit_type_type(self, t: TypeType) -> ProperType:
if isinstance(self.s, TypeType):
typ = self.meet(t.item, self.s.item)
if not isinstance(typ, NoneType):
typ = TypeType.make_normalized(typ, line=t.line)
return typ
elif isinstance(self.s, Instance) and self.s.type.fullname == "builtins.type":
return t
elif isinstance(self.s, CallableType):
return self.meet(t, self.s)
else:
return self.default(self.s)
def visit_type_alias_type(self, t: TypeAliasType) -> ProperType:
assert False, f"This should be never called, got {t}"
def meet(self, s: Type, t: Type) -> ProperType:
return meet_types(s, t)
def default(self, typ: Type) -> ProperType:
if isinstance(typ, UnboundType):
return AnyType(TypeOfAny.special_form)
else:
if state.strict_optional:
return UninhabitedType()
else:
return NoneType()
def meet_similar_callables(t: CallableType, s: CallableType) -> CallableType:
from mypy.join import match_generic_callables, safe_join
t, s = match_generic_callables(t, s)
arg_types: list[Type] = []
for i in range(len(t.arg_types)):
arg_types.append(safe_join(t.arg_types[i], s.arg_types[i]))
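    # Argument types are joined (not met): callables are contravariant in their
    # argument types, so the meet of two callables must accept any argument
    # that either of them accepts; the return type below is met instead.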
# TODO in combine_similar_callables also applies here (names and kinds)
# The fallback type can be either 'function' or 'type'. The result should have 'function' as
# fallback only if both operands have it as 'function'.
if t.fallback.type.fullname != "builtins.function":
fallback = t.fallback
else:
fallback = s.fallback
return t.copy_modified(
arg_types=arg_types,
ret_type=meet_types(t.ret_type, s.ret_type),
fallback=fallback,
name=None,
)
def meet_type_list(types: list[Type]) -> Type:
if not types:
# This should probably be builtins.object but that is hard to get and
# it doesn't matter for any current users.
return AnyType(TypeOfAny.implementation_artifact)
met = types[0]
for t in types[1:]:
met = meet_types(met, t)
return met
def typed_dict_mapping_pair(left: Type, right: Type) -> bool:
"""Is this a pair where one type is a TypedDict and another one is an instance of Mapping?
This case requires a precise/principled consideration because there are two use cases
    that push the boundary in opposite ways: we need to avoid spurious overlaps to avoid
false positives for overloads, but we also need to avoid spuriously non-overlapping types
to avoid false positives with --strict-equality.
"""
left, right = get_proper_types((left, right))
assert not isinstance(left, TypedDictType) or not isinstance(right, TypedDictType)
if isinstance(left, TypedDictType):
_, other = left, right
elif isinstance(right, TypedDictType):
_, other = right, left
else:
return False
return isinstance(other, Instance) and other.type.has_base("typing.Mapping")
def typed_dict_mapping_overlap(
left: Type, right: Type, overlapping: Callable[[Type, Type], bool]
) -> bool:
"""Check if a TypedDict type is overlapping with a Mapping.
The basic logic here consists of two rules:
* A TypedDict with some required keys is overlapping with Mapping[str, <some type>]
if and only if every key type is overlapping with <some type>. For example:
- TypedDict(x=int, y=str) overlaps with Dict[str, Union[str, int]]
- TypedDict(x=int, y=str) doesn't overlap with Dict[str, int]
Note that any additional non-required keys can't change the above result.
* A TypedDict with no required keys overlaps with Mapping[str, <some type>] if and
only if at least one of key types overlaps with <some type>. For example:
- TypedDict(x=str, y=str, total=False) overlaps with Dict[str, str]
- TypedDict(x=str, y=str, total=False) doesn't overlap with Dict[str, int]
- TypedDict(x=int, y=str, total=False) overlaps with Dict[str, str]
* A TypedDict with at least one ReadOnly[] key does not overlap
with Dict or MutableMapping, because they assume mutable data.
    As usual, empty dictionaries lie in a gray area. In general, List[str] and List[int]
    are considered non-overlapping even though an empty list belongs to both. However,
    List[int] and List[Never] are considered overlapping.
So here we follow the same logic: a TypedDict with no required keys is considered
non-overlapping with Mapping[str, <some type>], but is considered overlapping with
Mapping[Never, Never]. This way we avoid false positives for overloads, and also
avoid false positives for comparisons like SomeTypedDict == {} under --strict-equality.
"""
left, right = get_proper_types((left, right))
assert not isinstance(left, TypedDictType) or not isinstance(right, TypedDictType)
if isinstance(left, TypedDictType):
assert isinstance(right, Instance)
typed, other = left, right
else:
assert isinstance(left, Instance)
assert isinstance(right, TypedDictType)
typed, other = right, left
mutable_mapping = next(
(base for base in other.type.mro if base.fullname == "typing.MutableMapping"), None
)
if mutable_mapping is not None and typed.readonly_keys:
return False
mapping = next(base for base in other.type.mro if base.fullname == "typing.Mapping")
other = map_instance_to_supertype(other, mapping)
key_type, value_type = get_proper_types(other.args)
# TODO: is there a cleaner way to get str_type here?
fallback = typed.as_anonymous().fallback
str_type = fallback.type.bases[0].args[0] # typing._TypedDict inherits Mapping[str, object]
# Special case: a TypedDict with no required keys overlaps with an empty dict.
if isinstance(key_type, UninhabitedType) and isinstance(value_type, UninhabitedType):
return not typed.required_keys
if typed.required_keys:
if not overlapping(key_type, str_type):
return False
return all(overlapping(typed.items[k], value_type) for k in typed.required_keys)
else:
if not overlapping(key_type, str_type):
return False
non_required = set(typed.items.keys()) - typed.required_keys
return any(overlapping(typed.items[k], value_type) for k in non_required)
| algorandfoundation/puya | src/puyapy/_vendor/mypy/meet.py | Python | NOASSERTION | 50,002 |
"""Utility for dumping memory usage stats.
This is tailored to mypy and knows (a little) about which list objects are
owned by particular AST nodes, etc.
"""
from __future__ import annotations
import gc
import sys
from collections import defaultdict
from typing import Dict, Iterable, cast
from mypy.nodes import FakeInfo, Node
from mypy.types import Type
from mypy.util import get_class_descriptors
def collect_memory_stats() -> tuple[dict[str, int], dict[str, int]]:
"""Return stats about memory use.
Return a tuple with these items:
- Dict from object kind to number of instances of that kind
- Dict from object kind to total bytes used by all instances of that kind
"""
objs = gc.get_objects()
find_recursive_objects(objs)
inferred = {}
for obj in objs:
if type(obj) is FakeInfo:
# Processing these would cause a crash.
continue
n = type(obj).__name__
if hasattr(obj, "__dict__"):
# Keep track of which class a particular __dict__ is associated with.
inferred[id(obj.__dict__)] = f"{n} (__dict__)"
if isinstance(obj, (Node, Type)): # type: ignore[misc]
if hasattr(obj, "__dict__"):
for x in obj.__dict__.values():
if isinstance(x, list):
# Keep track of which node a list is associated with.
inferred[id(x)] = f"{n} (list)"
if isinstance(x, tuple):
                        # Keep track of which node a tuple is associated with.
inferred[id(x)] = f"{n} (tuple)"
for k in get_class_descriptors(type(obj)):
x = getattr(obj, k, None)
if isinstance(x, list):
inferred[id(x)] = f"{n} (list)"
if isinstance(x, tuple):
inferred[id(x)] = f"{n} (tuple)"
freqs: dict[str, int] = {}
memuse: dict[str, int] = {}
for obj in objs:
if id(obj) in inferred:
name = inferred[id(obj)]
else:
name = type(obj).__name__
freqs[name] = freqs.get(name, 0) + 1
memuse[name] = memuse.get(name, 0) + sys.getsizeof(obj)
return freqs, memuse
def print_memory_profile(run_gc: bool = True) -> None:
if not sys.platform.startswith("win"):
import resource
system_memuse = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
else:
system_memuse = -1 # TODO: Support this on Windows
if run_gc:
gc.collect()
freqs, memuse = collect_memory_stats()
print("%7s %7s %7s %s" % ("Freq", "Size(k)", "AvgSize", "Type"))
print("-------------------------------------------")
totalmem = 0
i = 0
for n, mem in sorted(memuse.items(), key=lambda x: -x[1]):
f = freqs[n]
if i < 50:
print("%7d %7d %7.0f %s" % (f, mem // 1024, mem / f, n))
i += 1
totalmem += mem
print()
print("Mem usage RSS ", system_memuse // 1024)
print("Total reachable ", totalmem // 1024)
def find_recursive_objects(objs: list[object]) -> None:
"""Find additional objects referenced by objs and append them to objs.
We use this since gc.get_objects() does not return objects without pointers
    in them, such as strings.
"""
seen = {id(o) for o in objs}
def visit(o: object) -> None:
if id(o) not in seen:
objs.append(o)
seen.add(id(o))
for obj in objs.copy():
if type(obj) is FakeInfo:
# Processing these would cause a crash.
continue
if type(obj) in (dict, defaultdict):
for key, val in cast(Dict[object, object], obj).items():
visit(key)
visit(val)
if type(obj) in (list, tuple, set):
for x in cast(Iterable[object], obj):
visit(x)
if hasattr(obj, "__slots__"):
for base in type.mro(type(obj)):
for slot in getattr(base, "__slots__", ()):
if hasattr(obj, slot):
visit(getattr(obj, slot))
| algorandfoundation/puya | src/puyapy/_vendor/mypy/memprofile.py | Python | NOASSERTION | 4,153 |
"""Message constants for generating error messages during type checking.
Literal messages should be defined as constants in this module so they won't get out of sync
if used in more than one place, and so that they can be easily introspected. These messages are
ultimately consumed by messages.MessageBuilder.fail(). For more involved message generation,
add a method to MessageBuilder and call that instead.
"""
from __future__ import annotations
from typing import Final, NamedTuple
from mypy import errorcodes as codes
class ErrorMessage(NamedTuple):
value: str
code: codes.ErrorCode | None = None
def format(self, *args: object, **kwargs: object) -> ErrorMessage:
return ErrorMessage(self.value.format(*args, **kwargs), code=self.code)
def with_additional_msg(self, info: str) -> ErrorMessage:
return ErrorMessage(self.value + info, code=self.code)
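# Usage sketch: the constants below are either plain strings or ErrorMessage
# instances; the latter carry an optional error code and can be specialized,
# e.g. INVALID_TYPE_RAW_ENUM_VALUE.format("Color", "RED") yields a new
# ErrorMessage with the placeholders filled in and the same code.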
# Invalid types
INVALID_TYPE_RAW_ENUM_VALUE: Final = ErrorMessage(
"Invalid type: try using Literal[{}.{}] instead?", codes.VALID_TYPE
)
# Type checker error message constants
NO_RETURN_VALUE_EXPECTED: Final = ErrorMessage("No return value expected", codes.RETURN_VALUE)
MISSING_RETURN_STATEMENT: Final = ErrorMessage("Missing return statement", codes.RETURN)
EMPTY_BODY_ABSTRACT: Final = ErrorMessage(
"If the method is meant to be abstract, use @abc.abstractmethod", codes.EMPTY_BODY
)
INVALID_IMPLICIT_RETURN: Final = ErrorMessage("Implicit return in function which does not return")
INCOMPATIBLE_RETURN_VALUE_TYPE: Final = ErrorMessage(
"Incompatible return value type", codes.RETURN_VALUE
)
RETURN_VALUE_EXPECTED: Final = ErrorMessage("Return value expected", codes.RETURN_VALUE)
NO_RETURN_EXPECTED: Final = ErrorMessage("Return statement in function which does not return")
INVALID_EXCEPTION: Final = ErrorMessage("Exception must be derived from BaseException")
INVALID_EXCEPTION_TYPE: Final = ErrorMessage(
"Exception type must be derived from BaseException (or be a tuple of exception classes)"
)
INVALID_EXCEPTION_GROUP: Final = ErrorMessage(
"Exception type in except* cannot derive from BaseExceptionGroup"
)
RETURN_IN_ASYNC_GENERATOR: Final = ErrorMessage(
'"return" with value in async generator is not allowed'
)
INVALID_RETURN_TYPE_FOR_GENERATOR: Final = ErrorMessage(
'The return type of a generator function should be "Generator" or one of its supertypes'
)
INVALID_RETURN_TYPE_FOR_ASYNC_GENERATOR: Final = ErrorMessage(
'The return type of an async generator function should be "AsyncGenerator" or one of its '
"supertypes"
)
YIELD_VALUE_EXPECTED: Final = ErrorMessage("Yield value expected")
INCOMPATIBLE_TYPES: Final = ErrorMessage("Incompatible types")
INCOMPATIBLE_TYPES_IN_ASSIGNMENT: Final = ErrorMessage(
"Incompatible types in assignment", code=codes.ASSIGNMENT
)
COVARIANT_OVERRIDE_OF_MUTABLE_ATTRIBUTE: Final = ErrorMessage(
"Covariant override of a mutable attribute", code=codes.MUTABLE_OVERRIDE
)
INCOMPATIBLE_TYPES_IN_AWAIT: Final = ErrorMessage('Incompatible types in "await"')
INCOMPATIBLE_REDEFINITION: Final = ErrorMessage("Incompatible redefinition")
INCOMPATIBLE_TYPES_IN_ASYNC_WITH_AENTER: Final = (
'Incompatible types in "async with" for "__aenter__"'
)
INCOMPATIBLE_TYPES_IN_ASYNC_WITH_AEXIT: Final = (
'Incompatible types in "async with" for "__aexit__"'
)
INCOMPATIBLE_TYPES_IN_ASYNC_FOR: Final = 'Incompatible types in "async for"'
INVALID_TYPE_FOR_SLOTS: Final = 'Invalid type for "__slots__"'
ASYNC_FOR_OUTSIDE_COROUTINE: Final = '"async for" outside async function'
ASYNC_WITH_OUTSIDE_COROUTINE: Final = '"async with" outside async function'
INCOMPATIBLE_TYPES_IN_YIELD: Final = ErrorMessage('Incompatible types in "yield"')
INCOMPATIBLE_TYPES_IN_YIELD_FROM: Final = ErrorMessage('Incompatible types in "yield from"')
INCOMPATIBLE_TYPES_IN_STR_INTERPOLATION: Final = "Incompatible types in string interpolation"
INCOMPATIBLE_TYPES_IN_CAPTURE: Final = ErrorMessage("Incompatible types in capture pattern")
MUST_HAVE_NONE_RETURN_TYPE: Final = ErrorMessage('The return type of "{}" must be None')
TUPLE_INDEX_OUT_OF_RANGE: Final = ErrorMessage("Tuple index out of range")
AMBIGUOUS_SLICE_OF_VARIADIC_TUPLE: Final = ErrorMessage("Ambiguous slice of a variadic tuple")
TOO_MANY_TARGETS_FOR_VARIADIC_UNPACK: Final = ErrorMessage(
"Too many assignment targets for variadic unpack"
)
INVALID_SLICE_INDEX: Final = ErrorMessage("Slice index must be an integer, SupportsIndex or None")
CANNOT_INFER_LAMBDA_TYPE: Final = ErrorMessage("Cannot infer type of lambda")
CANNOT_ACCESS_INIT: Final = (
'Accessing "__init__" on an instance is unsound, since instance.__init__ could be from'
" an incompatible subclass"
)
NON_INSTANCE_NEW_TYPE: Final = ErrorMessage('"__new__" must return a class instance (got {})')
INVALID_NEW_TYPE: Final = ErrorMessage('Incompatible return type for "__new__"')
BAD_CONSTRUCTOR_TYPE: Final = ErrorMessage("Unsupported decorated constructor type")
CANNOT_ASSIGN_TO_METHOD: Final = "Cannot assign to a method"
CANNOT_ASSIGN_TO_TYPE: Final = "Cannot assign to a type"
INCONSISTENT_ABSTRACT_OVERLOAD: Final = ErrorMessage(
"Overloaded method has both abstract and non-abstract variants"
)
MULTIPLE_OVERLOADS_REQUIRED: Final = ErrorMessage("Single overload definition, multiple required")
READ_ONLY_PROPERTY_OVERRIDES_READ_WRITE: Final = ErrorMessage(
"Read-only property cannot override read-write property"
)
FORMAT_REQUIRES_MAPPING: Final = "Format requires a mapping"
RETURN_TYPE_CANNOT_BE_CONTRAVARIANT: Final = ErrorMessage(
"Cannot use a contravariant type variable as return type"
)
FUNCTION_PARAMETER_CANNOT_BE_COVARIANT: Final = ErrorMessage(
"Cannot use a covariant type variable as a parameter"
)
INCOMPATIBLE_IMPORT_OF: Final = ErrorMessage('Incompatible import of "{}"', code=codes.ASSIGNMENT)
FUNCTION_TYPE_EXPECTED: Final = ErrorMessage(
"Function is missing a type annotation", codes.NO_UNTYPED_DEF
)
ONLY_CLASS_APPLICATION: Final = ErrorMessage(
"Type application is only supported for generic classes"
)
RETURN_TYPE_EXPECTED: Final = ErrorMessage(
"Function is missing a return type annotation", codes.NO_UNTYPED_DEF
)
ARGUMENT_TYPE_EXPECTED: Final = ErrorMessage(
"Function is missing a type annotation for one or more arguments", codes.NO_UNTYPED_DEF
)
KEYWORD_ARGUMENT_REQUIRES_STR_KEY_TYPE: Final = ErrorMessage(
'Keyword argument only valid with "str" key type in call to "dict"'
)
ALL_MUST_BE_SEQ_STR: Final = ErrorMessage("Type of __all__ must be {}, not {}")
INVALID_TYPEDDICT_ARGS: Final = ErrorMessage(
"Expected keyword arguments, {...}, or dict(...) in TypedDict constructor"
)
TYPEDDICT_KEY_MUST_BE_STRING_LITERAL: Final = ErrorMessage(
"Expected TypedDict key to be string literal"
)
TYPEDDICT_OVERRIDE_MERGE: Final = 'Overwriting TypedDict field "{}" while merging'
MALFORMED_ASSERT: Final = ErrorMessage("Assertion is always true, perhaps remove parentheses?")
DUPLICATE_TYPE_SIGNATURES: Final = ErrorMessage("Function has duplicate type signatures")
DESCRIPTOR_SET_NOT_CALLABLE: Final = ErrorMessage("{}.__set__ is not callable")
DESCRIPTOR_GET_NOT_CALLABLE: Final = "{}.__get__ is not callable"
MODULE_LEVEL_GETATTRIBUTE: Final = ErrorMessage(
"__getattribute__ is not valid at the module level"
)
CLASS_VAR_CONFLICTS_SLOTS: Final = '"{}" in __slots__ conflicts with class variable access'
NAME_NOT_IN_SLOTS: Final = ErrorMessage(
'Trying to assign name "{}" that is not in "__slots__" of type "{}"'
)
TYPE_ALWAYS_TRUE: Final = ErrorMessage(
"{} which does not implement __bool__ or __len__ "
"so it could always be true in boolean context",
code=codes.TRUTHY_BOOL,
)
TYPE_ALWAYS_TRUE_UNIONTYPE: Final = ErrorMessage(
"{} of which no members implement __bool__ or __len__ "
"so it could always be true in boolean context",
code=codes.TRUTHY_BOOL,
)
FUNCTION_ALWAYS_TRUE: Final = ErrorMessage(
"Function {} could always be true in boolean context", code=codes.TRUTHY_FUNCTION
)
ITERABLE_ALWAYS_TRUE: Final = ErrorMessage(
"{} which can always be true in boolean context. Consider using {} instead.",
code=codes.TRUTHY_ITERABLE,
)
NOT_CALLABLE: Final = "{} not callable"
TYPE_MUST_BE_USED: Final = "Value of type {} must be used"
# Generic
GENERIC_INSTANCE_VAR_CLASS_ACCESS: Final = (
"Access to generic instance variables via class is ambiguous"
)
GENERIC_CLASS_VAR_ACCESS: Final = "Access to generic class variables is ambiguous"
BARE_GENERIC: Final = "Missing type parameters for generic type {}"
IMPLICIT_GENERIC_ANY_BUILTIN: Final = (
'Implicit generic "Any". Use "{}" and specify generic parameters'
)
INVALID_UNPACK: Final = "{} cannot be unpacked (must be tuple or TypeVarTuple)"
INVALID_UNPACK_POSITION: Final = "Unpack is only valid in a variadic position"
INVALID_PARAM_SPEC_LOCATION: Final = "Invalid location for ParamSpec {}"
INVALID_PARAM_SPEC_LOCATION_NOTE: Final = (
'You can use ParamSpec as the first argument to Callable, e.g., "Callable[{}, int]"'
)
# TypeVar
INCOMPATIBLE_TYPEVAR_VALUE: Final = 'Value of type variable "{}" of {} cannot be {}'
CANNOT_USE_TYPEVAR_AS_EXPRESSION: Final = 'Type variable "{}.{}" cannot be used as an expression'
INVALID_TYPEVAR_AS_TYPEARG: Final = 'Type variable "{}" not valid as type argument value for "{}"'
INVALID_TYPEVAR_ARG_BOUND: Final = 'Type argument {} of "{}" must be a subtype of {}'
INVALID_TYPEVAR_ARG_VALUE: Final = 'Invalid type argument value for "{}"'
TYPEVAR_VARIANCE_DEF: Final = 'TypeVar "{}" may only be a literal bool'
TYPEVAR_ARG_MUST_BE_TYPE: Final = '{} "{}" must be a type'
TYPEVAR_UNEXPECTED_ARGUMENT: Final = 'Unexpected argument to "TypeVar()"'
UNBOUND_TYPEVAR: Final = (
"A function returning TypeVar should receive at least "
"one argument containing the same TypeVar"
)
TYPE_PARAMETERS_SHOULD_BE_DECLARED: Final = (
"All type parameters should be declared ({} not declared)"
)
# Super
TOO_MANY_ARGS_FOR_SUPER: Final = ErrorMessage('Too many arguments for "super"')
SUPER_WITH_SINGLE_ARG_NOT_SUPPORTED: Final = ErrorMessage(
'"super" with a single argument not supported'
)
UNSUPPORTED_ARG_1_FOR_SUPER: Final = ErrorMessage('Unsupported argument 1 for "super"')
UNSUPPORTED_ARG_2_FOR_SUPER: Final = ErrorMessage('Unsupported argument 2 for "super"')
SUPER_VARARGS_NOT_SUPPORTED: Final = ErrorMessage('Varargs not supported with "super"')
SUPER_POSITIONAL_ARGS_REQUIRED: Final = ErrorMessage('"super" only accepts positional arguments')
SUPER_ARG_2_NOT_INSTANCE_OF_ARG_1: Final = ErrorMessage(
'Argument 2 for "super" not an instance of argument 1'
)
TARGET_CLASS_HAS_NO_BASE_CLASS: Final = ErrorMessage("Target class has no base class")
SUPER_OUTSIDE_OF_METHOD_NOT_SUPPORTED: Final = ErrorMessage(
'"super()" outside of a method is not supported'
)
SUPER_ENCLOSING_POSITIONAL_ARGS_REQUIRED: Final = ErrorMessage(
'"super()" requires one or two positional arguments in enclosing function'
)
# Self-type
MISSING_OR_INVALID_SELF_TYPE: Final = ErrorMessage(
"Self argument missing for a non-static method (or an invalid type for self)"
)
ERASED_SELF_TYPE_NOT_SUPERTYPE: Final = ErrorMessage(
'The erased type of self "{}" is not a supertype of its class "{}"'
)
# Final
CANNOT_INHERIT_FROM_FINAL: Final = ErrorMessage('Cannot inherit from final class "{}"')
DEPENDENT_FINAL_IN_CLASS_BODY: Final = ErrorMessage(
"Final name declared in class body cannot depend on type variables"
)
CANNOT_ACCESS_FINAL_INSTANCE_ATTR: Final = (
'Cannot access final instance attribute "{}" on class object'
)
CANNOT_MAKE_DELETABLE_FINAL: Final = ErrorMessage("Deletable attribute cannot be final")
# Enum
ENUM_MEMBERS_ATTR_WILL_BE_OVERRIDEN: Final = ErrorMessage(
'Assigned "__members__" will be overridden by "Enum" internally'
)
# ClassVar
CANNOT_OVERRIDE_INSTANCE_VAR: Final = ErrorMessage(
'Cannot override instance variable (previously declared on base class "{}") with class '
"variable"
)
CANNOT_OVERRIDE_CLASS_VAR: Final = ErrorMessage(
'Cannot override class variable (previously declared on base class "{}") with instance '
"variable"
)
CLASS_VAR_WITH_TYPEVARS: Final = "ClassVar cannot contain type variables"
CLASS_VAR_WITH_GENERIC_SELF: Final = "ClassVar cannot contain Self type in generic classes"
CLASS_VAR_OUTSIDE_OF_CLASS: Final = "ClassVar can only be used for assignments in class body"
# Protocol
RUNTIME_PROTOCOL_EXPECTED: Final = ErrorMessage(
"Only @runtime_checkable protocols can be used with instance and class checks"
)
CANNOT_INSTANTIATE_PROTOCOL: Final = ErrorMessage('Cannot instantiate protocol class "{}"')
TOO_MANY_UNION_COMBINATIONS: Final = ErrorMessage(
"Not all union combinations were tried because there are too many unions"
)
CONTIGUOUS_ITERABLE_EXPECTED: Final = ErrorMessage("Contiguous iterable with same type expected")
ITERABLE_TYPE_EXPECTED: Final = ErrorMessage("Invalid type '{}' for *expr (iterable expected)")
TYPE_GUARD_POS_ARG_REQUIRED: Final = ErrorMessage("Type {} requires positional argument")
# Match Statement
MISSING_MATCH_ARGS: Final = 'Class "{}" doesn\'t define "__match_args__"'
OR_PATTERN_ALTERNATIVE_NAMES: Final = "Alternative patterns bind different names"
CLASS_PATTERN_GENERIC_TYPE_ALIAS: Final = (
"Class pattern class must not be a type alias with type parameters"
)
CLASS_PATTERN_TYPE_REQUIRED: Final = 'Expected type in class pattern; found "{}"'
CLASS_PATTERN_TOO_MANY_POSITIONAL_ARGS: Final = "Too many positional patterns for class pattern"
CLASS_PATTERN_KEYWORD_MATCHES_POSITIONAL: Final = (
'Keyword "{}" already matches a positional pattern'
)
CLASS_PATTERN_DUPLICATE_KEYWORD_PATTERN: Final = 'Duplicate keyword pattern "{}"'
CLASS_PATTERN_UNKNOWN_KEYWORD: Final = 'Class "{}" has no attribute "{}"'
CLASS_PATTERN_CLASS_OR_STATIC_METHOD: Final = "Cannot have both classmethod and staticmethod"
MULTIPLE_ASSIGNMENTS_IN_PATTERN: Final = 'Multiple assignments to name "{}" in pattern'
CANNOT_MODIFY_MATCH_ARGS: Final = 'Cannot assign to "__match_args__"'
DATACLASS_FIELD_ALIAS_MUST_BE_LITERAL: Final = (
'"alias" argument to dataclass field must be a string literal'
)
DATACLASS_POST_INIT_MUST_BE_A_FUNCTION: Final = '"__post_init__" method must be an instance method'
# fastparse
FAILED_TO_MERGE_OVERLOADS: Final = ErrorMessage(
"Condition can't be inferred, unable to merge overloads"
)
TYPE_IGNORE_WITH_ERRCODE_ON_MODULE: Final = ErrorMessage(
"type ignore with error code is not supported for modules; "
'use `# mypy: disable-error-code="{}"`',
codes.SYNTAX,
)
INVALID_TYPE_IGNORE: Final = ErrorMessage('Invalid "type: ignore" comment', codes.SYNTAX)
TYPE_COMMENT_SYNTAX_ERROR_VALUE: Final = ErrorMessage(
'Syntax error in type comment "{}"', codes.SYNTAX
)
ELLIPSIS_WITH_OTHER_TYPEARGS: Final = ErrorMessage(
"Ellipses cannot accompany other argument types in function type signature", codes.SYNTAX
)
TYPE_SIGNATURE_TOO_MANY_ARGS: Final = ErrorMessage(
"Type signature has too many arguments", codes.SYNTAX
)
TYPE_SIGNATURE_TOO_FEW_ARGS: Final = ErrorMessage(
"Type signature has too few arguments", codes.SYNTAX
)
ARG_CONSTRUCTOR_NAME_EXPECTED: Final = ErrorMessage("Expected arg constructor name", codes.SYNTAX)
ARG_CONSTRUCTOR_TOO_MANY_ARGS: Final = ErrorMessage(
"Too many arguments for argument constructor", codes.SYNTAX
)
MULTIPLE_VALUES_FOR_NAME_KWARG: Final = ErrorMessage(
'"{}" gets multiple values for keyword argument "name"', codes.SYNTAX
)
MULTIPLE_VALUES_FOR_TYPE_KWARG: Final = ErrorMessage(
'"{}" gets multiple values for keyword argument "type"', codes.SYNTAX
)
ARG_CONSTRUCTOR_UNEXPECTED_ARG: Final = ErrorMessage(
'Unexpected argument "{}" for argument constructor', codes.SYNTAX
)
ARG_NAME_EXPECTED_STRING_LITERAL: Final = ErrorMessage(
"Expected string literal for argument name, got {}", codes.SYNTAX
)
NARROWED_TYPE_NOT_SUBTYPE: Final = ErrorMessage(
"Narrowed type {} is not a subtype of input type {}", codes.NARROWED_TYPE_NOT_SUBTYPE
)
TYPE_VAR_TOO_FEW_CONSTRAINED_TYPES: Final = ErrorMessage(
"Type variable must have at least two constrained types", codes.MISC
)
TYPE_VAR_YIELD_EXPRESSION_IN_BOUND: Final = ErrorMessage(
"Yield expression cannot be used as a type variable bound", codes.SYNTAX
)
TYPE_VAR_NAMED_EXPRESSION_IN_BOUND: Final = ErrorMessage(
"Named expression cannot be used as a type variable bound", codes.SYNTAX
)
TYPE_VAR_AWAIT_EXPRESSION_IN_BOUND: Final = ErrorMessage(
"Await expression cannot be used as a type variable bound", codes.SYNTAX
)
TYPE_ALIAS_WITH_YIELD_EXPRESSION: Final = ErrorMessage(
"Yield expression cannot be used within a type alias", codes.SYNTAX
)
TYPE_ALIAS_WITH_NAMED_EXPRESSION: Final = ErrorMessage(
"Named expression cannot be used within a type alias", codes.SYNTAX
)
TYPE_ALIAS_WITH_AWAIT_EXPRESSION: Final = ErrorMessage(
"Await expression cannot be used within a type alias", codes.SYNTAX
)
TYPE_PARAM_DEFAULT_NOT_SUPPORTED: Final = ErrorMessage(
"Type parameter default types not supported when using Python 3.12 type parameter syntax",
codes.MISC,
)
| algorandfoundation/puya | src/puyapy/_vendor/mypy/message_registry.py | Python | NOASSERTION | 17,128 |
"""Facilities for generating error messages during type checking.
Don't add any non-trivial message construction logic to the type
checker, as it can compromise clarity and make messages less
consistent. Add such logic to this module instead. Literal messages, including those
with format args, should be defined as constants in mypy.message_registry.
Historically we tried to avoid all message string literals in the type
checker but we are moving away from this convention.
"""
from __future__ import annotations
import difflib
import itertools
import re
from contextlib import contextmanager
from textwrap import dedent
from typing import Any, Callable, Collection, Final, Iterable, Iterator, List, Sequence, cast
import mypy.typeops
from mypy import errorcodes as codes, message_registry
from mypy.erasetype import erase_type
from mypy.errorcodes import ErrorCode
from mypy.errors import ErrorInfo, Errors, ErrorWatcher
from mypy.nodes import (
ARG_NAMED,
ARG_NAMED_OPT,
ARG_OPT,
ARG_POS,
ARG_STAR,
ARG_STAR2,
CONTRAVARIANT,
COVARIANT,
SYMBOL_FUNCBASE_TYPES,
ArgKind,
CallExpr,
ClassDef,
Context,
Expression,
FuncDef,
IndexExpr,
MypyFile,
NameExpr,
ReturnStmt,
StrExpr,
SymbolNode,
SymbolTable,
TypeInfo,
Var,
reverse_builtin_aliases,
)
from mypy.operators import op_methods, op_methods_to_symbols
from mypy.options import Options
from mypy.subtypes import (
IS_CLASS_OR_STATIC,
IS_CLASSVAR,
IS_SETTABLE,
IS_VAR,
find_member,
get_member_flags,
is_same_type,
is_subtype,
)
from mypy.typeops import separate_union_literals
from mypy.types import (
AnyType,
CallableType,
DeletedType,
FunctionLike,
Instance,
LiteralType,
NoneType,
Overloaded,
Parameters,
ParamSpecType,
PartialType,
ProperType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeStrVisitor,
TypeType,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
flatten_nested_unions,
get_proper_type,
get_proper_types,
)
from mypy.typetraverser import TypeTraverserVisitor
from mypy.util import plural_s, unmangle
TYPES_FOR_UNIMPORTED_HINTS: Final = {
"typing.Any",
"typing.Callable",
"typing.Dict",
"typing.Iterable",
"typing.Iterator",
"typing.List",
"typing.Optional",
"typing.Set",
"typing.Tuple",
"typing.TypeVar",
"typing.Union",
"typing.cast",
}
ARG_CONSTRUCTOR_NAMES: Final = {
ARG_POS: "Arg",
ARG_OPT: "DefaultArg",
ARG_NAMED: "NamedArg",
ARG_NAMED_OPT: "DefaultNamedArg",
ARG_STAR: "VarArg",
ARG_STAR2: "KwArg",
}
# Map from the full name of a missing definition to the test fixture (under
# test-data/unit/fixtures/) that provides the definition. This is used for
# generating better error messages when running mypy tests only.
SUGGESTED_TEST_FIXTURES: Final = {
"builtins.set": "set.pyi",
"builtins.tuple": "tuple.pyi",
"builtins.bool": "bool.pyi",
"builtins.Exception": "exception.pyi",
"builtins.BaseException": "exception.pyi",
"builtins.isinstance": "isinstancelist.pyi",
"builtins.property": "property.pyi",
"builtins.classmethod": "classmethod.pyi",
"typing._SpecialForm": "typing-medium.pyi",
}
UNSUPPORTED_NUMBERS_TYPES: Final = {
"numbers.Number",
"numbers.Complex",
"numbers.Real",
"numbers.Rational",
"numbers.Integral",
}
MAX_TUPLE_ITEMS = 10
MAX_UNION_ITEMS = 10
class MessageBuilder:
"""Helper class for reporting type checker error messages with parameters.
The methods of this class need to be provided with the context within a
file; the errors member manages the wider context.
IDEA: Support a 'verbose mode' that includes full information about types
in error messages and that may otherwise produce more detailed error
messages.
"""
# Report errors using this instance. It knows about the current file and
# import context.
errors: Errors
modules: dict[str, MypyFile]
# Hack to deduplicate error messages from union types
_disable_type_names: list[bool]
def __init__(self, errors: Errors, modules: dict[str, MypyFile]) -> None:
self.errors = errors
self.options = errors.options
self.modules = modules
self._disable_type_names = []
#
# Helpers
#
def filter_errors(
self,
*,
filter_errors: bool | Callable[[str, ErrorInfo], bool] = True,
save_filtered_errors: bool = False,
) -> ErrorWatcher:
return ErrorWatcher(
self.errors, filter_errors=filter_errors, save_filtered_errors=save_filtered_errors
)
def add_errors(self, errors: list[ErrorInfo]) -> None:
"""Add errors in messages to this builder."""
for info in errors:
self.errors.add_error_info(info)
@contextmanager
def disable_type_names(self) -> Iterator[None]:
self._disable_type_names.append(True)
try:
yield
finally:
self._disable_type_names.pop()
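    # Usage sketch: callers wrap reporting for union items in
    # `with msg.disable_type_names(): ...` so that nested failures fall back to
    # wording like "(likely involving Union)" instead of repeating every union
    # item's type name.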
def are_type_names_disabled(self) -> bool:
return len(self._disable_type_names) > 0 and self._disable_type_names[-1]
def prefer_simple_messages(self) -> bool:
"""Should we generate simple/fast error messages?
        If errors aren't shown to the user, we don't want to waste cycles producing
complex error messages.
"""
return self.errors.prefer_simple_messages()
def report(
self,
msg: str,
context: Context | None,
severity: str,
*,
code: ErrorCode | None = None,
file: str | None = None,
origin: Context | None = None,
offset: int = 0,
allow_dups: bool = False,
secondary_context: Context | None = None,
) -> None:
"""Report an error or note (unless disabled).
Note that context controls where error is reported, while origin controls
where # type: ignore comments have effect.
"""
def span_from_context(ctx: Context) -> Iterable[int]:
"""This determines where a type: ignore for a given context has effect.
Current logic is a bit tricky, to keep as much backwards compatibility as
possible. We may reconsider this to always be a single line (or otherwise
simplify it) when we drop Python 3.7.
TODO: address this in follow up PR
"""
if isinstance(ctx, (ClassDef, FuncDef)):
return range(ctx.deco_line or ctx.line, ctx.line + 1)
elif not isinstance(ctx, Expression):
return [ctx.line]
else:
return range(ctx.line, (ctx.end_line or ctx.line) + 1)
origin_span: Iterable[int] | None
if origin is not None:
origin_span = span_from_context(origin)
elif context is not None:
origin_span = span_from_context(context)
else:
origin_span = None
if secondary_context is not None:
assert origin_span is not None
origin_span = itertools.chain(origin_span, span_from_context(secondary_context))
self.errors.report(
context.line if context else -1,
context.column if context else -1,
msg,
severity=severity,
file=file,
offset=offset,
origin_span=origin_span,
end_line=context.end_line if context else -1,
end_column=context.end_column if context else -1,
code=code,
allow_dups=allow_dups,
)
def fail(
self,
msg: str,
context: Context | None,
*,
code: ErrorCode | None = None,
file: str | None = None,
allow_dups: bool = False,
secondary_context: Context | None = None,
) -> None:
"""Report an error message (unless disabled)."""
self.report(
msg,
context,
"error",
code=code,
file=file,
allow_dups=allow_dups,
secondary_context=secondary_context,
)
def note(
self,
msg: str,
context: Context,
file: str | None = None,
origin: Context | None = None,
offset: int = 0,
allow_dups: bool = False,
*,
code: ErrorCode | None = None,
secondary_context: Context | None = None,
) -> None:
"""Report a note (unless disabled)."""
self.report(
msg,
context,
"note",
file=file,
origin=origin,
offset=offset,
allow_dups=allow_dups,
code=code,
secondary_context=secondary_context,
)
def note_multiline(
self,
messages: str,
context: Context,
file: str | None = None,
offset: int = 0,
allow_dups: bool = False,
code: ErrorCode | None = None,
*,
secondary_context: Context | None = None,
) -> None:
"""Report as many notes as lines in the message (unless disabled)."""
for msg in messages.splitlines():
self.report(
msg,
context,
"note",
file=file,
offset=offset,
allow_dups=allow_dups,
code=code,
secondary_context=secondary_context,
)
#
# Specific operations
#
# The following operations are for generating specific error messages. They
# get some information as arguments, and they build an error message based
# on them.
def has_no_attr(
self,
original_type: Type,
typ: Type,
member: str,
context: Context,
module_symbol_table: SymbolTable | None = None,
) -> ErrorCode | None:
"""Report a missing or non-accessible member.
original_type is the top-level type on which the error occurred.
typ is the actual type that is missing the member. These can be
different, e.g., in a union, original_type will be the union and typ
will be the specific item in the union that does not have the member
attribute.
'module_symbol_table' is passed to this function if the type for which we
are trying to get a member was originally a module. The SymbolTable allows
        us to look up and suggest attributes of the module since they are not
directly available on original_type
If member corresponds to an operator, use the corresponding operator
name in the messages. Return the error code that was produced, if any.
"""
original_type = get_proper_type(original_type)
typ = get_proper_type(typ)
if isinstance(original_type, Instance) and original_type.type.has_readable_member(member):
self.fail(f'Member "{member}" is not assignable', context)
return None
elif member == "__contains__":
self.fail(
f"Unsupported right operand type for in ({format_type(original_type, self.options)})",
context,
code=codes.OPERATOR,
)
return codes.OPERATOR
elif member in op_methods.values():
            # Access to a binary operator member (e.g. __add__). This case does
# not handle indexing operations.
for op, method in op_methods.items():
if method == member:
self.unsupported_left_operand(op, original_type, context)
return codes.OPERATOR
elif member == "__neg__":
self.fail(
f"Unsupported operand type for unary - ({format_type(original_type, self.options)})",
context,
code=codes.OPERATOR,
)
return codes.OPERATOR
elif member == "__pos__":
self.fail(
f"Unsupported operand type for unary + ({format_type(original_type, self.options)})",
context,
code=codes.OPERATOR,
)
return codes.OPERATOR
elif member == "__invert__":
self.fail(
f"Unsupported operand type for ~ ({format_type(original_type, self.options)})",
context,
code=codes.OPERATOR,
)
return codes.OPERATOR
elif member == "__getitem__":
# Indexed get.
# TODO: Fix this consistently in format_type
if isinstance(original_type, FunctionLike) and original_type.is_type_obj():
self.fail(
"The type {} is not generic and not indexable".format(
format_type(original_type, self.options)
),
context,
)
return None
else:
self.fail(
f"Value of type {format_type(original_type, self.options)} is not indexable",
context,
code=codes.INDEX,
)
return codes.INDEX
elif member == "__setitem__":
# Indexed set.
self.fail(
"Unsupported target for indexed assignment ({})".format(
format_type(original_type, self.options)
),
context,
code=codes.INDEX,
)
return codes.INDEX
elif member == "__call__":
if isinstance(original_type, Instance) and (
original_type.type.fullname == "builtins.function"
):
# "'function' not callable" is a confusing error message.
# Explain that the problem is that the type of the function is not known.
self.fail("Cannot call function of unknown type", context, code=codes.OPERATOR)
return codes.OPERATOR
else:
self.fail(
message_registry.NOT_CALLABLE.format(format_type(original_type, self.options)),
context,
code=codes.OPERATOR,
)
return codes.OPERATOR
else:
# The non-special case: a missing ordinary attribute.
extra = ""
if member == "__iter__":
extra = " (not iterable)"
elif member == "__aiter__":
extra = " (not async iterable)"
if not self.are_type_names_disabled():
failed = False
if isinstance(original_type, Instance) and original_type.type.names:
if (
module_symbol_table is not None
and member in module_symbol_table
and not module_symbol_table[member].module_public
):
self.fail(
f"{format_type(original_type, self.options, module_names=True)} does not "
f'explicitly export attribute "{member}"',
context,
code=codes.ATTR_DEFINED,
)
failed = True
else:
alternatives = set(original_type.type.names.keys())
if module_symbol_table is not None:
alternatives |= {
k for k, v in module_symbol_table.items() if v.module_public
}
# Rare but possible, see e.g. testNewAnalyzerCyclicDefinitionCrossModule
alternatives.discard(member)
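                        # Suggest likely intended names: known common mistakes first,
                        # then the closest fuzzy matches.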
matches = [m for m in COMMON_MISTAKES.get(member, []) if m in alternatives]
matches.extend(best_matches(member, alternatives, n=3))
if member == "__aiter__" and matches == ["__iter__"]:
matches = [] # Avoid misleading suggestion
if matches:
self.fail(
'{} has no attribute "{}"; maybe {}?{}'.format(
format_type(original_type, self.options),
member,
pretty_seq(matches, "or"),
extra,
),
context,
code=codes.ATTR_DEFINED,
)
failed = True
if not failed:
self.fail(
'{} has no attribute "{}"{}'.format(
format_type(original_type, self.options), member, extra
),
context,
code=codes.ATTR_DEFINED,
)
return codes.ATTR_DEFINED
elif isinstance(original_type, UnionType):
# The checker passes "object" in lieu of "None" for attribute
# checks, so we manually convert it back.
typ_format, orig_type_format = format_type_distinctly(
typ, original_type, options=self.options
)
if typ_format == '"object"' and any(
type(item) == NoneType for item in original_type.items
):
typ_format = '"None"'
self.fail(
'Item {} of {} has no attribute "{}"{}'.format(
typ_format, orig_type_format, member, extra
),
context,
code=codes.UNION_ATTR,
)
return codes.UNION_ATTR
elif isinstance(original_type, TypeVarType):
bound = get_proper_type(original_type.upper_bound)
if isinstance(bound, UnionType):
typ_fmt, bound_fmt = format_type_distinctly(typ, bound, options=self.options)
original_type_fmt = format_type(original_type, self.options)
self.fail(
"Item {} of the upper bound {} of type variable {} has no "
'attribute "{}"{}'.format(
typ_fmt, bound_fmt, original_type_fmt, member, extra
),
context,
code=codes.UNION_ATTR,
)
return codes.UNION_ATTR
else:
self.fail(
'{} has no attribute "{}"{}'.format(
format_type(original_type, self.options), member, extra
),
context,
code=codes.ATTR_DEFINED,
)
return codes.ATTR_DEFINED
return None
def unsupported_operand_types(
self,
op: str,
left_type: Any,
right_type: Any,
context: Context,
*,
code: ErrorCode = codes.OPERATOR,
) -> None:
"""Report unsupported operand types for a binary operation.
Types can be Type objects or strings.
"""
left_str = ""
if isinstance(left_type, str):
left_str = left_type
else:
left_str = format_type(left_type, self.options)
right_str = ""
if isinstance(right_type, str):
right_str = right_type
else:
right_str = format_type(right_type, self.options)
if self.are_type_names_disabled():
msg = f"Unsupported operand types for {op} (likely involving Union)"
else:
msg = f"Unsupported operand types for {op} ({left_str} and {right_str})"
self.fail(msg, context, code=code)
def unsupported_left_operand(self, op: str, typ: Type, context: Context) -> None:
if self.are_type_names_disabled():
msg = f"Unsupported left operand type for {op} (some union)"
else:
msg = f"Unsupported left operand type for {op} ({format_type(typ, self.options)})"
self.fail(msg, context, code=codes.OPERATOR)
def not_callable(self, typ: Type, context: Context) -> Type:
self.fail(message_registry.NOT_CALLABLE.format(format_type(typ, self.options)), context)
return AnyType(TypeOfAny.from_error)
def untyped_function_call(self, callee: CallableType, context: Context) -> Type:
name = callable_name(callee) or "(unknown)"
self.fail(
f"Call to untyped function {name} in typed context",
context,
code=codes.NO_UNTYPED_CALL,
)
return AnyType(TypeOfAny.from_error)
def incompatible_argument(
self,
n: int,
m: int,
callee: CallableType,
arg_type: Type,
arg_kind: ArgKind,
object_type: Type | None,
context: Context,
outer_context: Context,
) -> ErrorCode | None:
"""Report an error about an incompatible argument type.
The argument type is arg_type, argument number is n and the
callee type is 'callee'. If the callee represents a method
that corresponds to an operator, use the corresponding
operator name in the messages.
        Return the error code that was used for the argument (multiple error
codes are possible).
"""
arg_type = get_proper_type(arg_type)
target = ""
callee_name = callable_name(callee)
if callee_name is not None:
name = callee_name
if callee.bound_args and callee.bound_args[0] is not None:
base = format_type(callee.bound_args[0], self.options)
else:
base = extract_type(name)
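            # If the callee is an operator method (e.g. "__add__" of "int"), phrase the
            # error in terms of the operator rather than a regular call.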
for method, op in op_methods_to_symbols.items():
for variant in method, "__r" + method[2:]:
# FIX: do not rely on textual formatting
if name.startswith(f'"{variant}" of'):
if op == "in" or variant != method:
# Reversed order of base/argument.
self.unsupported_operand_types(
op, arg_type, base, context, code=codes.OPERATOR
)
else:
self.unsupported_operand_types(
op, base, arg_type, context, code=codes.OPERATOR
)
return codes.OPERATOR
if name.startswith('"__getitem__" of'):
self.invalid_index_type(
arg_type, callee.arg_types[n - 1], base, context, code=codes.INDEX
)
return codes.INDEX
if name.startswith('"__setitem__" of'):
if n == 1:
self.invalid_index_type(
arg_type, callee.arg_types[n - 1], base, context, code=codes.INDEX
)
return codes.INDEX
else:
arg_type_str, callee_type_str = format_type_distinctly(
arg_type, callee.arg_types[n - 1], options=self.options
)
info = (
f" (expression has type {arg_type_str}, "
f"target has type {callee_type_str})"
)
error_msg = (
message_registry.INCOMPATIBLE_TYPES_IN_ASSIGNMENT.with_additional_msg(info)
)
self.fail(error_msg.value, context, code=error_msg.code)
return error_msg.code
target = f"to {name} "
msg = ""
code = codes.MISC
notes: list[str] = []
if callee_name == "<list>":
name = callee_name[1:-1]
n -= 1
actual_type_str, expected_type_str = format_type_distinctly(
arg_type, callee.arg_types[0], options=self.options
)
msg = "{} item {} has incompatible type {}; expected {}".format(
name.title(), n, actual_type_str, expected_type_str
)
code = codes.LIST_ITEM
elif callee_name == "<dict>" and isinstance(
get_proper_type(callee.arg_types[n - 1]), TupleType
):
name = callee_name[1:-1]
n -= 1
key_type, value_type = cast(TupleType, arg_type).items
expected_key_type, expected_value_type = cast(TupleType, callee.arg_types[n]).items
            # Don't increase verbosity unless there is a need to do so.
if is_subtype(key_type, expected_key_type):
key_type_str = format_type(key_type, self.options)
expected_key_type_str = format_type(expected_key_type, self.options)
else:
key_type_str, expected_key_type_str = format_type_distinctly(
key_type, expected_key_type, options=self.options
)
if is_subtype(value_type, expected_value_type):
value_type_str = format_type(value_type, self.options)
expected_value_type_str = format_type(expected_value_type, self.options)
else:
value_type_str, expected_value_type_str = format_type_distinctly(
value_type, expected_value_type, options=self.options
)
msg = "{} entry {} has incompatible type {}: {}; expected {}: {}".format(
name.title(),
n,
key_type_str,
value_type_str,
expected_key_type_str,
expected_value_type_str,
)
code = codes.DICT_ITEM
elif callee_name == "<dict>":
value_type_str, expected_value_type_str = format_type_distinctly(
arg_type, callee.arg_types[n - 1], options=self.options
)
msg = "Unpacked dict entry {} has incompatible type {}; expected {}".format(
n - 1, value_type_str, expected_value_type_str
)
code = codes.DICT_ITEM
elif callee_name == "<list-comprehension>":
actual_type_str, expected_type_str = map(
strip_quotes,
format_type_distinctly(arg_type, callee.arg_types[0], options=self.options),
)
msg = "List comprehension has incompatible type List[{}]; expected List[{}]".format(
actual_type_str, expected_type_str
)
elif callee_name == "<set-comprehension>":
actual_type_str, expected_type_str = map(
strip_quotes,
format_type_distinctly(arg_type, callee.arg_types[0], options=self.options),
)
msg = "Set comprehension has incompatible type Set[{}]; expected Set[{}]".format(
actual_type_str, expected_type_str
)
elif callee_name == "<dictionary-comprehension>":
actual_type_str, expected_type_str = format_type_distinctly(
arg_type, callee.arg_types[n - 1], options=self.options
)
msg = (
"{} expression in dictionary comprehension has incompatible type {}; "
"expected type {}"
).format("Key" if n == 1 else "Value", actual_type_str, expected_type_str)
elif callee_name == "<generator>":
actual_type_str, expected_type_str = format_type_distinctly(
arg_type, callee.arg_types[0], options=self.options
)
msg = "Generator has incompatible item type {}; expected {}".format(
actual_type_str, expected_type_str
)
else:
if self.prefer_simple_messages():
msg = "Argument has incompatible type"
else:
try:
expected_type = callee.arg_types[m - 1]
except IndexError: # Varargs callees
expected_type = callee.arg_types[-1]
arg_type_str, expected_type_str = format_type_distinctly(
arg_type, expected_type, bare=True, options=self.options
)
if arg_kind == ARG_STAR:
arg_type_str = "*" + arg_type_str
elif arg_kind == ARG_STAR2:
arg_type_str = "**" + arg_type_str
# For function calls with keyword arguments, display the argument name rather
# than the number.
arg_label = str(n)
if isinstance(outer_context, CallExpr) and len(outer_context.arg_names) >= n:
arg_name = outer_context.arg_names[n - 1]
if arg_name is not None:
arg_label = f'"{arg_name}"'
if (
arg_kind == ARG_STAR2
and isinstance(arg_type, TypedDictType)
and m <= len(callee.arg_names)
and callee.arg_names[m - 1] is not None
and callee.arg_kinds[m - 1] != ARG_STAR2
):
arg_name = callee.arg_names[m - 1]
assert arg_name is not None
arg_type_str, expected_type_str = format_type_distinctly(
arg_type.items[arg_name], expected_type, bare=True, options=self.options
)
arg_label = f'"{arg_name}"'
if isinstance(outer_context, IndexExpr) and isinstance(
outer_context.index, StrExpr
):
msg = 'Value of "{}" has incompatible type {}; expected {}'.format(
outer_context.index.value,
quote_type_string(arg_type_str),
quote_type_string(expected_type_str),
)
else:
msg = "Argument {} {}has incompatible type {}; expected {}".format(
arg_label,
target,
quote_type_string(arg_type_str),
quote_type_string(expected_type_str),
)
expected_type = get_proper_type(expected_type)
if isinstance(expected_type, UnionType):
expected_types = list(expected_type.items)
else:
expected_types = [expected_type]
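                # Attach extra notes where they apply, e.g. about invariance of container
                # types or numeric type mismatches.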
for type in get_proper_types(expected_types):
if isinstance(arg_type, Instance) and isinstance(type, Instance):
notes = append_invariance_notes(notes, arg_type, type)
notes = append_numbers_notes(notes, arg_type, type)
object_type = get_proper_type(object_type)
if isinstance(object_type, TypedDictType):
code = codes.TYPEDDICT_ITEM
else:
code = codes.ARG_TYPE
self.fail(msg, context, code=code)
if notes:
for note_msg in notes:
self.note(note_msg, context, code=code)
return code
def incompatible_argument_note(
self,
original_caller_type: ProperType,
callee_type: ProperType,
context: Context,
code: ErrorCode | None,
) -> None:
if self.prefer_simple_messages():
return
if isinstance(
original_caller_type, (Instance, TupleType, TypedDictType, TypeType, CallableType)
):
if isinstance(callee_type, Instance) and callee_type.type.is_protocol:
self.report_protocol_problems(
original_caller_type, callee_type, context, code=code
)
if isinstance(callee_type, UnionType):
for item in callee_type.items:
item = get_proper_type(item)
if isinstance(item, Instance) and item.type.is_protocol:
self.report_protocol_problems(
original_caller_type, item, context, code=code
)
if isinstance(callee_type, CallableType) and isinstance(original_caller_type, Instance):
call = find_member(
"__call__", original_caller_type, original_caller_type, is_operator=True
)
if call:
self.note_call(original_caller_type, call, context, code=code)
self.maybe_note_concatenate_pos_args(original_caller_type, callee_type, context, code)
def maybe_note_concatenate_pos_args(
self,
original_caller_type: ProperType,
callee_type: ProperType,
context: Context,
code: ErrorCode | None = None,
) -> None:
# pos-only vs positional can be confusing, with Concatenate
if (
isinstance(callee_type, CallableType)
and isinstance(original_caller_type, CallableType)
and (original_caller_type.from_concatenate or callee_type.from_concatenate)
):
names: list[str] = []
for c, o in zip(
callee_type.formal_arguments(), original_caller_type.formal_arguments()
):
if None in (c.pos, o.pos):
# non-positional
continue
if c.name != o.name and c.name is None and o.name is not None:
names.append(o.name)
if names:
missing_arguments = '"' + '", "'.join(names) + '"'
self.note(
f'This is likely because "{original_caller_type.name}" has named arguments: '
f"{missing_arguments}. Consider marking them positional-only",
context,
code=code,
)
def invalid_index_type(
self,
index_type: Type,
expected_type: Type,
base_str: str,
context: Context,
*,
code: ErrorCode,
) -> None:
index_str, expected_str = format_type_distinctly(
index_type, expected_type, options=self.options
)
self.fail(
"Invalid index type {} for {}; expected type {}".format(
index_str, base_str, expected_str
),
context,
code=code,
)
def readonly_keys_mutated(self, keys: set[str], context: Context) -> None:
if len(keys) == 1:
suffix = "is"
else:
suffix = "are"
self.fail(
"ReadOnly {} TypedDict {} mutated".format(format_key_list(sorted(keys)), suffix),
code=codes.TYPEDDICT_READONLY_MUTATED,
context=context,
)
def too_few_arguments(
self, callee: CallableType, context: Context, argument_names: Sequence[str | None] | None
) -> None:
if self.prefer_simple_messages():
msg = "Too few arguments"
elif argument_names is not None:
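            # Positional arguments appear as None in argument_names; figure out which
            # required parameters were not supplied either positionally or by keyword.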
num_positional_args = sum(k is None for k in argument_names)
arguments_left = callee.arg_names[num_positional_args : callee.min_args]
diff = [k for k in arguments_left if k not in argument_names]
if len(diff) == 1:
msg = "Missing positional argument"
else:
msg = "Missing positional arguments"
callee_name = callable_name(callee)
if callee_name is not None and diff and all(d is not None for d in diff):
args = '", "'.join(cast(List[str], diff))
msg += f' "{args}" in call to {callee_name}'
else:
msg = "Too few arguments" + for_function(callee)
else:
msg = "Too few arguments" + for_function(callee)
self.fail(msg, context, code=codes.CALL_ARG)
def missing_named_argument(self, callee: CallableType, context: Context, name: str) -> None:
msg = f'Missing named argument "{name}"' + for_function(callee)
self.fail(msg, context, code=codes.CALL_ARG)
def too_many_arguments(self, callee: CallableType, context: Context) -> None:
if self.prefer_simple_messages():
msg = "Too many arguments"
else:
msg = "Too many arguments" + for_function(callee)
self.fail(msg, context, code=codes.CALL_ARG)
self.maybe_note_about_special_args(callee, context)
def too_many_arguments_from_typed_dict(
self, callee: CallableType, arg_type: TypedDictType, context: Context
) -> None:
# Try to determine the name of the extra argument.
for key in arg_type.items:
if key not in callee.arg_names:
msg = f'Extra argument "{key}" from **args' + for_function(callee)
break
else:
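            # for/else: every TypedDict key matched a parameter name, so fall back to
            # the generic "too many arguments" error.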
self.too_many_arguments(callee, context)
return
self.fail(msg, context)
def too_many_positional_arguments(self, callee: CallableType, context: Context) -> None:
if self.prefer_simple_messages():
msg = "Too many positional arguments"
else:
msg = "Too many positional arguments" + for_function(callee)
self.fail(msg, context)
self.maybe_note_about_special_args(callee, context)
def maybe_note_about_special_args(self, callee: CallableType, context: Context) -> None:
if self.prefer_simple_messages():
return
# https://github.com/python/mypy/issues/11309
first_arg = callee.def_extras.get("first_arg")
if first_arg and first_arg not in {"self", "cls", "mcs"}:
self.note(
"Looks like the first special argument in a method "
'is not named "self", "cls", or "mcs", '
"maybe it is missing?",
context,
)
def unexpected_keyword_argument_for_function(
self, for_func: str, name: str, context: Context, *, matches: list[str] | None = None
) -> None:
msg = f'Unexpected keyword argument "{name}"' + for_func
if matches:
msg += f"; did you mean {pretty_seq(matches, 'or')}?"
self.fail(msg, context, code=codes.CALL_ARG)
def unexpected_keyword_argument(
self, callee: CallableType, name: str, arg_type: Type, context: Context
) -> None:
# Suggest intended keyword, look for type match else fallback on any match.
matching_type_args = []
not_matching_type_args = []
for i, kwarg_type in enumerate(callee.arg_types):
callee_arg_name = callee.arg_names[i]
if callee_arg_name is not None and callee.arg_kinds[i] != ARG_STAR:
if is_subtype(arg_type, kwarg_type):
matching_type_args.append(callee_arg_name)
else:
not_matching_type_args.append(callee_arg_name)
matches = best_matches(name, matching_type_args, n=3)
if not matches:
matches = best_matches(name, not_matching_type_args, n=3)
self.unexpected_keyword_argument_for_function(
for_function(callee), name, context, matches=matches
)
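        # Also point at the definition of the callee so the available keyword
        # arguments are easy to find.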
module = find_defining_module(self.modules, callee)
if module:
assert callee.definition is not None
fname = callable_name(callee)
if not fname: # an alias to function with a different name
fname = "Called function"
self.note(
f"{fname} defined here",
callee.definition,
file=module.path,
origin=context,
code=codes.CALL_ARG,
)
def duplicate_argument_value(self, callee: CallableType, index: int, context: Context) -> None:
self.fail(
'{} gets multiple values for keyword argument "{}"'.format(
callable_name(callee) or "Function", callee.arg_names[index]
),
context,
)
def does_not_return_value(self, callee_type: Type | None, context: Context) -> None:
"""Report an error about use of an unusable type."""
callee_type = get_proper_type(callee_type)
callee_name = callable_name(callee_type) if isinstance(callee_type, FunctionLike) else None
name = callee_name or "Function"
message = f"{name} does not return a value (it only ever returns None)"
self.fail(message, context, code=codes.FUNC_RETURNS_VALUE)
def deleted_as_rvalue(self, typ: DeletedType, context: Context) -> None:
"""Report an error about using an deleted type as an rvalue."""
if typ.source is None:
s = ""
else:
s = f' "{typ.source}"'
self.fail(f"Trying to read deleted variable{s}", context)
def deleted_as_lvalue(self, typ: DeletedType, context: Context) -> None:
"""Report an error about using an deleted type as an lvalue.
Currently, this only occurs when trying to assign to an
        exception variable outside the local except: block.
"""
if typ.source is None:
s = ""
else:
s = f' "{typ.source}"'
self.fail(f"Assignment to variable{s} outside except: block", context)
def no_variant_matches_arguments(
self,
overload: Overloaded,
arg_types: list[Type],
context: Context,
*,
code: ErrorCode | None = None,
) -> None:
code = code or codes.CALL_OVERLOAD
name = callable_name(overload)
if name:
name_str = f" of {name}"
else:
name_str = ""
arg_types_str = ", ".join(format_type(arg, self.options) for arg in arg_types)
num_args = len(arg_types)
if num_args == 0:
self.fail(
f"All overload variants{name_str} require at least one argument",
context,
code=code,
)
elif num_args == 1:
self.fail(
f"No overload variant{name_str} matches argument type {arg_types_str}",
context,
code=code,
)
else:
self.fail(
f"No overload variant{name_str} matches argument types {arg_types_str}",
context,
code=code,
)
self.note(f"Possible overload variant{plural_s(len(overload.items))}:", context, code=code)
for item in overload.items:
self.note(pretty_callable(item, self.options), context, offset=4, code=code)
def wrong_number_values_to_unpack(
self, provided: int, expected: int, context: Context
) -> None:
if provided < expected:
if provided == 1:
self.fail(f"Need more than 1 value to unpack ({expected} expected)", context)
else:
self.fail(
f"Need more than {provided} values to unpack ({expected} expected)", context
)
elif provided > expected:
self.fail(
f"Too many values to unpack ({expected} expected, {provided} provided)", context
)
def unpacking_strings_disallowed(self, context: Context) -> None:
self.fail("Unpacking a string is disallowed", context)
def type_not_iterable(self, type: Type, context: Context) -> None:
self.fail(f"{format_type(type, self.options)} object is not iterable", context)
def possible_missing_await(self, context: Context, code: ErrorCode | None) -> None:
self.note('Maybe you forgot to use "await"?', context, code=code)
def incompatible_operator_assignment(self, op: str, context: Context) -> None:
self.fail(f"Result type of {op} incompatible in assignment", context)
def overload_signature_incompatible_with_supertype(
self, name: str, name_in_super: str, supertype: str, context: Context
) -> None:
target = self.override_target(name, name_in_super, supertype)
self.fail(
f'Signature of "{name}" incompatible with {target}', context, code=codes.OVERRIDE
)
note_template = 'Overload variants must be defined in the same order as they are in "{}"'
self.note(note_template.format(supertype), context, code=codes.OVERRIDE)
def signature_incompatible_with_supertype(
self,
name: str,
name_in_super: str,
supertype: str,
context: Context,
*,
original: ProperType,
override: ProperType,
) -> None:
code = codes.OVERRIDE
target = self.override_target(name, name_in_super, supertype)
self.fail(f'Signature of "{name}" incompatible with {target}', context, code=code)
original_str, override_str = format_type_distinctly(
original, override, options=self.options, bare=True
)
INCLUDE_DECORATOR = True # Include @classmethod and @staticmethod decorators, if any
ALLOW_DUPS = True # Allow duplicate notes, needed when signatures are duplicates
ALIGN_OFFSET = 1 # One space, to account for the difference between error and note
OFFSET = 4 # Four spaces, so that notes will look like this:
# error: Signature of "f" incompatible with supertype "A"
# note: Superclass:
# note: def f(self) -> str
# note: Subclass:
# note: def f(self, x: str) -> None
self.note(
"Superclass:", context, offset=ALIGN_OFFSET + OFFSET, allow_dups=ALLOW_DUPS, code=code
)
if isinstance(original, (CallableType, Overloaded)):
self.pretty_callable_or_overload(
original,
context,
offset=ALIGN_OFFSET + 2 * OFFSET,
add_class_or_static_decorator=INCLUDE_DECORATOR,
allow_dups=ALLOW_DUPS,
code=code,
)
else:
self.note(
original_str,
context,
offset=ALIGN_OFFSET + 2 * OFFSET,
allow_dups=ALLOW_DUPS,
code=code,
)
self.note(
"Subclass:", context, offset=ALIGN_OFFSET + OFFSET, allow_dups=ALLOW_DUPS, code=code
)
if isinstance(override, (CallableType, Overloaded)):
self.pretty_callable_or_overload(
override,
context,
offset=ALIGN_OFFSET + 2 * OFFSET,
add_class_or_static_decorator=INCLUDE_DECORATOR,
allow_dups=ALLOW_DUPS,
code=code,
)
else:
self.note(
override_str,
context,
offset=ALIGN_OFFSET + 2 * OFFSET,
allow_dups=ALLOW_DUPS,
code=code,
)
def pretty_callable_or_overload(
self,
tp: CallableType | Overloaded,
context: Context,
*,
offset: int = 0,
add_class_or_static_decorator: bool = False,
allow_dups: bool = False,
code: ErrorCode | None = None,
) -> None:
if isinstance(tp, CallableType):
if add_class_or_static_decorator:
decorator = pretty_class_or_static_decorator(tp)
if decorator is not None:
self.note(decorator, context, offset=offset, allow_dups=allow_dups, code=code)
self.note(
pretty_callable(tp, self.options),
context,
offset=offset,
allow_dups=allow_dups,
code=code,
)
elif isinstance(tp, Overloaded):
self.pretty_overload(
tp,
context,
offset,
add_class_or_static_decorator=add_class_or_static_decorator,
allow_dups=allow_dups,
code=code,
)
def argument_incompatible_with_supertype(
self,
arg_num: int,
name: str,
type_name: str | None,
name_in_supertype: str,
arg_type_in_supertype: Type,
supertype: str,
context: Context,
secondary_context: Context,
) -> None:
target = self.override_target(name, name_in_supertype, supertype)
arg_type_in_supertype_f = format_type_bare(arg_type_in_supertype, self.options)
self.fail(
'Argument {} of "{}" is incompatible with {}; '
'supertype defines the argument type as "{}"'.format(
arg_num, name, target, arg_type_in_supertype_f
),
context,
code=codes.OVERRIDE,
secondary_context=secondary_context,
)
if name != "__post_init__":
# `__post_init__` is special, it can be incompatible by design.
# So, this note is misleading.
self.note(
"This violates the Liskov substitution principle",
context,
code=codes.OVERRIDE,
secondary_context=secondary_context,
)
self.note(
"See https://mypy.readthedocs.io/en/stable/common_issues.html#incompatible-overrides",
context,
code=codes.OVERRIDE,
secondary_context=secondary_context,
)
if name == "__eq__" and type_name:
multiline_msg = self.comparison_method_example_msg(class_name=type_name)
self.note_multiline(
multiline_msg, context, code=codes.OVERRIDE, secondary_context=secondary_context
)
def comparison_method_example_msg(self, class_name: str) -> str:
return dedent(
"""\
It is recommended for "__eq__" to work with arbitrary objects, for example:
def __eq__(self, other: object) -> bool:
if not isinstance(other, {class_name}):
return NotImplemented
return <logic to compare two {class_name} instances>
""".format(
class_name=class_name
)
)
def return_type_incompatible_with_supertype(
self,
name: str,
name_in_supertype: str,
supertype: str,
original: Type,
override: Type,
context: Context,
) -> None:
target = self.override_target(name, name_in_supertype, supertype)
override_str, original_str = format_type_distinctly(
override, original, options=self.options
)
self.fail(
'Return type {} of "{}" incompatible with return type {} in {}'.format(
override_str, name, original_str, target
),
context,
code=codes.OVERRIDE,
)
original = get_proper_type(original)
override = get_proper_type(override)
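        # Special-case a common confusion: the supertype declares an "async def" returning
        # AsyncIterator (i.e. a coroutine), while the override is an async generator.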
if (
isinstance(original, Instance)
and isinstance(override, Instance)
and override.type.fullname == "typing.AsyncIterator"
and original.type.fullname == "typing.Coroutine"
and len(original.args) == 3
and original.args[2] == override
):
self.note(f'Consider declaring "{name}" in {target} without "async"', context)
self.note(
"See https://mypy.readthedocs.io/en/stable/more_types.html#asynchronous-iterators",
context,
)
def override_target(self, name: str, name_in_super: str, supertype: str) -> str:
target = f'supertype "{supertype}"'
if name_in_super != name:
target = f'"{name_in_super}" of {target}'
return target
def incompatible_type_application(
self, min_arg_count: int, max_arg_count: int, actual_arg_count: int, context: Context
) -> None:
if max_arg_count == 0:
self.fail("Type application targets a non-generic function or class", context)
return
if min_arg_count == max_arg_count:
s = f"{max_arg_count} expected"
else:
s = f"expected between {min_arg_count} and {max_arg_count}"
if actual_arg_count > max_arg_count:
self.fail(f"Type application has too many types ({s})", context)
else:
self.fail(f"Type application has too few types ({s})", context)
def could_not_infer_type_arguments(
self, callee_type: CallableType, n: int, context: Context
) -> None:
callee_name = callable_name(callee_type)
if callee_name is not None and n > 0:
self.fail(f"Cannot infer type argument {n} of {callee_name}", context)
if callee_name == "<dict>":
# Invariance in key type causes more of these errors than we would want.
self.note(
"Try assigning the literal to a variable annotated as dict[<key>, <val>]",
context,
)
else:
self.fail("Cannot infer function type argument", context)
def invalid_var_arg(self, typ: Type, context: Context) -> None:
self.fail("List or tuple expected as variadic arguments", context)
def invalid_keyword_var_arg(self, typ: Type, is_mapping: bool, context: Context) -> None:
typ = get_proper_type(typ)
if isinstance(typ, Instance) and is_mapping:
self.fail("Keywords must be strings", context)
else:
self.fail(
f"Argument after ** must be a mapping, not {format_type(typ, self.options)}",
context,
code=codes.ARG_TYPE,
)
def undefined_in_superclass(self, member: str, context: Context) -> None:
self.fail(f'"{member}" undefined in superclass', context)
def variable_may_be_undefined(self, name: str, context: Context) -> None:
self.fail(f'Name "{name}" may be undefined', context, code=codes.POSSIBLY_UNDEFINED)
def var_used_before_def(self, name: str, context: Context) -> None:
self.fail(f'Name "{name}" is used before definition', context, code=codes.USED_BEFORE_DEF)
def first_argument_for_super_must_be_type(self, actual: Type, context: Context) -> None:
actual = get_proper_type(actual)
if isinstance(actual, Instance):
# Don't include type of instance, because it can look confusingly like a type
# object.
type_str = "a non-type instance"
else:
type_str = format_type(actual, self.options)
self.fail(
f'Argument 1 for "super" must be a type object; got {type_str}',
context,
code=codes.ARG_TYPE,
)
def unsafe_super(self, method: str, cls: str, ctx: Context) -> None:
self.fail(
'Call to abstract method "{}" of "{}" with trivial body'
" via super() is unsafe".format(method, cls),
ctx,
code=codes.SAFE_SUPER,
)
def too_few_string_formatting_arguments(self, context: Context) -> None:
self.fail("Not enough arguments for format string", context, code=codes.STRING_FORMATTING)
def too_many_string_formatting_arguments(self, context: Context) -> None:
self.fail(
"Not all arguments converted during string formatting",
context,
code=codes.STRING_FORMATTING,
)
def unsupported_placeholder(self, placeholder: str, context: Context) -> None:
self.fail(
f'Unsupported format character "{placeholder}"', context, code=codes.STRING_FORMATTING
)
def string_interpolation_with_star_and_key(self, context: Context) -> None:
self.fail(
"String interpolation contains both stars and mapping keys",
context,
code=codes.STRING_FORMATTING,
)
def requires_int_or_single_byte(self, context: Context, format_call: bool = False) -> None:
self.fail(
'"{}c" requires an integer in range(256) or a single byte'.format(
":" if format_call else "%"
),
context,
code=codes.STRING_FORMATTING,
)
def requires_int_or_char(self, context: Context, format_call: bool = False) -> None:
self.fail(
'"{}c" requires int or char'.format(":" if format_call else "%"),
context,
code=codes.STRING_FORMATTING,
)
def key_not_in_mapping(self, key: str, context: Context) -> None:
self.fail(f'Key "{key}" not found in mapping', context, code=codes.STRING_FORMATTING)
def string_interpolation_mixing_key_and_non_keys(self, context: Context) -> None:
self.fail(
"String interpolation mixes specifier with and without mapping keys",
context,
code=codes.STRING_FORMATTING,
)
def cannot_determine_type(self, name: str, context: Context) -> None:
self.fail(f'Cannot determine type of "{name}"', context, code=codes.HAS_TYPE)
def cannot_determine_type_in_base(self, name: str, base: str, context: Context) -> None:
self.fail(f'Cannot determine type of "{name}" in base class "{base}"', context)
def no_formal_self(self, name: str, item: CallableType, context: Context) -> None:
type = format_type(item, self.options)
self.fail(
f'Attribute function "{name}" with type {type} does not accept self argument', context
)
def incompatible_self_argument(
self, name: str, arg: Type, sig: CallableType, is_classmethod: bool, context: Context
) -> None:
kind = "class attribute function" if is_classmethod else "attribute function"
arg_type = format_type(arg, self.options)
sig_type = format_type(sig, self.options)
self.fail(
f'Invalid self argument {arg_type} to {kind} "{name}" with type {sig_type}', context
)
def incompatible_conditional_function_def(
self, defn: FuncDef, old_type: FunctionLike, new_type: FunctionLike
) -> None:
self.fail("All conditional function variants must have identical signatures", defn)
if isinstance(old_type, (CallableType, Overloaded)) and isinstance(
new_type, (CallableType, Overloaded)
):
self.note("Original:", defn)
self.pretty_callable_or_overload(old_type, defn, offset=4)
self.note("Redefinition:", defn)
self.pretty_callable_or_overload(new_type, defn, offset=4)
def cannot_instantiate_abstract_class(
self, class_name: str, abstract_attributes: dict[str, bool], context: Context
) -> None:
attrs = format_string_list([f'"{a}"' for a in abstract_attributes])
self.fail(
f'Cannot instantiate abstract class "{class_name}" with abstract '
f"attribute{plural_s(abstract_attributes)} {attrs}",
context,
code=codes.ABSTRACT,
)
attrs_with_none = [
f'"{a}"'
for a, implicit_and_can_return_none in abstract_attributes.items()
if implicit_and_can_return_none
]
if not attrs_with_none:
return
if len(attrs_with_none) == 1:
note = (
f"{attrs_with_none[0]} is implicitly abstract because it has an empty function "
"body. If it is not meant to be abstract, explicitly `return` or `return None`."
)
else:
note = (
"The following methods were marked implicitly abstract because they have empty "
f"function bodies: {format_string_list(attrs_with_none)}. "
"If they are not meant to be abstract, explicitly `return` or `return None`."
)
self.note(note, context, code=codes.ABSTRACT)
def base_class_definitions_incompatible(
self, name: str, base1: TypeInfo, base2: TypeInfo, context: Context
) -> None:
self.fail(
'Definition of "{}" in base class "{}" is incompatible '
'with definition in base class "{}"'.format(name, base1.name, base2.name),
context,
)
def cant_assign_to_method(self, context: Context) -> None:
self.fail(message_registry.CANNOT_ASSIGN_TO_METHOD, context, code=codes.METHOD_ASSIGN)
def cant_assign_to_classvar(self, name: str, context: Context) -> None:
self.fail(f'Cannot assign to class variable "{name}" via instance', context)
def no_overridable_method(self, name: str, context: Context) -> None:
self.fail(
f'Method "{name}" is marked as an override, '
"but no base method was found with this name",
context,
)
def explicit_override_decorator_missing(
self, name: str, base_name: str, context: Context
) -> None:
self.fail(
f'Method "{name}" is not using @override '
f'but is overriding a method in class "{base_name}"',
context,
code=codes.EXPLICIT_OVERRIDE_REQUIRED,
)
def final_cant_override_writable(self, name: str, ctx: Context) -> None:
self.fail(f'Cannot override writable attribute "{name}" with a final one', ctx)
def cant_override_final(self, name: str, base_name: str, ctx: Context) -> None:
self.fail(
'Cannot override final attribute "{}"'
' (previously declared in base class "{}")'.format(name, base_name),
ctx,
)
def cant_assign_to_final(self, name: str, attr_assign: bool, ctx: Context) -> None:
"""Warn about a prohibited assignment to a final attribute.
Pass `attr_assign=True` if the assignment assigns to an attribute.
"""
kind = "attribute" if attr_assign else "name"
self.fail(f'Cannot assign to final {kind} "{unmangle(name)}"', ctx)
def protocol_members_cant_be_final(self, ctx: Context) -> None:
self.fail("Protocol member cannot be final", ctx)
def final_without_value(self, ctx: Context) -> None:
self.fail("Final name must be initialized with a value", ctx)
def read_only_property(self, name: str, type: TypeInfo, context: Context) -> None:
self.fail(f'Property "{name}" defined in "{type.name}" is read-only', context)
def incompatible_typevar_value(
self, callee: CallableType, typ: Type, typevar_name: str, context: Context
) -> None:
self.fail(
message_registry.INCOMPATIBLE_TYPEVAR_VALUE.format(
typevar_name, callable_name(callee) or "function", format_type(typ, self.options)
),
context,
code=codes.TYPE_VAR,
)
def dangerous_comparison(self, left: Type, right: Type, kind: str, ctx: Context) -> None:
left_str = "element" if kind == "container" else "left operand"
right_str = "container item" if kind == "container" else "right operand"
message = "Non-overlapping {} check ({} type: {}, {} type: {})"
left_typ, right_typ = format_type_distinctly(left, right, options=self.options)
self.fail(
message.format(kind, left_str, left_typ, right_str, right_typ),
ctx,
code=codes.COMPARISON_OVERLAP,
)
def overload_inconsistently_applies_decorator(self, decorator: str, context: Context) -> None:
self.fail(
f'Overload does not consistently use the "@{decorator}" '
+ "decorator on all function signatures.",
context,
)
def overloaded_signatures_overlap(
self, index1: int, index2: int, flip_note: bool, context: Context
) -> None:
self.fail(
"Overloaded function signatures {} and {} overlap with "
"incompatible return types".format(index1, index2),
context,
code=codes.OVERLOAD_OVERLAP,
)
if flip_note:
self.note(
"Flipping the order of overloads will fix this error",
context,
code=codes.OVERLOAD_OVERLAP,
)
def overloaded_signature_will_never_match(
self, index1: int, index2: int, context: Context
) -> None:
self.fail(
"Overloaded function signature {index2} will never be matched: "
"signature {index1}'s parameter type(s) are the same or broader".format(
index1=index1, index2=index2
),
context,
code=codes.OVERLOAD_CANNOT_MATCH,
)
def overloaded_signatures_typevar_specific(self, index: int, context: Context) -> None:
self.fail(
f"Overloaded function implementation cannot satisfy signature {index} "
+ "due to inconsistencies in how they use type variables",
context,
)
def overloaded_signatures_arg_specific(self, index: int, context: Context) -> None:
self.fail(
"Overloaded function implementation does not accept all possible arguments "
"of signature {}".format(index),
context,
)
def overloaded_signatures_ret_specific(self, index: int, context: Context) -> None:
self.fail(
"Overloaded function implementation cannot produce return type "
"of signature {}".format(index),
context,
)
def warn_both_operands_are_from_unions(self, context: Context) -> None:
self.note("Both left and right operands are unions", context, code=codes.OPERATOR)
def warn_operand_was_from_union(self, side: str, original: Type, context: Context) -> None:
self.note(
f"{side} operand is of type {format_type(original, self.options)}",
context,
code=codes.OPERATOR,
)
def operator_method_signatures_overlap(
self,
reverse_class: TypeInfo,
reverse_method: str,
forward_class: Type,
forward_method: str,
context: Context,
) -> None:
self.fail(
'Signatures of "{}" of "{}" and "{}" of {} '
"are unsafely overlapping".format(
reverse_method,
reverse_class.name,
forward_method,
format_type(forward_class, self.options),
),
context,
)
def forward_operator_not_callable(self, forward_method: str, context: Context) -> None:
self.fail(f'Forward operator "{forward_method}" is not callable', context)
def signatures_incompatible(self, method: str, other_method: str, context: Context) -> None:
self.fail(f'Signatures of "{method}" and "{other_method}" are incompatible', context)
def yield_from_invalid_operand_type(self, expr: Type, context: Context) -> Type:
text = (
format_type(expr, self.options)
if format_type(expr, self.options) != "object"
else expr
)
self.fail(f'"yield from" can\'t be applied to {text}', context)
return AnyType(TypeOfAny.from_error)
def invalid_signature(self, func_type: Type, context: Context) -> None:
self.fail(f"Invalid signature {format_type(func_type, self.options)}", context)
def invalid_signature_for_special_method(
self, func_type: Type, context: Context, method_name: str
) -> None:
self.fail(
f'Invalid signature {format_type(func_type, self.options)} for "{method_name}"',
context,
)
def reveal_type(self, typ: Type, context: Context) -> None:
visitor = TypeStrVisitor(options=self.options)
self.note(f'Revealed type is "{typ.accept(visitor)}"', context)
def reveal_locals(self, type_map: dict[str, Type | None], context: Context) -> None:
        # Sort by variable name so that the output is deterministic.
sorted_locals = dict(sorted(type_map.items(), key=lambda t: t[0]))
if sorted_locals:
self.note("Revealed local types are:", context)
for k, v in sorted_locals.items():
visitor = TypeStrVisitor(options=self.options)
self.note(f" {k}: {v.accept(visitor) if v is not None else None}", context)
else:
self.note("There are no locals to reveal", context)
def unsupported_type_type(self, item: Type, context: Context) -> None:
self.fail(
f'Cannot instantiate type "Type[{format_type_bare(item, self.options)}]"', context
)
def redundant_cast(self, typ: Type, context: Context) -> None:
self.fail(
f"Redundant cast to {format_type(typ, self.options)}",
context,
code=codes.REDUNDANT_CAST,
)
def assert_type_fail(self, source_type: Type, target_type: Type, context: Context) -> None:
(source, target) = format_type_distinctly(source_type, target_type, options=self.options)
self.fail(f"Expression is of type {source}, not {target}", context, code=codes.ASSERT_TYPE)
def unimported_type_becomes_any(self, prefix: str, typ: Type, ctx: Context) -> None:
self.fail(
f"{prefix} becomes {format_type(typ, self.options)} due to an unfollowed import",
ctx,
code=codes.NO_ANY_UNIMPORTED,
)
def need_annotation_for_var(
self, node: SymbolNode, context: Context, python_version: tuple[int, int] | None = None
) -> None:
hint = ""
pep604_supported = not python_version or python_version >= (3, 10)
# type to recommend the user adds
recommended_type = None
# Only gives hint if it's a variable declaration and the partial type is a builtin type
if python_version and isinstance(node, Var) and isinstance(node.type, PartialType):
type_dec = "<type>"
if not node.type.type:
# partial None
if pep604_supported:
recommended_type = f"{type_dec} | None"
else:
recommended_type = f"Optional[{type_dec}]"
elif node.type.type.fullname in reverse_builtin_aliases:
# partial types other than partial None
alias = reverse_builtin_aliases[node.type.type.fullname]
alias = alias.split(".")[-1]
if alias == "Dict":
type_dec = f"{type_dec}, {type_dec}"
if self.options.use_lowercase_names():
alias = alias.lower()
recommended_type = f"{alias}[{type_dec}]"
if recommended_type is not None:
hint = f' (hint: "{node.name}: {recommended_type} = ...")'
self.fail(
f'Need type annotation for "{unmangle(node.name)}"{hint}',
context,
code=codes.VAR_ANNOTATED,
)
def explicit_any(self, ctx: Context) -> None:
self.fail('Explicit "Any" is not allowed', ctx)
def unsupported_target_for_star_typeddict(self, typ: Type, ctx: Context) -> None:
self.fail(
"Unsupported type {} for ** expansion in TypedDict".format(
format_type(typ, self.options)
),
ctx,
code=codes.TYPEDDICT_ITEM,
)
def non_required_keys_absent_with_star(self, keys: list[str], ctx: Context) -> None:
self.fail(
"Non-required {} not explicitly found in any ** item".format(
format_key_list(keys, short=True)
),
ctx,
code=codes.TYPEDDICT_ITEM,
)
def unexpected_typeddict_keys(
self,
typ: TypedDictType,
expected_keys: list[str],
actual_keys: list[str],
context: Context,
) -> None:
actual_set = set(actual_keys)
expected_set = set(expected_keys)
if not typ.is_anonymous():
# Generate simpler messages for some common special cases.
# Use list comprehension instead of set operations to preserve order.
missing = [key for key in expected_keys if key not in actual_set]
if missing:
self.fail(
"Missing {} for TypedDict {}".format(
format_key_list(missing, short=True), format_type(typ, self.options)
),
context,
code=codes.TYPEDDICT_ITEM,
)
extra = [key for key in actual_keys if key not in expected_set]
if extra:
self.fail(
"Extra {} for TypedDict {}".format(
format_key_list(extra, short=True), format_type(typ, self.options)
),
context,
code=codes.TYPEDDICT_UNKNOWN_KEY,
)
if missing or extra:
# No need to check for further errors
return
found = format_key_list(actual_keys, short=True)
if not expected_keys:
self.fail(f"Unexpected TypedDict {found}", context)
return
expected = format_key_list(expected_keys)
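        # If the provided keys are a strict subset of the expected ones, say "only ..."
        # to emphasize that some keys are missing.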
if actual_keys and actual_set < expected_set:
found = f"only {found}"
self.fail(f"Expected {expected} but found {found}", context, code=codes.TYPEDDICT_ITEM)
def typeddict_key_must_be_string_literal(self, typ: TypedDictType, context: Context) -> None:
self.fail(
"TypedDict key must be a string literal; expected one of {}".format(
format_item_name_list(typ.items.keys())
),
context,
code=codes.LITERAL_REQ,
)
def typeddict_key_not_found(
self, typ: TypedDictType, item_name: str, context: Context, setitem: bool = False
) -> None:
"""Handle error messages for TypedDicts that have unknown keys.
        Note that we differentiate between reading a value and setting a
        value.
Setting a value on a TypedDict is an 'unknown-key' error, whereas
reading it is the more serious/general 'item' error.
"""
if typ.is_anonymous():
self.fail(
'"{}" is not a valid TypedDict key; expected one of {}'.format(
item_name, format_item_name_list(typ.items.keys())
),
context,
)
else:
err_code = codes.TYPEDDICT_UNKNOWN_KEY if setitem else codes.TYPEDDICT_ITEM
self.fail(
f'TypedDict {format_type(typ, self.options)} has no key "{item_name}"',
context,
code=err_code,
)
matches = best_matches(item_name, typ.items.keys(), n=3)
if matches:
self.note(
"Did you mean {}?".format(pretty_seq(matches, "or")), context, code=err_code
)
def typeddict_context_ambiguous(self, types: list[TypedDictType], context: Context) -> None:
formatted_types = ", ".join(list(format_type_distinctly(*types, options=self.options)))
self.fail(
f"Type of TypedDict is ambiguous, none of ({formatted_types}) matches cleanly", context
)
def typeddict_key_cannot_be_deleted(
self, typ: TypedDictType, item_name: str, context: Context
) -> None:
if typ.is_anonymous():
self.fail(f'TypedDict key "{item_name}" cannot be deleted', context)
else:
self.fail(
f'Key "{item_name}" of TypedDict {format_type(typ, self.options)} cannot be deleted',
context,
)
def typeddict_setdefault_arguments_inconsistent(
self, default: Type, expected: Type, context: Context
) -> None:
msg = 'Argument 2 to "setdefault" of "TypedDict" has incompatible type {}; expected {}'
self.fail(
msg.format(format_type(default, self.options), format_type(expected, self.options)),
context,
code=codes.TYPEDDICT_ITEM,
)
def type_arguments_not_allowed(self, context: Context) -> None:
self.fail("Parameterized generics cannot be used with class or instance checks", context)
def disallowed_any_type(self, typ: Type, context: Context) -> None:
typ = get_proper_type(typ)
if isinstance(typ, AnyType):
message = 'Expression has type "Any"'
else:
message = f'Expression type contains "Any" (has type {format_type(typ, self.options)})'
self.fail(message, context)
def incorrectly_returning_any(self, typ: Type, context: Context) -> None:
message = (
f"Returning Any from function declared to return {format_type(typ, self.options)}"
)
self.fail(message, context, code=codes.NO_ANY_RETURN)
def incorrect__exit__return(self, context: Context) -> None:
self.fail(
'"bool" is invalid as return type for "__exit__" that always returns False',
context,
code=codes.EXIT_RETURN,
)
self.note(
'Use "typing_extensions.Literal[False]" as the return type or change it to "None"',
context,
code=codes.EXIT_RETURN,
)
self.note(
'If return type of "__exit__" implies that it may return True, '
"the context manager may swallow exceptions",
context,
code=codes.EXIT_RETURN,
)
def untyped_decorated_function(self, typ: Type, context: Context) -> None:
typ = get_proper_type(typ)
if isinstance(typ, AnyType):
self.fail("Function is untyped after decorator transformation", context)
else:
self.fail(
f'Type of decorated function contains type "Any" ({format_type(typ, self.options)})',
context,
)
def typed_function_untyped_decorator(self, func_name: str, context: Context) -> None:
self.fail(f'Untyped decorator makes function "{func_name}" untyped', context)
def bad_proto_variance(
self, actual: int, tvar_name: str, expected: int, context: Context
) -> None:
msg = capitalize(
'{} type variable "{}" used in protocol where'
" {} one is expected".format(
variance_string(actual), tvar_name, variance_string(expected)
)
)
self.fail(msg, context)
def concrete_only_assign(self, typ: Type, context: Context) -> None:
self.fail(
f"Can only assign concrete classes to a variable of type {format_type(typ, self.options)}",
context,
code=codes.TYPE_ABSTRACT,
)
def concrete_only_call(self, typ: Type, context: Context) -> None:
self.fail(
f"Only concrete class can be given where {format_type(typ, self.options)} is expected",
context,
code=codes.TYPE_ABSTRACT,
)
def cannot_use_function_with_type(
self, method_name: str, type_name: str, context: Context
) -> None:
self.fail(f"Cannot use {method_name}() with {type_name} type", context)
def report_non_method_protocol(
self, tp: TypeInfo, members: list[str], context: Context
) -> None:
self.fail(
"Only protocols that don't have non-method members can be used with issubclass()",
context,
)
if len(members) < 3:
attrs = ", ".join(members)
self.note(f'Protocol "{tp.name}" has non-method member(s): {attrs}', context)
def note_call(
self, subtype: Type, call: Type, context: Context, *, code: ErrorCode | None
) -> None:
self.note(
'"{}.__call__" has type {}'.format(
format_type_bare(subtype, self.options),
format_type(call, self.options, verbosity=1),
),
context,
code=code,
)
def unreachable_statement(self, context: Context) -> None:
self.fail("Statement is unreachable", context, code=codes.UNREACHABLE)
def redundant_left_operand(self, op_name: str, context: Context) -> None:
"""Indicates that the left operand of a boolean expression is redundant:
it does not change the truth value of the entire condition as a whole.
'op_name' should either be the string "and" or the string "or".
"""
self.redundant_expr(f'Left operand of "{op_name}"', op_name == "and", context)
def unreachable_right_operand(self, op_name: str, context: Context) -> None:
"""Indicates that the right operand of a boolean expression is redundant:
it does not change the truth value of the entire condition as a whole.
'op_name' should either be the string "and" or the string "or".
"""
self.fail(
f'Right operand of "{op_name}" is never evaluated', context, code=codes.UNREACHABLE
)
def redundant_condition_in_comprehension(self, truthiness: bool, context: Context) -> None:
self.redundant_expr("If condition in comprehension", truthiness, context)
def redundant_condition_in_if(self, truthiness: bool, context: Context) -> None:
self.redundant_expr("If condition", truthiness, context)
def redundant_expr(self, description: str, truthiness: bool, context: Context) -> None:
self.fail(
f"{description} is always {str(truthiness).lower()}",
context,
code=codes.REDUNDANT_EXPR,
)
def impossible_intersection(
self, formatted_base_class_list: str, reason: str, context: Context
) -> None:
template = "Subclass of {} cannot exist: {}"
self.fail(
template.format(formatted_base_class_list, reason), context, code=codes.UNREACHABLE
)
def tvar_without_default_type(
self, tvar_name: str, last_tvar_name_with_default: str, context: Context
) -> None:
self.fail(
f'"{tvar_name}" cannot appear after "{last_tvar_name_with_default}" '
"in type parameter list because it has no default type",
context,
)
def report_protocol_problems(
self,
subtype: Instance | TupleType | TypedDictType | TypeType | CallableType,
supertype: Instance,
context: Context,
*,
code: ErrorCode | None,
) -> None:
"""Report possible protocol conflicts between 'subtype' and 'supertype'.
This includes missing members, incompatible types, and incompatible
attribute flags, such as settable vs read-only or class variable vs
instance variable.
"""
OFFSET = 4 # Four spaces, so that notes will look like this:
# note: 'Cls' is missing following 'Proto' members:
# note: method, attr
MAX_ITEMS = 2 # Maximum number of conflicts, missing members, and overloads shown
# List of special situations where we don't want to report additional problems
exclusions: dict[type, list[str]] = {
TypedDictType: ["typing.Mapping"],
TupleType: ["typing.Iterable", "typing.Sequence"],
}
if supertype.type.fullname in exclusions.get(type(subtype), []):
return
if any(isinstance(tp, UninhabitedType) for tp in get_proper_types(supertype.args)):
# We don't want to add notes for failed inference (e.g. Iterable[Never]).
            # That would only confuse the user even more.
return
class_obj = False
is_module = False
skip = []
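        # Normalize the subtype to an Instance so its members can be compared: tuples and
        # TypedDicts use their fallbacks, type objects use the class they construct.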
if isinstance(subtype, TupleType):
if not isinstance(subtype.partial_fallback, Instance):
return
subtype = subtype.partial_fallback
elif isinstance(subtype, TypedDictType):
if not isinstance(subtype.fallback, Instance):
return
subtype = subtype.fallback
elif isinstance(subtype, TypeType):
if not isinstance(subtype.item, Instance):
return
class_obj = True
subtype = subtype.item
elif isinstance(subtype, CallableType):
if subtype.is_type_obj():
ret_type = get_proper_type(subtype.ret_type)
if isinstance(ret_type, TupleType):
ret_type = ret_type.partial_fallback
if not isinstance(ret_type, Instance):
return
class_obj = True
subtype = ret_type
else:
subtype = subtype.fallback
skip = ["__call__"]
if subtype.extra_attrs and subtype.extra_attrs.mod_name:
is_module = True
# Report missing members
missing = get_missing_protocol_members(subtype, supertype, skip=skip)
if (
missing
and (len(missing) < len(supertype.type.protocol_members) or missing == ["__call__"])
and len(missing) <= MAX_ITEMS
):
if missing == ["__call__"] and class_obj:
self.note(
'"{}" has constructor incompatible with "__call__" of "{}"'.format(
subtype.type.name, supertype.type.name
),
context,
code=code,
)
else:
self.note(
'"{}" is missing following "{}" protocol member{}:'.format(
subtype.type.name, supertype.type.name, plural_s(missing)
),
context,
code=code,
)
self.note(", ".join(missing), context, offset=OFFSET, code=code)
elif len(missing) > MAX_ITEMS or len(missing) == len(supertype.type.protocol_members):
# This is an obviously wrong type: too many missing members
return
# Report member type conflicts
conflict_types = get_conflict_protocol_types(
subtype, supertype, class_obj=class_obj, options=self.options
)
if conflict_types and (
not is_subtype(subtype, erase_type(supertype), options=self.options)
or not subtype.type.defn.type_vars
or not supertype.type.defn.type_vars
# Always show detailed message for ParamSpec
or subtype.type.has_param_spec_type
or supertype.type.has_param_spec_type
):
type_name = format_type(subtype, self.options, module_names=True)
self.note(f"Following member(s) of {type_name} have conflicts:", context, code=code)
for name, got, exp in conflict_types[:MAX_ITEMS]:
exp = get_proper_type(exp)
got = get_proper_type(got)
if not isinstance(exp, (CallableType, Overloaded)) or not isinstance(
got, (CallableType, Overloaded)
):
self.note(
"{}: expected {}, got {}".format(
name, *format_type_distinctly(exp, got, options=self.options)
),
context,
offset=OFFSET,
code=code,
)
else:
self.note("Expected:", context, offset=OFFSET, code=code)
if isinstance(exp, CallableType):
self.note(
pretty_callable(exp, self.options, skip_self=class_obj or is_module),
context,
offset=2 * OFFSET,
code=code,
)
else:
assert isinstance(exp, Overloaded)
self.pretty_overload(
exp, context, 2 * OFFSET, code=code, skip_self=class_obj or is_module
)
self.note("Got:", context, offset=OFFSET, code=code)
if isinstance(got, CallableType):
self.note(
pretty_callable(got, self.options, skip_self=class_obj or is_module),
context,
offset=2 * OFFSET,
code=code,
)
else:
assert isinstance(got, Overloaded)
self.pretty_overload(
got, context, 2 * OFFSET, code=code, skip_self=class_obj or is_module
)
self.print_more(conflict_types, context, OFFSET, MAX_ITEMS, code=code)
# Report flag conflicts (i.e. settable vs read-only etc.)
conflict_flags = get_bad_protocol_flags(subtype, supertype, class_obj=class_obj)
for name, subflags, superflags in conflict_flags[:MAX_ITEMS]:
if not class_obj and IS_CLASSVAR in subflags and IS_CLASSVAR not in superflags:
self.note(
"Protocol member {}.{} expected instance variable,"
" got class variable".format(supertype.type.name, name),
context,
code=code,
)
if not class_obj and IS_CLASSVAR in superflags and IS_CLASSVAR not in subflags:
self.note(
"Protocol member {}.{} expected class variable,"
" got instance variable".format(supertype.type.name, name),
context,
code=code,
)
if IS_SETTABLE in superflags and IS_SETTABLE not in subflags:
self.note(
"Protocol member {}.{} expected settable variable,"
" got read-only attribute".format(supertype.type.name, name),
context,
code=code,
)
if IS_CLASS_OR_STATIC in superflags and IS_CLASS_OR_STATIC not in subflags:
self.note(
"Protocol member {}.{} expected class or static method".format(
supertype.type.name, name
),
context,
code=code,
)
if (
class_obj
and IS_VAR in superflags
and (IS_VAR in subflags and IS_CLASSVAR not in subflags)
):
self.note(
"Only class variables allowed for class object access on protocols,"
' {} is an instance variable of "{}"'.format(name, subtype.type.name),
context,
code=code,
)
if class_obj and IS_CLASSVAR in superflags:
self.note(
"ClassVar protocol member {}.{} can never be matched by a class object".format(
supertype.type.name, name
),
context,
code=code,
)
self.print_more(conflict_flags, context, OFFSET, MAX_ITEMS, code=code)
def pretty_overload(
self,
tp: Overloaded,
context: Context,
offset: int,
*,
add_class_or_static_decorator: bool = False,
allow_dups: bool = False,
code: ErrorCode | None = None,
skip_self: bool = False,
) -> None:
for item in tp.items:
self.note("@overload", context, offset=offset, allow_dups=allow_dups, code=code)
if add_class_or_static_decorator:
decorator = pretty_class_or_static_decorator(item)
if decorator is not None:
self.note(decorator, context, offset=offset, allow_dups=allow_dups, code=code)
self.note(
pretty_callable(item, self.options, skip_self=skip_self),
context,
offset=offset,
allow_dups=allow_dups,
code=code,
)
def print_more(
self,
conflicts: Sequence[Any],
context: Context,
offset: int,
max_items: int,
*,
code: ErrorCode | None = None,
) -> None:
if len(conflicts) > max_items:
self.note(
f"<{len(conflicts) - max_items} more conflict(s) not shown>",
context,
offset=offset,
code=code,
)
def try_report_long_tuple_assignment_error(
self,
subtype: ProperType,
supertype: ProperType,
context: Context,
msg: message_registry.ErrorMessage,
subtype_label: str | None = None,
supertype_label: str | None = None,
) -> bool:
"""Try to generate a meaningful error message for a very long tuple assignment.
Returns a bool: True if a long tuple assignment error was generated,
False if no such error was reported.
"""
if isinstance(subtype, TupleType):
if (
len(subtype.items) > MAX_TUPLE_ITEMS
and isinstance(supertype, Instance)
and supertype.type.fullname == "builtins.tuple"
):
lhs_type = supertype.args[0]
lhs_types = [lhs_type] * len(subtype.items)
self.generate_incompatible_tuple_error(lhs_types, subtype.items, context, msg)
return True
elif isinstance(supertype, TupleType) and (
len(subtype.items) > MAX_TUPLE_ITEMS or len(supertype.items) > MAX_TUPLE_ITEMS
):
if len(subtype.items) != len(supertype.items):
if supertype_label is not None and subtype_label is not None:
msg = msg.with_additional_msg(
" ({} {}, {} {})".format(
subtype_label,
self.format_long_tuple_type(subtype),
supertype_label,
self.format_long_tuple_type(supertype),
)
)
self.fail(msg.value, context, code=msg.code)
return True
self.generate_incompatible_tuple_error(
supertype.items, subtype.items, context, msg
)
return True
return False
def format_long_tuple_type(self, typ: TupleType) -> str:
"""Format very long tuple type using an ellipsis notation"""
item_cnt = len(typ.items)
if item_cnt > MAX_TUPLE_ITEMS:
return "{}[{}, {}, ... <{} more items>]".format(
"tuple" if self.options.use_lowercase_names() else "Tuple",
format_type_bare(typ.items[0], self.options),
format_type_bare(typ.items[1], self.options),
str(item_cnt - 2),
)
else:
return format_type_bare(typ, self.options)
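# Illustrative sketch (added for clarity; not part of the vendored file): for a tuple
# type with far more items than MAX_TUPLE_ITEMS, e.g. 500 ints, this renders as
#   "Tuple[int, int, ... <498 more items>]"
# (or lowercase "tuple[...]" when use_lowercase_names() is enabled); shorter tuples fall
# back to the ordinary full formatting.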
def generate_incompatible_tuple_error(
self,
lhs_types: list[Type],
rhs_types: list[Type],
context: Context,
msg: message_registry.ErrorMessage,
) -> None:
"""Generate error message for individual incompatible tuple pairs"""
error_cnt = 0
notes: list[str] = []
for i, (lhs_t, rhs_t) in enumerate(zip(lhs_types, rhs_types)):
if not is_subtype(lhs_t, rhs_t):
if error_cnt < 3:
notes.append(
"Expression tuple item {} has type {}; {} expected; ".format(
str(i),
format_type(rhs_t, self.options),
format_type(lhs_t, self.options),
)
)
error_cnt += 1
info = f" ({str(error_cnt)} tuple items are incompatible"
if error_cnt - 3 > 0:
info += f"; {str(error_cnt - 3)} items are omitted)"
else:
info += ")"
msg = msg.with_additional_msg(info)
self.fail(msg.value, context, code=msg.code)
for note in notes:
self.note(note, context, code=msg.code)
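# Illustrative sketch (added for clarity; not part of the vendored file): if five item
# pairs fail the is_subtype() check above, the main error message gets the suffix
#     " (5 tuple items are incompatible; 2 items are omitted)"
# and only the first three notes of the form
#     'Expression tuple item 0 has type "str"; "int" expected; '
# are attached to it.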
def add_fixture_note(self, fullname: str, ctx: Context) -> None:
self.note(f'Maybe your test fixture does not define "{fullname}"?', ctx)
if fullname in SUGGESTED_TEST_FIXTURES:
self.note(
"Consider adding [builtins fixtures/{}] to your test description".format(
SUGGESTED_TEST_FIXTURES[fullname]
),
ctx,
)
def annotation_in_unchecked_function(self, context: Context) -> None:
self.note(
"By default the bodies of untyped functions are not checked,"
" consider using --check-untyped-defs",
context,
code=codes.ANNOTATION_UNCHECKED,
)
def type_parameters_should_be_declared(self, undeclared: list[str], context: Context) -> None:
names = ", ".join('"' + n + '"' for n in undeclared)
self.fail(
message_registry.TYPE_PARAMETERS_SHOULD_BE_DECLARED.format(names),
context,
code=codes.VALID_TYPE,
)
def quote_type_string(type_string: str) -> str:
"""Quotes a type representation for use in messages."""
no_quote_regex = r"^<(tuple|union): \d+ items>$"
if (
type_string in ["Module", "overloaded function", "<deleted>"]
or type_string.startswith("Module ")
or re.match(no_quote_regex, type_string) is not None
or type_string.endswith("?")
):
# Messages are easier to read if these aren't quoted. We use a
# regex to match strings with variable contents.
return type_string
return f'"{type_string}"'
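# Illustrative examples (added for clarity; not part of the vendored file):
#   quote_type_string("int")              -> '"int"'
#   quote_type_string("Module")           -> 'Module'            (special-cased, unquoted)
#   quote_type_string("<union: 3 items>") -> '<union: 3 items>'  (matches no_quote_regex)
#   quote_type_string("List[int]?")       -> 'List[int]?'        (trailing "?" stays unquoted)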
def format_callable_args(
arg_types: list[Type],
arg_kinds: list[ArgKind],
arg_names: list[str | None],
format: Callable[[Type], str],
verbosity: int,
) -> str:
"""Format a bunch of Callable arguments into a string"""
arg_strings = []
for arg_name, arg_type, arg_kind in zip(arg_names, arg_types, arg_kinds):
if arg_kind == ARG_POS and arg_name is None or verbosity == 0 and arg_kind.is_positional():
arg_strings.append(format(arg_type))
else:
constructor = ARG_CONSTRUCTOR_NAMES[arg_kind]
if arg_kind.is_star() or arg_name is None:
arg_strings.append(f"{constructor}({format(arg_type)})")
else:
arg_strings.append(f"{constructor}({format(arg_type)}, {repr(arg_name)})")
return ", ".join(arg_strings)
def format_type_inner(
typ: Type,
verbosity: int,
options: Options,
fullnames: set[str] | None,
module_names: bool = False,
) -> str:
"""
Convert a type to a relatively short string suitable for error messages.
Args:
verbosity: a coarse grained control on the verbosity of the type
fullnames: a set of names that should be printed in full
"""
def format(typ: Type) -> str:
return format_type_inner(typ, verbosity, options, fullnames)
def format_list(types: Sequence[Type]) -> str:
return ", ".join(format(typ) for typ in types)
def format_union_items(types: Sequence[Type]) -> list[str]:
formatted = [format(typ) for typ in types if format(typ) != "None"]
if len(formatted) > MAX_UNION_ITEMS and verbosity == 0:
more = len(formatted) - MAX_UNION_ITEMS // 2
formatted = formatted[: MAX_UNION_ITEMS // 2]
else:
more = 0
if more:
formatted.append(f"<{more} more items>")
if any(format(typ) == "None" for typ in types):
formatted.append("None")
return formatted
def format_union(types: Sequence[Type]) -> str:
return " | ".join(format_union_items(types))
def format_literal_value(typ: LiteralType) -> str:
if typ.is_enum_literal():
underlying_type = format(typ.fallback)
return f"{underlying_type}.{typ.value}"
else:
return typ.value_repr()
if isinstance(typ, TypeAliasType) and typ.is_recursive:
if typ.alias is None:
type_str = "<alias (unfixed)>"
else:
if verbosity >= 2 or (fullnames and typ.alias.fullname in fullnames):
type_str = typ.alias.fullname
else:
type_str = typ.alias.name
if typ.args:
type_str += f"[{format_list(typ.args)}]"
return type_str
# TODO: always mention type alias names in errors.
typ = get_proper_type(typ)
if isinstance(typ, Instance):
itype = typ
# Get the short name of the type.
if itype.type.fullname == "types.ModuleType":
# Make some common error messages simpler and tidier.
base_str = "Module"
if itype.extra_attrs and itype.extra_attrs.mod_name and module_names:
return f'{base_str} "{itype.extra_attrs.mod_name}"'
return base_str
if itype.type.fullname == "typing._SpecialForm":
# This is not a real type but used for some typing-related constructs.
return "<typing special form>"
if itype.type.fullname in reverse_builtin_aliases and not options.use_lowercase_names():
alias = reverse_builtin_aliases[itype.type.fullname]
base_str = alias.split(".")[-1]
elif verbosity >= 2 or (fullnames and itype.type.fullname in fullnames):
base_str = itype.type.fullname
else:
base_str = itype.type.name
if not itype.args:
if itype.type.has_type_var_tuple_type and len(itype.type.type_vars) == 1:
return base_str + "[()]"
# No type arguments, just return the type name
return base_str
elif itype.type.fullname == "builtins.tuple":
item_type_str = format(itype.args[0])
return f"{'tuple' if options.use_lowercase_names() else 'Tuple'}[{item_type_str}, ...]"
else:
# There are type arguments. Convert the arguments to strings.
return f"{base_str}[{format_list(itype.args)}]"
elif isinstance(typ, UnpackType):
if options.use_star_unpack():
return f"*{format(typ.type)}"
return f"Unpack[{format(typ.type)}]"
elif isinstance(typ, TypeVarType):
# This is similar to non-generic instance types.
fullname = scoped_type_var_name(typ)
if verbosity >= 2 or (fullnames and fullname in fullnames):
return fullname
return typ.name
elif isinstance(typ, TypeVarTupleType):
# This is similar to non-generic instance types.
fullname = scoped_type_var_name(typ)
if verbosity >= 2 or (fullnames and fullname in fullnames):
return fullname
return typ.name
elif isinstance(typ, ParamSpecType):
# Concatenate[..., P]
if typ.prefix.arg_types:
args = format_callable_args(
typ.prefix.arg_types, typ.prefix.arg_kinds, typ.prefix.arg_names, format, verbosity
)
return f"[{args}, **{typ.name_with_suffix()}]"
else:
# TODO: better disambiguate ParamSpec name clashes.
return typ.name_with_suffix()
elif isinstance(typ, TupleType):
# Prefer the name of the fallback class (if not tuple), as it's more informative.
if typ.partial_fallback.type.fullname != "builtins.tuple":
return format(typ.partial_fallback)
type_items = format_list(typ.items) or "()"
if options.use_lowercase_names():
s = f"tuple[{type_items}]"
else:
s = f"Tuple[{type_items}]"
return s
elif isinstance(typ, TypedDictType):
# If the TypedDictType is named, return the name
if not typ.is_anonymous():
return format(typ.fallback)
items = []
for item_name, item_type in typ.items.items():
modifier = ""
if item_name not in typ.required_keys:
modifier += "?"
if item_name in typ.readonly_keys:
modifier += "="
items.append(f"{item_name!r}{modifier}: {format(item_type)}")
return f"TypedDict({{{', '.join(items)}}})"
elif isinstance(typ, LiteralType):
return f"Literal[{format_literal_value(typ)}]"
elif isinstance(typ, UnionType):
typ = get_proper_type(ignore_last_known_values(typ))
if not isinstance(typ, UnionType):
return format(typ)
literal_items, union_items = separate_union_literals(typ)
# Coalesce multiple Literal[] members. This also changes output order.
# If there's just one Literal item, retain the original ordering.
if len(literal_items) > 1:
literal_str = "Literal[{}]".format(
", ".join(format_literal_value(t) for t in literal_items)
)
if len(union_items) == 1 and isinstance(get_proper_type(union_items[0]), NoneType):
return (
f"{literal_str} | None"
if options.use_or_syntax()
else f"Optional[{literal_str}]"
)
elif union_items:
return (
f"{literal_str} | {format_union(union_items)}"
if options.use_or_syntax()
else f"Union[{', '.join(format_union_items(union_items))}, {literal_str}]"
)
else:
return literal_str
else:
# Only print Union as Optional if the Optional wouldn't have to contain another Union
print_as_optional = (
len(typ.items) - sum(isinstance(get_proper_type(t), NoneType) for t in typ.items)
== 1
)
if print_as_optional:
rest = [t for t in typ.items if not isinstance(get_proper_type(t), NoneType)]
return (
f"{format(rest[0])} | None"
if options.use_or_syntax()
else f"Optional[{format(rest[0])}]"
)
else:
s = (
format_union(typ.items)
if options.use_or_syntax()
else f"Union[{', '.join(format_union_items(typ.items))}]"
)
return s
elif isinstance(typ, NoneType):
return "None"
elif isinstance(typ, AnyType):
return "Any"
elif isinstance(typ, DeletedType):
return "<deleted>"
elif isinstance(typ, UninhabitedType):
return "Never"
elif isinstance(typ, TypeType):
type_name = "type" if options.use_lowercase_names() else "Type"
return f"{type_name}[{format(typ.item)}]"
elif isinstance(typ, FunctionLike):
func = typ
if func.is_type_obj():
# The type of a type object type can be derived from the
# return type (this always works).
return format(TypeType.make_normalized(erase_type(func.items[0].ret_type)))
elif isinstance(func, CallableType):
if func.type_guard is not None:
return_type = f"TypeGuard[{format(func.type_guard)}]"
elif func.type_is is not None:
return_type = f"TypeIs[{format(func.type_is)}]"
else:
return_type = format(func.ret_type)
if func.is_ellipsis_args:
return f"Callable[..., {return_type}]"
param_spec = func.param_spec()
if param_spec is not None:
return f"Callable[{format(param_spec)}, {return_type}]"
args = format_callable_args(
func.arg_types, func.arg_kinds, func.arg_names, format, verbosity
)
return f"Callable[[{args}], {return_type}]"
else:
# Use a simple representation for function types; proper
# function types may result in long and difficult-to-read
# error messages.
return "overloaded function"
elif isinstance(typ, UnboundType):
return typ.accept(TypeStrVisitor(options=options))
elif isinstance(typ, Parameters):
args = format_callable_args(typ.arg_types, typ.arg_kinds, typ.arg_names, format, verbosity)
return f"[{args}]"
elif typ is None:
raise RuntimeError("Type is None")
else:
# Default case; we simply have to return something meaningful here.
return "object"
def collect_all_named_types(t: Type) -> list[Type]:
"""Return all instances/aliases/type variables that `t` contains (including `t`).
This is similar to collect_all_inner_types from typeanal but only
returns instances and will recurse into fallbacks.
"""
visitor = CollectAllNamedTypesQuery()
t.accept(visitor)
return visitor.types
class CollectAllNamedTypesQuery(TypeTraverserVisitor):
def __init__(self) -> None:
self.types: list[Type] = []
def visit_instance(self, t: Instance) -> None:
self.types.append(t)
super().visit_instance(t)
def visit_type_alias_type(self, t: TypeAliasType) -> None:
if t.alias and not t.is_recursive:
get_proper_type(t).accept(self)
else:
self.types.append(t)
super().visit_type_alias_type(t)
def visit_type_var(self, t: TypeVarType) -> None:
self.types.append(t)
super().visit_type_var(t)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> None:
self.types.append(t)
super().visit_type_var_tuple(t)
def visit_param_spec(self, t: ParamSpecType) -> None:
self.types.append(t)
super().visit_param_spec(t)
def scoped_type_var_name(t: TypeVarLikeType) -> str:
if not t.id.namespace:
return t.name
# TODO: support rare cases when both TypeVar name and namespace suffix coincide.
*_, suffix = t.id.namespace.split(".")
return f"{t.name}@{suffix}"
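# Illustrative sketch (added for clarity; not part of the vendored file): assuming a
# TypeVar "T" whose id.namespace is "mod.f" (i.e. it was bound in function mod.f), this
# returns "T@f"; a TypeVar with an empty namespace is returned as plain "T".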
def find_type_overlaps(*types: Type) -> set[str]:
"""Return a set of fullnames that share a short name and appear in either type.
This is used to ensure that distinct types with the same short name are printed
with their fullname.
"""
d: dict[str, set[str]] = {}
for type in types:
for t in collect_all_named_types(type):
if isinstance(t, ProperType) and isinstance(t, Instance):
d.setdefault(t.type.name, set()).add(t.type.fullname)
elif isinstance(t, TypeAliasType) and t.alias:
d.setdefault(t.alias.name, set()).add(t.alias.fullname)
else:
assert isinstance(t, TypeVarLikeType)
d.setdefault(t.name, set()).add(scoped_type_var_name(t))
for shortname in d.keys():
if f"typing.{shortname}" in TYPES_FOR_UNIMPORTED_HINTS:
d[shortname].add(f"typing.{shortname}")
overlaps: set[str] = set()
for fullnames in d.values():
if len(fullnames) > 1:
overlaps.update(fullnames)
return overlaps
def format_type(
typ: Type, options: Options, verbosity: int = 0, module_names: bool = False
) -> str:
"""
Convert a type to a relatively short string suitable for error messages.
`verbosity` is a coarse-grained control on the verbosity of the type
This function returns a string appropriate for unmodified use in error
messages; this means that it will be quoted in most cases. If
modification of the formatted string is required, callers should use
format_type_bare.
"""
return quote_type_string(format_type_bare(typ, options, verbosity, module_names))
def format_type_bare(
typ: Type, options: Options, verbosity: int = 0, module_names: bool = False
) -> str:
"""
Convert a type to a relatively short string suitable for error messages.
`verbosity` is a coarse-grained control on the verbosity of the type
`fullnames` specifies a set of names that should be printed in full
This function will return an unquoted string. If a caller doesn't need to
perform post-processing on the string output, format_type should be used
instead. (The caller may want to use quote_type_string after
processing has happened, to maintain consistent quoting in messages.)
"""
return format_type_inner(typ, verbosity, options, find_type_overlaps(typ), module_names)
def format_type_distinctly(*types: Type, options: Options, bare: bool = False) -> tuple[str, ...]:
"""Jointly format types to distinct strings.
Increase the verbosity of the type strings until they become distinct
while also requiring that distinct types with the same short name are
formatted distinctly.
By default, the returned strings are created using format_type() and will be
quoted accordingly. If ``bare`` is True, the returned strings will not
be quoted; callers who need to do post-processing of the strings before
quoting them (such as prepending * or **) should use this.
"""
overlapping = find_type_overlaps(*types)
for verbosity in range(2):
strs = [
format_type_inner(type, verbosity=verbosity, options=options, fullnames=overlapping)
for type in types
]
if len(set(strs)) == len(strs):
break
if bare:
return tuple(strs)
else:
return tuple(quote_type_string(s) for s in strs)
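# Illustrative sketch (added for clarity; not part of the vendored file): if the two
# types are distinct classes that share the short name "A" (say pkg1.A and pkg2.A),
# plain format_type() would render both as '"A"'; find_type_overlaps() detects the clash
# and the result here is ('"pkg1.A"', '"pkg2.A"').  With bare=True the quotes are omitted.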
def pretty_class_or_static_decorator(tp: CallableType) -> str | None:
"""Return @classmethod or @staticmethod, if any, for the given callable type."""
if tp.definition is not None and isinstance(tp.definition, SYMBOL_FUNCBASE_TYPES):
if tp.definition.is_class:
return "@classmethod"
if tp.definition.is_static:
return "@staticmethod"
return None
def pretty_callable(tp: CallableType, options: Options, skip_self: bool = False) -> str:
"""Return a nice easily-readable representation of a callable type.
For example:
def [T <: int] f(self, x: int, y: T) -> None
If skip_self is True, print an actual callable type, as it would appear
when bound on an instance/class, rather than how it would appear in the
defining statement.
"""
s = ""
asterisk = False
slash = False
for i in range(len(tp.arg_types)):
if s:
s += ", "
if tp.arg_kinds[i].is_named() and not asterisk:
s += "*, "
asterisk = True
if tp.arg_kinds[i] == ARG_STAR:
s += "*"
asterisk = True
if tp.arg_kinds[i] == ARG_STAR2:
s += "**"
name = tp.arg_names[i]
if name:
s += name + ": "
type_str = format_type_bare(tp.arg_types[i], options)
if tp.arg_kinds[i] == ARG_STAR2 and tp.unpack_kwargs:
type_str = f"Unpack[{type_str}]"
s += type_str
if tp.arg_kinds[i].is_optional():
s += " = ..."
if (
not slash
and tp.arg_kinds[i].is_positional()
and name is None
and (
i == len(tp.arg_types) - 1
or (tp.arg_names[i + 1] is not None or not tp.arg_kinds[i + 1].is_positional())
)
):
s += ", /"
slash = True
# If we got a "special arg" (i.e: self, cls, etc...), prepend it to the arg list
if (
isinstance(tp.definition, FuncDef)
and hasattr(tp.definition, "arguments")
and not tp.from_concatenate
):
definition_arg_names = [arg.variable.name for arg in tp.definition.arguments]
if (
len(definition_arg_names) > len(tp.arg_names)
and definition_arg_names[0]
and not skip_self
):
if s:
s = ", " + s
s = definition_arg_names[0] + s
s = f"{tp.definition.name}({s})"
elif tp.name:
first_arg = tp.def_extras.get("first_arg")
if first_arg:
if s:
s = ", " + s
s = first_arg + s
s = f"{tp.name.split()[0]}({s})" # skip "of Class" part
else:
s = f"({s})"
s += " -> "
if tp.type_guard is not None:
s += f"TypeGuard[{format_type_bare(tp.type_guard, options)}]"
elif tp.type_is is not None:
s += f"TypeIs[{format_type_bare(tp.type_is, options)}]"
else:
s += format_type_bare(tp.ret_type, options)
if tp.variables:
tvars = []
for tvar in tp.variables:
if isinstance(tvar, TypeVarType):
upper_bound = get_proper_type(tvar.upper_bound)
if not (
isinstance(upper_bound, Instance)
and upper_bound.type.fullname == "builtins.object"
):
tvars.append(f"{tvar.name}: {format_type_bare(upper_bound, options)}")
elif tvar.values:
tvars.append(
"{}: ({})".format(
tvar.name,
", ".join([format_type_bare(tp, options) for tp in tvar.values]),
)
)
else:
tvars.append(tvar.name)
else:
# For other TypeVarLikeTypes, just use the repr
tvars.append(repr(tvar))
s = f"[{', '.join(tvars)}] {s}"
return f"def {s}"
def variance_string(variance: int) -> str:
if variance == COVARIANT:
return "covariant"
elif variance == CONTRAVARIANT:
return "contravariant"
else:
return "invariant"
def get_missing_protocol_members(left: Instance, right: Instance, skip: list[str]) -> list[str]:
"""Find all protocol members of 'right' that are not implemented
(i.e. completely missing) in 'left'.
"""
assert right.type.is_protocol
missing: list[str] = []
for member in right.type.protocol_members:
if member in skip:
continue
if not find_member(member, left, left):
missing.append(member)
return missing
def get_conflict_protocol_types(
left: Instance, right: Instance, class_obj: bool = False, options: Options | None = None
) -> list[tuple[str, Type, Type]]:
"""Find members that are defined in 'left' but have incompatible types.
Return them as a list of ('member', 'got', 'expected').
"""
assert right.type.is_protocol
conflicts: list[tuple[str, Type, Type]] = []
for member in right.type.protocol_members:
if member in ("__init__", "__new__"):
continue
supertype = find_member(member, right, left)
assert supertype is not None
subtype = mypy.typeops.get_protocol_member(left, member, class_obj)
if not subtype:
continue
is_compat = is_subtype(subtype, supertype, ignore_pos_arg_names=True, options=options)
if IS_SETTABLE in get_member_flags(member, right):
is_compat = is_compat and is_subtype(supertype, subtype, options=options)
if not is_compat:
conflicts.append((member, subtype, supertype))
return conflicts
def get_bad_protocol_flags(
left: Instance, right: Instance, class_obj: bool = False
) -> list[tuple[str, set[int], set[int]]]:
"""Return all incompatible attribute flags for members that are present in both
'left' and 'right'.
"""
assert right.type.is_protocol
all_flags: list[tuple[str, set[int], set[int]]] = []
for member in right.type.protocol_members:
if find_member(member, left, left):
item = (member, get_member_flags(member, left), get_member_flags(member, right))
all_flags.append(item)
bad_flags = []
for name, subflags, superflags in all_flags:
if (
IS_CLASSVAR in subflags
and IS_CLASSVAR not in superflags
and IS_SETTABLE in superflags
or IS_CLASSVAR in superflags
and IS_CLASSVAR not in subflags
or IS_SETTABLE in superflags
and IS_SETTABLE not in subflags
or IS_CLASS_OR_STATIC in superflags
and IS_CLASS_OR_STATIC not in subflags
or class_obj
and IS_VAR in superflags
and IS_CLASSVAR not in subflags
or class_obj
and IS_CLASSVAR in superflags
):
bad_flags.append((name, subflags, superflags))
return bad_flags
def capitalize(s: str) -> str:
"""Capitalize the first character of a string."""
if s == "":
return ""
else:
return s[0].upper() + s[1:]
def extract_type(name: str) -> str:
"""If the argument is the name of a method (of form C.m), return
the type portion in quotes (e.g. "y"). Otherwise, return the string
unmodified.
"""
name = re.sub('^"[a-zA-Z0-9_]+" of ', "", name)
return name
def strip_quotes(s: str) -> str:
"""Strip a double quote at the beginning and end of the string, if any."""
s = re.sub('^"', "", s)
s = re.sub('"$', "", s)
return s
def format_string_list(lst: list[str]) -> str:
assert lst
if len(lst) == 1:
return lst[0]
elif len(lst) <= 5:
return f"{', '.join(lst[:-1])} and {lst[-1]}"
else:
return "%s, ... and %s (%i methods suppressed)" % (
", ".join(lst[:2]),
lst[-1],
len(lst) - 3,
)
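# Illustrative sketch (added for clarity; not part of the vendored file):
#   format_string_list(["f"])            -> "f"
#   format_string_list(["f", "g", "h"])  -> "f, g and h"
#   with seven entries m1..m7 the result is "m1, m2, ... and m7 (4 methods suppressed)"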
def format_item_name_list(s: Iterable[str]) -> str:
lst = list(s)
if len(lst) <= 5:
return "(" + ", ".join([f'"{name}"' for name in lst]) + ")"
else:
return "(" + ", ".join([f'"{name}"' for name in lst[:5]]) + ", ...)"
def callable_name(type: FunctionLike) -> str | None:
name = type.get_name()
if name is not None and name[0] != "<":
return f'"{name}"'.replace(" of ", '" of "')
return name
def for_function(callee: CallableType) -> str:
name = callable_name(callee)
if name is not None:
return f" for {name}"
return ""
def wrong_type_arg_count(low: int, high: int, act: str, name: str) -> str:
if low == high:
s = f"{low} type arguments"
if low == 0:
s = "no type arguments"
elif low == 1:
s = "1 type argument"
else:
s = f"between {low} and {high} type arguments"
if act == "0":
act = "none"
return f'"{name}" expects {s}, but {act} given'
def find_defining_module(modules: dict[str, MypyFile], typ: CallableType) -> MypyFile | None:
if not typ.definition:
return None
fullname = typ.definition.fullname
if "." in fullname:
for i in range(fullname.count(".")):
module_name = fullname.rsplit(".", i + 1)[0]
try:
return modules[module_name]
except KeyError:
pass
assert False, "Couldn't determine module from CallableType"
return None
# For hard-coding suggested missing member alternatives.
COMMON_MISTAKES: Final[dict[str, Sequence[str]]] = {"add": ("append", "extend")}
def _real_quick_ratio(a: str, b: str) -> float:
# this is an upper bound on difflib.SequenceMatcher.ratio
# similar to difflib.SequenceMatcher.real_quick_ratio, but faster since we don't instantiate
al = len(a)
bl = len(b)
return 2.0 * min(al, bl) / (al + bl)
def best_matches(current: str, options: Collection[str], n: int) -> list[str]:
if not current:
return []
# narrow down options cheaply
options = [o for o in options if _real_quick_ratio(current, o) > 0.75]
if len(options) >= 50:
options = [o for o in options if abs(len(o) - len(current)) <= 1]
ratios = {option: difflib.SequenceMatcher(a=current, b=option).ratio() for option in options}
options = [option for option, ratio in ratios.items() if ratio > 0.75]
return sorted(options, key=lambda v: (-ratios[v], v))[:n]
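# Illustrative sketch (added for clarity; not part of the vendored file):
# _real_quick_ratio() is a cheap upper bound used to discard candidates before running
# the much slower difflib.SequenceMatcher, e.g.
#   _real_quick_ratio("foo", "fooo")      == 2 * 3 / 7  ~ 0.86  (kept; true ratio may exceed 0.75)
#   _real_quick_ratio("foo", "foobarbaz") == 2 * 3 / 12 == 0.5  (rejected immediately)
# best_matches() then keeps only options whose true ratio exceeds 0.75, best first.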
def pretty_seq(args: Sequence[str], conjunction: str) -> str:
quoted = ['"' + a + '"' for a in args]
if len(quoted) == 1:
return quoted[0]
if len(quoted) == 2:
return f"{quoted[0]} {conjunction} {quoted[1]}"
last_sep = ", " + conjunction + " "
return ", ".join(quoted[:-1]) + last_sep + quoted[-1]
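# Illustrative sketch (added for clarity; not part of the vendored file):
#   pretty_seq(["x"], "or")            -> '"x"'
#   pretty_seq(["x", "y"], "or")       -> '"x" or "y"'
#   pretty_seq(["x", "y", "z"], "or")  -> '"x", "y", or "z"'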
def append_invariance_notes(
notes: list[str], arg_type: Instance, expected_type: Instance
) -> list[str]:
"""Explain that the type is invariant and give notes for how to solve the issue."""
invariant_type = ""
covariant_suggestion = ""
if (
arg_type.type.fullname == "builtins.list"
and expected_type.type.fullname == "builtins.list"
and is_subtype(arg_type.args[0], expected_type.args[0])
):
invariant_type = "List"
covariant_suggestion = 'Consider using "Sequence" instead, which is covariant'
elif (
arg_type.type.fullname == "builtins.dict"
and expected_type.type.fullname == "builtins.dict"
and is_same_type(arg_type.args[0], expected_type.args[0])
and is_subtype(arg_type.args[1], expected_type.args[1])
):
invariant_type = "Dict"
covariant_suggestion = (
'Consider using "Mapping" instead, which is covariant in the value type'
)
if invariant_type and covariant_suggestion:
notes.append(
f'"{invariant_type}" is invariant -- see '
+ "https://mypy.readthedocs.io/en/stable/common_issues.html#variance"
)
notes.append(covariant_suggestion)
return notes
def append_union_note(
notes: list[str], arg_type: UnionType, expected_type: UnionType, options: Options
) -> list[str]:
"""Point to specific union item(s) that may cause failure in subtype check."""
non_matching = []
items = flatten_nested_unions(arg_type.items)
if len(items) < MAX_UNION_ITEMS:
return notes
for item in items:
if not is_subtype(item, expected_type):
non_matching.append(item)
if non_matching:
types = ", ".join([format_type(typ, options) for typ in non_matching])
notes.append(f"Item{plural_s(non_matching)} in the first union not in the second: {types}")
return notes
def append_numbers_notes(
notes: list[str], arg_type: Instance, expected_type: Instance
) -> list[str]:
"""Explain if an unsupported type from "numbers" is used in a subtype check."""
if expected_type.type.fullname in UNSUPPORTED_NUMBERS_TYPES:
notes.append('Types from "numbers" aren\'t supported for static type checking')
notes.append("See https://peps.python.org/pep-0484/#the-numeric-tower")
notes.append("Consider using a protocol instead, such as typing.SupportsFloat")
return notes
def make_inferred_type_note(
context: Context, subtype: Type, supertype: Type, supertype_str: str
) -> str:
"""Explain that the user may have forgotten to type a variable.
The user does not expect an error if the inferred container type is the same as the return
type of a function and the argument type(s) are a subtype of the argument type(s) of the
return type. This note suggests that they add a type annotation with the return type instead
of relying on the inferred type.
"""
subtype = get_proper_type(subtype)
supertype = get_proper_type(supertype)
if (
isinstance(subtype, Instance)
and isinstance(supertype, Instance)
and subtype.type.fullname == supertype.type.fullname
and subtype.args
and supertype.args
and isinstance(context, ReturnStmt)
and isinstance(context.expr, NameExpr)
and isinstance(context.expr.node, Var)
and context.expr.node.is_inferred
):
for subtype_arg, supertype_arg in zip(subtype.args, supertype.args):
if not is_subtype(subtype_arg, supertype_arg):
return ""
var_name = context.expr.name
return 'Perhaps you need a type annotation for "{}"? Suggestion: {}'.format(
var_name, supertype_str
)
return ""
def format_key_list(keys: list[str], *, short: bool = False) -> str:
formatted_keys = [f'"{key}"' for key in keys]
td = "" if short else "TypedDict "
if len(keys) == 0:
return f"no {td}keys"
elif len(keys) == 1:
return f"{td}key {formatted_keys[0]}"
else:
return f"{td}keys ({', '.join(formatted_keys)})"
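# Illustrative sketch (added for clarity; not part of the vendored file):
#   format_key_list([])                     -> "no TypedDict keys"
#   format_key_list(["x"])                  -> 'TypedDict key "x"'
#   format_key_list(["x", "y"], short=True) -> 'keys ("x", "y")'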
def ignore_last_known_values(t: UnionType) -> Type:
"""This will avoid types like str | str in error messages.
last_known_values are kept during union simplification, but may cause
weird formatting for e.g. tuples of literals.
"""
union_items: list[Type] = []
seen_instances = set()
for item in t.items:
if isinstance(item, ProperType) and isinstance(item, Instance):
erased = item.copy_modified(last_known_value=None)
if erased in seen_instances:
continue
seen_instances.add(erased)
union_items.append(erased)
else:
union_items.append(item)
return UnionType.make_union(union_items, t.line, t.column)
| algorandfoundation/puya | src/puyapy/_vendor/mypy/messages.py | Python | NOASSERTION | 130,191 |
"""Interfaces for accessing metadata.
We provide two implementations.
* The "classic" file system implementation, which uses a directory
structure of files.
* A hokey sqlite backed implementation, which basically simulates
the file system in an effort to work around poor file system performance
on OS X.
"""
from __future__ import annotations
import binascii
import os
import time
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Iterable
if TYPE_CHECKING:
# We avoid importing sqlite3 unless we are using it so we can mostly work
# on semi-broken pythons that are missing it.
import sqlite3
class MetadataStore:
"""Generic interface for metadata storage."""
@abstractmethod
def getmtime(self, name: str) -> float:
"""Read the mtime of a metadata entry.
Raises FileNotFound if the entry does not exist.
"""
@abstractmethod
def read(self, name: str) -> bytes:
"""Read the contents of a metadata entry.
Raises FileNotFound if the entry does not exist.
"""
@abstractmethod
def write(self, name: str, data: bytes, mtime: float | None = None) -> bool:
"""Write a metadata entry.
If mtime is specified, set it as the mtime of the entry. Otherwise,
the current time is used.
Returns True if the entry is successfully written, False otherwise.
"""
@abstractmethod
def remove(self, name: str) -> None:
"""Delete a metadata entry"""
@abstractmethod
def commit(self) -> None:
"""If the backing store requires a commit, do it.
But N.B. that this is not *guaranteed* to do anything, and
there is no guarantee that changes are not made until it is
called.
"""
@abstractmethod
def list_all(self) -> Iterable[str]: ...
def random_string() -> str:
return binascii.hexlify(os.urandom(8)).decode("ascii")
class FilesystemMetadataStore(MetadataStore):
def __init__(self, cache_dir_prefix: str) -> None:
# We check startswith instead of equality because the version
# will have already been appended by the time the cache dir is
# passed here.
if cache_dir_prefix.startswith(os.devnull):
self.cache_dir_prefix = None
else:
self.cache_dir_prefix = cache_dir_prefix
def getmtime(self, name: str) -> float:
if not self.cache_dir_prefix:
raise FileNotFoundError()
return int(os.path.getmtime(os.path.join(self.cache_dir_prefix, name)))
def read(self, name: str) -> bytes:
assert os.path.normpath(name) != os.path.abspath(name), "Don't use absolute paths!"
if not self.cache_dir_prefix:
raise FileNotFoundError()
with open(os.path.join(self.cache_dir_prefix, name), "rb") as f:
return f.read()
def write(self, name: str, data: bytes, mtime: float | None = None) -> bool:
assert os.path.normpath(name) != os.path.abspath(name), "Don't use absolute paths!"
if not self.cache_dir_prefix:
return False
path = os.path.join(self.cache_dir_prefix, name)
tmp_filename = path + "." + random_string()
try:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(tmp_filename, "wb") as f:
f.write(data)
os.replace(tmp_filename, path)
if mtime is not None:
os.utime(path, times=(mtime, mtime))
except OSError:
return False
return True
def remove(self, name: str) -> None:
if not self.cache_dir_prefix:
raise FileNotFoundError()
os.remove(os.path.join(self.cache_dir_prefix, name))
def commit(self) -> None:
pass
def list_all(self) -> Iterable[str]:
if not self.cache_dir_prefix:
return
for dir, _, files in os.walk(self.cache_dir_prefix):
dir = os.path.relpath(dir, self.cache_dir_prefix)
for file in files:
yield os.path.join(dir, file)
SCHEMA = """
CREATE TABLE IF NOT EXISTS files2 (
path TEXT UNIQUE NOT NULL,
mtime REAL,
data BLOB
);
CREATE INDEX IF NOT EXISTS path_idx on files2(path);
"""
def connect_db(db_file: str) -> sqlite3.Connection:
import sqlite3.dbapi2
db = sqlite3.dbapi2.connect(db_file)
db.executescript(SCHEMA)
return db
class SqliteMetadataStore(MetadataStore):
def __init__(self, cache_dir_prefix: str) -> None:
# We check startswith instead of equality because the version
# will have already been appended by the time the cache dir is
# passed here.
if cache_dir_prefix.startswith(os.devnull):
self.db = None
return
os.makedirs(cache_dir_prefix, exist_ok=True)
self.db = connect_db(os.path.join(cache_dir_prefix, "cache.db"))
def _query(self, name: str, field: str) -> Any:
# Raises FileNotFound for consistency with the file system version
if not self.db:
raise FileNotFoundError()
cur = self.db.execute(f"SELECT {field} FROM files2 WHERE path = ?", (name,))
results = cur.fetchall()
if not results:
raise FileNotFoundError()
assert len(results) == 1
return results[0][0]
def getmtime(self, name: str) -> float:
mtime = self._query(name, "mtime")
assert isinstance(mtime, float)
return mtime
def read(self, name: str) -> bytes:
data = self._query(name, "data")
assert isinstance(data, bytes)
return data
def write(self, name: str, data: bytes, mtime: float | None = None) -> bool:
import sqlite3
if not self.db:
return False
try:
if mtime is None:
mtime = time.time()
self.db.execute(
"INSERT OR REPLACE INTO files2(path, mtime, data) VALUES(?, ?, ?)",
(name, mtime, data),
)
except sqlite3.OperationalError:
return False
return True
def remove(self, name: str) -> None:
if not self.db:
raise FileNotFoundError()
self.db.execute("DELETE FROM files2 WHERE path = ?", (name,))
def commit(self) -> None:
if self.db:
self.db.commit()
def list_all(self) -> Iterable[str]:
if self.db:
for row in self.db.execute("SELECT path FROM files2"):
yield row[0]
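# Illustrative usage sketch (added for clarity; not part of the vendored file). It
# exercises the MetadataStore interface through the filesystem backend; the entry name
# and payload are made up, and mypy itself never calls this helper.
def _example_metadata_store_usage() -> None:
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        store: MetadataStore = FilesystemMetadataStore(tmp)
        assert store.write("pkg/mod.meta.json", b"{}")  # False would signal an I/O error
        assert store.read("pkg/mod.meta.json") == b"{}"
        store.getmtime("pkg/mod.meta.json")  # raises FileNotFoundError for missing entries
        store.commit()  # a no-op for the filesystem backend
        assert any(name.endswith("mod.meta.json") for name in store.list_all())
        store.remove("pkg/mod.meta.json")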
| algorandfoundation/puya | src/puyapy/_vendor/mypy/metastore.py | Python | NOASSERTION | 6,554 |
from __future__ import annotations
from mypy.nodes import (
AssertTypeExpr,
AssignmentStmt,
CastExpr,
ClassDef,
ForStmt,
FuncItem,
NamedTupleExpr,
NewTypeExpr,
PromoteExpr,
TypeAliasExpr,
TypeApplication,
TypedDictExpr,
TypeVarExpr,
Var,
WithStmt,
)
from mypy.traverser import TraverserVisitor
from mypy.types import Type
from mypy.typetraverser import TypeTraverserVisitor
class MixedTraverserVisitor(TraverserVisitor, TypeTraverserVisitor):
"""Recursive traversal of both Node and Type objects."""
def __init__(self) -> None:
self.in_type_alias_expr = False
# Symbol nodes
def visit_var(self, var: Var) -> None:
self.visit_optional_type(var.type)
def visit_func(self, o: FuncItem) -> None:
super().visit_func(o)
self.visit_optional_type(o.type)
def visit_class_def(self, o: ClassDef) -> None:
# TODO: Should we visit generated methods/variables as well, either here or in
# TraverserVisitor?
super().visit_class_def(o)
info = o.info
if info:
for base in info.bases:
base.accept(self)
def visit_type_alias_expr(self, o: TypeAliasExpr) -> None:
super().visit_type_alias_expr(o)
self.in_type_alias_expr = True
o.node.target.accept(self)
self.in_type_alias_expr = False
def visit_type_var_expr(self, o: TypeVarExpr) -> None:
super().visit_type_var_expr(o)
o.upper_bound.accept(self)
for value in o.values:
value.accept(self)
def visit_typeddict_expr(self, o: TypedDictExpr) -> None:
super().visit_typeddict_expr(o)
self.visit_optional_type(o.info.typeddict_type)
def visit_namedtuple_expr(self, o: NamedTupleExpr) -> None:
super().visit_namedtuple_expr(o)
assert o.info.tuple_type
o.info.tuple_type.accept(self)
def visit__promote_expr(self, o: PromoteExpr) -> None:
super().visit__promote_expr(o)
o.type.accept(self)
def visit_newtype_expr(self, o: NewTypeExpr) -> None:
super().visit_newtype_expr(o)
self.visit_optional_type(o.old_type)
# Statements
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
super().visit_assignment_stmt(o)
self.visit_optional_type(o.type)
def visit_for_stmt(self, o: ForStmt) -> None:
super().visit_for_stmt(o)
self.visit_optional_type(o.index_type)
def visit_with_stmt(self, o: WithStmt) -> None:
super().visit_with_stmt(o)
for typ in o.analyzed_types:
typ.accept(self)
# Expressions
def visit_cast_expr(self, o: CastExpr) -> None:
super().visit_cast_expr(o)
o.type.accept(self)
def visit_assert_type_expr(self, o: AssertTypeExpr) -> None:
super().visit_assert_type_expr(o)
o.type.accept(self)
def visit_type_application(self, o: TypeApplication) -> None:
super().visit_type_application(o)
for t in o.types:
t.accept(self)
# Helpers
def visit_optional_type(self, t: Type | None) -> None:
if t:
t.accept(self)
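# Illustrative sketch (added for clarity; not part of the vendored file): a minimal
# subclass showing the intended use. Because MixedTraverserVisitor walks both AST nodes
# and the types attached to them, overriding a single type-visit method is enough to see
# every Instance referenced in a semantically analyzed tree (driven via tree.accept(...)).
# The collector class itself is made up.
from mypy.types import Instance
class _InstanceNameCollector(MixedTraverserVisitor):
    def __init__(self) -> None:
        super().__init__()
        self.fullnames: set[str] = set()
    def visit_instance(self, t: Instance) -> None:
        self.fullnames.add(t.type.fullname)
        super().visit_instance(t)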
| algorandfoundation/puya | src/puyapy/_vendor/mypy/mixedtraverser.py | Python | NOASSERTION | 3,205 |
"""Low-level infrastructure to find modules.
This builds on fscache.py; find_sources.py builds on top of this.
"""
from __future__ import annotations
import ast
import collections
import functools
import os
import re
import subprocess
import sys
from enum import Enum, unique
from typing import Dict, Final, List, NamedTuple, Optional, Tuple, Union
from typing_extensions import TypeAlias as _TypeAlias
from mypy import pyinfo
from mypy.errors import CompileError
from mypy.fscache import FileSystemCache
from mypy.nodes import MypyFile
from mypy.options import Options
from mypy.stubinfo import approved_stub_package_exists
from mypy.util import os_path_join
# Paths to be searched in find_module().
class SearchPaths(NamedTuple):
python_path: tuple[str, ...] # where user code is found
mypy_path: tuple[str, ...] # from $MYPYPATH or config variable
package_path: tuple[str, ...] # from get_site_packages_dirs()
typeshed_path: tuple[str, ...] # paths in typeshed
# Package dirs are a two-tuple of path to search and whether to verify the module
OnePackageDir = Tuple[str, bool]
PackageDirs = List[OnePackageDir]
# Minimum and maximum Python versions for modules in stdlib as (major, minor)
StdlibVersions: _TypeAlias = Dict[str, Tuple[Tuple[int, int], Optional[Tuple[int, int]]]]
PYTHON_EXTENSIONS: Final = [".pyi", ".py"]
# TODO: Consider adding more reasons here?
# E.g. if we deduce a module would likely be found if the user were
# to set the --namespace-packages flag.
@unique
class ModuleNotFoundReason(Enum):
# The module was not found: we found neither stubs nor a plausible code
# implementation (with or without a py.typed file).
NOT_FOUND = 0
# The implementation for this module plausibly exists (e.g. we
# found a matching folder or *.py file), but either the parent package
# did not contain a py.typed file or we were unable to find a
# corresponding *-stubs package.
FOUND_WITHOUT_TYPE_HINTS = 1
# The module was not found in the current working directory, but
# was able to be found in the parent directory.
WRONG_WORKING_DIRECTORY = 2
# Stub PyPI package (typically types-pkgname) known to exist but not installed.
APPROVED_STUBS_NOT_INSTALLED = 3
def error_message_templates(self, daemon: bool) -> tuple[str, list[str]]:
doc_link = "See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports"
if self is ModuleNotFoundReason.NOT_FOUND:
msg = 'Cannot find implementation or library stub for module named "{module}"'
notes = [doc_link]
elif self is ModuleNotFoundReason.WRONG_WORKING_DIRECTORY:
msg = 'Cannot find implementation or library stub for module named "{module}"'
notes = [
"You may be running mypy in a subpackage, "
"mypy should be run on the package root"
]
elif self is ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS:
msg = (
'Skipping analyzing "{module}": module is installed, but missing library stubs '
"or py.typed marker"
)
notes = [doc_link]
elif self is ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED:
msg = 'Library stubs not installed for "{module}"'
notes = ['Hint: "python3 -m pip install {stub_dist}"']
if not daemon:
notes.append(
'(or run "mypy --install-types" to install all missing stub packages)'
)
notes.append(doc_link)
else:
assert False
return msg, notes
# If we found the module, returns the path to the module as a str.
# Otherwise, returns the reason why the module wasn't found.
ModuleSearchResult = Union[str, ModuleNotFoundReason]
class BuildSource:
"""A single source file."""
def __init__(
self,
path: str | None,
module: str | None,
text: str | None = None,
base_dir: str | None = None,
followed: bool = False,
) -> None:
self.path = path # File where it's found (e.g. 'xxx/yyy/foo/bar.py')
self.module = module or "__main__" # Module name (e.g. 'foo.bar')
self.text = text # Source code, if initially supplied, else None
self.base_dir = base_dir # Directory where the package is rooted (e.g. 'xxx/yyy')
self.followed = followed # Was this found by following imports?
def __repr__(self) -> str:
return (
"BuildSource(path={!r}, module={!r}, has_text={}, base_dir={!r}, followed={})".format(
self.path, self.module, self.text is not None, self.base_dir, self.followed
)
)
class BuildSourceSet:
"""Helper to efficiently test a file's membership in a set of build sources."""
def __init__(self, sources: list[BuildSource]) -> None:
self.source_text_present = False
self.source_modules: dict[str, str] = {}
self.source_paths: set[str] = set()
for source in sources:
if source.text is not None:
self.source_text_present = True
if source.path:
self.source_paths.add(source.path)
if source.module:
self.source_modules[source.module] = source.path or ""
def is_source(self, file: MypyFile) -> bool:
return (
(file.path and file.path in self.source_paths)
or file._fullname in self.source_modules
or self.source_text_present
)
class FindModuleCache:
"""Module finder with integrated cache.
Module locations and some intermediate results are cached internally
and can be cleared with the clear() method.
All file system accesses are performed through a FileSystemCache,
which is not ever cleared by this class. If necessary it must be
cleared by client code.
"""
def __init__(
self,
search_paths: SearchPaths,
fscache: FileSystemCache | None,
options: Options | None,
stdlib_py_versions: StdlibVersions | None = None,
source_set: BuildSourceSet | None = None,
) -> None:
self.search_paths = search_paths
self.source_set = source_set
self.fscache = fscache or FileSystemCache()
# Cache for get_toplevel_possibilities:
# search_paths -> (toplevel_id -> list(package_dirs))
self.initial_components: dict[tuple[str, ...], dict[str, list[str]]] = {}
# Cache find_module: id -> result
self.results: dict[str, ModuleSearchResult] = {}
self.ns_ancestors: dict[str, str] = {}
self.options = options
custom_typeshed_dir = None
if options:
custom_typeshed_dir = options.custom_typeshed_dir
self.stdlib_py_versions = stdlib_py_versions or load_stdlib_py_versions(
custom_typeshed_dir
)
def clear(self) -> None:
self.results.clear()
self.initial_components.clear()
self.ns_ancestors.clear()
def find_module_via_source_set(self, id: str) -> ModuleSearchResult | None:
"""Fast path to find modules by looking through the input sources
This is only used when --fast-module-lookup is passed on the command line."""
if not self.source_set:
return None
p = self.source_set.source_modules.get(id, None)
if p and self.fscache.isfile(p):
# We need to make sure we still have __init__.py all the way up
# otherwise we might have false positives compared to slow path
# in case of deletion of init files, which is covered by some tests.
# TODO: are there some combination of flags in which this check should be skipped?
d = os.path.dirname(p)
for _ in range(id.count(".")):
if not any(
self.fscache.isfile(os_path_join(d, "__init__" + x)) for x in PYTHON_EXTENSIONS
):
return None
d = os.path.dirname(d)
return p
idx = id.rfind(".")
if idx != -1:
# When we're looking for foo.bar.baz and can't find a matching module
# in the source set, look up for a foo.bar module.
parent = self.find_module_via_source_set(id[:idx])
if parent is None or not isinstance(parent, str):
return None
basename, ext = os.path.splitext(parent)
if not any(parent.endswith("__init__" + x) for x in PYTHON_EXTENSIONS) and (
ext in PYTHON_EXTENSIONS and not self.fscache.isdir(basename)
):
# If we do find such a *module* (and crucially, we don't want a package,
# hence the filtering out of __init__ files, and checking for the presence
# of a folder with a matching name), then we can be pretty confident that
# 'baz' will either be a top-level variable in foo.bar, or will not exist.
#
# Either way, spelunking in other search paths for another 'foo.bar.baz'
# module should be avoided because:
# 1. in the unlikely event that one were found, it's highly likely that
# it would be unrelated to the source being typechecked and therefore
# more likely to lead to erroneous results
# 2. as described in _find_module, in some cases the search itself could
# potentially waste significant amounts of time
return ModuleNotFoundReason.NOT_FOUND
return None
def find_lib_path_dirs(self, id: str, lib_path: tuple[str, ...]) -> PackageDirs:
"""Find which elements of a lib_path contain the directory that a module needs in order to exist.
This is run for the python_path, mypy_path, and typeshed_path search paths.
"""
components = id.split(".")
dir_chain = os.sep.join(components[:-1]) # e.g., 'foo/bar'
dirs = []
for pathitem in self.get_toplevel_possibilities(lib_path, components[0]):
# e.g., '/usr/lib/python3.4/foo/bar'
dir = os.path.normpath(os_path_join(pathitem, dir_chain))
if self.fscache.isdir(dir):
dirs.append((dir, True))
return dirs
def get_toplevel_possibilities(self, lib_path: tuple[str, ...], id: str) -> list[str]:
"""Find which elements of lib_path could contain a particular top-level module.
In practice, almost all modules can be routed to the correct entry in
lib_path by looking at just the first component of the module name.
We take advantage of this by enumerating the contents of all of the
directories on the lib_path and building a map of which entries in
the lib_path could contain each potential top-level module that appears.
"""
if lib_path in self.initial_components:
return self.initial_components[lib_path].get(id, [])
# Enumerate all the files in the directories on lib_path and produce the map
components: dict[str, list[str]] = {}
for dir in lib_path:
try:
contents = self.fscache.listdir(dir)
except OSError:
contents = []
# False positives are fine for correctness here, since we will check
# precisely later, so we only look at the root of every filename without
# any concern for the exact details.
for name in contents:
name = os.path.splitext(name)[0]
components.setdefault(name, []).append(dir)
self.initial_components[lib_path] = components
return components.get(id, [])
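# Illustrative sketch (added for clarity; not part of the vendored file): with
# lib_path == ("/src", "/stubs"), where /src contains "foo/" and "bar.py" and /stubs
# contains only "foo/", the cached map is {"foo": ["/src", "/stubs"], "bar": ["/src"]},
# so get_toplevel_possibilities(lib_path, "foo") returns ["/src", "/stubs"] while an
# unknown top-level name returns [].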
def find_module(self, id: str, *, fast_path: bool = False) -> ModuleSearchResult:
"""Return the path of the module source file or why it wasn't found.
If fast_path is True, prioritize performance over generating detailed
error descriptions.
"""
if id not in self.results:
top_level = id.partition(".")[0]
use_typeshed = True
if id in self.stdlib_py_versions:
use_typeshed = self._typeshed_has_version(id)
elif top_level in self.stdlib_py_versions:
use_typeshed = self._typeshed_has_version(top_level)
self.results[id] = self._find_module(id, use_typeshed)
if (
not (fast_path or (self.options is not None and self.options.fast_module_lookup))
and self.results[id] is ModuleNotFoundReason.NOT_FOUND
and self._can_find_module_in_parent_dir(id)
):
self.results[id] = ModuleNotFoundReason.WRONG_WORKING_DIRECTORY
return self.results[id]
def _typeshed_has_version(self, module: str) -> bool:
if not self.options:
return True
version = typeshed_py_version(self.options)
min_version, max_version = self.stdlib_py_versions[module]
return version >= min_version and (max_version is None or version <= max_version)
def _find_module_non_stub_helper(
self, components: list[str], pkg_dir: str
) -> OnePackageDir | ModuleNotFoundReason:
plausible_match = False
dir_path = pkg_dir
for index, component in enumerate(components):
dir_path = os_path_join(dir_path, component)
if self.fscache.isfile(os_path_join(dir_path, "py.typed")):
return os.path.join(pkg_dir, *components[:-1]), index == 0
elif not plausible_match and (
self.fscache.isdir(dir_path) or self.fscache.isfile(dir_path + ".py")
):
plausible_match = True
# If this is not a directory then we can't traverse further into it
if not self.fscache.isdir(dir_path):
break
if approved_stub_package_exists(".".join(components)):
return ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED
if plausible_match:
return ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS
else:
return ModuleNotFoundReason.NOT_FOUND
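# Illustrative sketch (added for clarity; not part of the vendored file), for
# components == ["pkg", "mod"] and pkg_dir == "/site-packages":
#   * /site-packages/pkg/py.typed exists     -> returns ("/site-packages/pkg", True)
#   * only /site-packages/pkg/mod.py exists  -> FOUND_WITHOUT_TYPE_HINTS, unless an
#     approved types-* stub is known, which yields APPROVED_STUBS_NOT_INSTALLED instead
#   * nothing plausible on disk and no known stub package -> NOT_FOUND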
def _update_ns_ancestors(self, components: list[str], match: tuple[str, bool]) -> None:
path, verify = match
for i in range(1, len(components)):
pkg_id = ".".join(components[:-i])
if pkg_id not in self.ns_ancestors and self.fscache.isdir(path):
self.ns_ancestors[pkg_id] = path
path = os.path.dirname(path)
def _can_find_module_in_parent_dir(self, id: str) -> bool:
"""Test if a module can be found by checking the parent directories
of the current working directory.
"""
working_dir = os.getcwd()
parent_search = FindModuleCache(
SearchPaths((), (), (), ()),
self.fscache,
self.options,
stdlib_py_versions=self.stdlib_py_versions,
)
while any(is_init_file(file) for file in os.listdir(working_dir)):
working_dir = os.path.dirname(working_dir)
parent_search.search_paths = SearchPaths((working_dir,), (), (), ())
if not isinstance(parent_search._find_module(id, False), ModuleNotFoundReason):
return True
return False
def _find_module(self, id: str, use_typeshed: bool) -> ModuleSearchResult:
fscache = self.fscache
# Fast path for any modules in the current source set.
# This is particularly important when there are a large number of search
# paths which share the first (few) component(s) due to the use of namespace
# packages, for instance:
# foo/
# company/
# __init__.py
# foo/
# bar/
# company/
# __init__.py
# bar/
# baz/
# company/
# __init__.py
# baz/
#
# mypy gets [foo/company/foo, bar/company/bar, baz/company/baz, ...] as input
# and computes [foo, bar, baz, ...] as the module search path.
#
# This would result in O(n) search for every import of company.*, leading to
# O(n**2) behavior in load_graph as such imports are unsurprisingly present
# at least once, and usually many more times than that, in each and every file
# being parsed.
#
# Thankfully, such cases are efficiently handled by looking up the module path
# via BuildSourceSet.
p = (
self.find_module_via_source_set(id)
if (self.options is not None and self.options.fast_module_lookup)
else None
)
if p:
return p
# If we're looking for a module like 'foo.bar.baz', it's likely that most of the
# many elements of lib_path don't even have a subdirectory 'foo/bar'. Discover
# that only once and cache it for when we look for modules like 'foo.bar.blah'
# that will require the same subdirectory.
components = id.split(".")
dir_chain = os.sep.join(components[:-1]) # e.g., 'foo/bar'
# We have two sets of folders so that we collect *all* stubs folders and
# put them in the front of the search path
third_party_inline_dirs: PackageDirs = []
third_party_stubs_dirs: PackageDirs = []
found_possible_third_party_missing_type_hints = False
need_installed_stubs = False
# Third-party stub/typed packages
for pkg_dir in self.search_paths.package_path:
stub_name = components[0] + "-stubs"
stub_dir = os_path_join(pkg_dir, stub_name)
if fscache.isdir(stub_dir):
stub_typed_file = os_path_join(stub_dir, "py.typed")
stub_components = [stub_name] + components[1:]
path = os.path.join(pkg_dir, *stub_components[:-1])
if fscache.isdir(path):
if fscache.isfile(stub_typed_file):
# Stub packages can have a py.typed file, which must include
# 'partial\n' to make the package partial
# Partial here means that mypy should look at the runtime
# package if installed.
if fscache.read(stub_typed_file).decode().strip() == "partial":
runtime_path = os_path_join(pkg_dir, dir_chain)
third_party_inline_dirs.append((runtime_path, True))
# if the package is partial, we don't verify the module, as
# the partial stub package may not have a __init__.pyi
third_party_stubs_dirs.append((path, False))
else:
# handle the edge case where people put a py.typed file
# in a stub package, but it isn't partial
third_party_stubs_dirs.append((path, True))
else:
third_party_stubs_dirs.append((path, True))
non_stub_match = self._find_module_non_stub_helper(components, pkg_dir)
if isinstance(non_stub_match, ModuleNotFoundReason):
if non_stub_match is ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS:
found_possible_third_party_missing_type_hints = True
elif non_stub_match is ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED:
need_installed_stubs = True
else:
third_party_inline_dirs.append(non_stub_match)
self._update_ns_ancestors(components, non_stub_match)
if self.options and self.options.use_builtins_fixtures:
# Everything should be in fixtures.
third_party_inline_dirs.clear()
third_party_stubs_dirs.clear()
found_possible_third_party_missing_type_hints = False
python_mypy_path = self.search_paths.mypy_path + self.search_paths.python_path
candidate_base_dirs = self.find_lib_path_dirs(id, python_mypy_path)
if use_typeshed:
# Search for stdlib stubs in typeshed before installed
# stubs to avoid picking up backports (dataclasses, for
# example) when the library is included in stdlib.
candidate_base_dirs += self.find_lib_path_dirs(id, self.search_paths.typeshed_path)
candidate_base_dirs += third_party_stubs_dirs + third_party_inline_dirs
# If we're looking for a module like 'foo.bar.baz', then candidate_base_dirs now
# contains just the subdirectories 'foo/bar' that actually exist under the
# elements of lib_path. This is probably much shorter than lib_path itself.
# Now just look for 'baz.pyi', 'baz/__init__.py', etc., inside those directories.
seplast = os.sep + components[-1] # so e.g. '/baz'
sepinit = os.sep + "__init__"
near_misses = [] # Collect near misses for namespace mode (see below).
for base_dir, verify in candidate_base_dirs:
base_path = base_dir + seplast # so e.g. '/usr/lib/python3.4/foo/bar/baz'
has_init = False
dir_prefix = base_dir
for _ in range(len(components) - 1):
dir_prefix = os.path.dirname(dir_prefix)
# Prefer package over module, i.e. baz/__init__.py* over baz.py*.
for extension in PYTHON_EXTENSIONS:
path = base_path + sepinit + extension
path_stubs = base_path + "-stubs" + sepinit + extension
if fscache.isfile_case(path, dir_prefix):
has_init = True
if verify and not verify_module(fscache, id, path, dir_prefix):
near_misses.append((path, dir_prefix))
continue
return path
elif fscache.isfile_case(path_stubs, dir_prefix):
if verify and not verify_module(fscache, id, path_stubs, dir_prefix):
near_misses.append((path_stubs, dir_prefix))
continue
return path_stubs
# In namespace mode, register a potential namespace package
if self.options and self.options.namespace_packages:
if (
not has_init
and fscache.exists_case(base_path, dir_prefix)
and not fscache.isfile_case(base_path, dir_prefix)
):
near_misses.append((base_path, dir_prefix))
# No package, look for module.
for extension in PYTHON_EXTENSIONS:
path = base_path + extension
if fscache.isfile_case(path, dir_prefix):
if verify and not verify_module(fscache, id, path, dir_prefix):
near_misses.append((path, dir_prefix))
continue
return path
# In namespace mode, re-check those entries that had 'verify'.
# Assume search path entries xxx, yyy and zzz, and we're
# looking for foo.bar.baz. Suppose near_misses has:
#
# - xxx/foo/bar/baz.py
# - yyy/foo/bar/baz/__init__.py
# - zzz/foo/bar/baz.pyi
#
# If any of the foo directories has __init__.py[i], it wins.
# Else, we look for foo/bar/__init__.py[i], etc. If there are
# none, the first hit wins. Note that this does not take into
# account whether the lowest-level module is a file (baz.py),
# a package (baz/__init__.py), or a stub file (baz.pyi) -- for
# these the first one encountered along the search path wins.
#
# The helper function highest_init_level() returns an int that
# indicates the highest level at which a __init__.py[i] file
# is found; if no __init__ was found it returns 0, if we find
# only foo/bar/__init__.py it returns 1, and if we have
# foo/__init__.py it returns 2 (regardless of what's in
# foo/bar). It doesn't look higher than that.
if self.options and self.options.namespace_packages and near_misses:
levels = [
highest_init_level(fscache, id, path, dir_prefix)
for path, dir_prefix in near_misses
]
index = levels.index(max(levels))
return near_misses[index][0]
# Finally, we may be asked to produce an ancestor for an
# installed package with a py.typed marker that is a
# subpackage of a namespace package. We only fess up to these
# if we would otherwise return "not found".
ancestor = self.ns_ancestors.get(id)
if ancestor is not None:
return ancestor
if need_installed_stubs:
return ModuleNotFoundReason.APPROVED_STUBS_NOT_INSTALLED
elif found_possible_third_party_missing_type_hints:
return ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS
else:
return ModuleNotFoundReason.NOT_FOUND
def find_modules_recursive(self, module: str) -> list[BuildSource]:
module_path = self.find_module(module, fast_path=True)
if isinstance(module_path, ModuleNotFoundReason):
return []
sources = [BuildSource(module_path, module, None)]
package_path = None
if is_init_file(module_path):
package_path = os.path.dirname(module_path)
elif self.fscache.isdir(module_path):
package_path = module_path
if package_path is None:
return sources
# This logic closely mirrors that in find_sources. One small but important difference is
# that we do not sort names with keyfunc. The recursive call to find_modules_recursive
# calls find_module, which will handle the preference between packages, pyi and py.
# Another difference is it doesn't handle nested search paths / package roots.
seen: set[str] = set()
names = sorted(self.fscache.listdir(package_path))
for name in names:
# Skip certain names altogether
if name in ("__pycache__", "site-packages", "node_modules") or name.startswith("."):
continue
subpath = os_path_join(package_path, name)
if self.options and matches_exclude(
subpath, self.options.exclude, self.fscache, self.options.verbosity >= 2
):
continue
if self.fscache.isdir(subpath):
# Only recurse into packages
if (self.options and self.options.namespace_packages) or (
self.fscache.isfile(os_path_join(subpath, "__init__.py"))
or self.fscache.isfile(os_path_join(subpath, "__init__.pyi"))
):
seen.add(name)
sources.extend(self.find_modules_recursive(module + "." + name))
else:
stem, suffix = os.path.splitext(name)
if stem == "__init__":
continue
if stem not in seen and "." not in stem and suffix in PYTHON_EXTENSIONS:
# (If we sorted names by keyfunc) we could probably just make the BuildSource
# ourselves, but this ensures compatibility with find_module / the cache
seen.add(stem)
sources.extend(self.find_modules_recursive(module + "." + stem))
return sources
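# Illustrative example (hypothetical layout, not part of mypy): for a tree
#     pkg/__init__.py
#     pkg/mod.py
#     pkg/sub/__init__.pyi
# find_modules_recursive("pkg") returns BuildSources for "pkg", "pkg.mod" and
# "pkg.sub", while names such as "__pycache__" or anything matching --exclude
# are skipped entirely.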
def matches_exclude(
subpath: str, excludes: list[str], fscache: FileSystemCache, verbose: bool
) -> bool:
if not excludes:
return False
subpath_str = os.path.relpath(subpath).replace(os.sep, "/")
if fscache.isdir(subpath):
subpath_str += "/"
for exclude in excludes:
if re.search(exclude, subpath_str):
if verbose:
print(
f"TRACE: Excluding {subpath_str} (matches pattern {exclude})", file=sys.stderr
)
return True
return False
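# Minimal sketch of the exclusion check above, without the FileSystemCache
# dependency (the pattern and path below are made-up examples, not mypy defaults).
def _matches_exclude_example() -> bool:
    import re
    normalised = "src/generated/foo.py"  # path already rewritten to "/" separators
    return bool(re.search(r"generated/", normalised))  # True -> the file would be excluded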
def is_init_file(path: str) -> bool:
return os.path.basename(path) in ("__init__.py", "__init__.pyi")
def verify_module(fscache: FileSystemCache, id: str, path: str, prefix: str) -> bool:
"""Check that all packages containing id have a __init__ file."""
if is_init_file(path):
path = os.path.dirname(path)
for i in range(id.count(".")):
path = os.path.dirname(path)
if not any(
fscache.isfile_case(os_path_join(path, f"__init__{extension}"), prefix)
for extension in PYTHON_EXTENSIONS
):
return False
return True
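# Illustrative example (hypothetical paths): for id "foo.bar.baz" found at
# "site-packages/foo/bar/baz.py", verify_module requires that both
# "site-packages/foo/bar/__init__.py[i]" and "site-packages/foo/__init__.py[i]"
# exist; if either is missing, the caller records the hit only as a near miss.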
def highest_init_level(fscache: FileSystemCache, id: str, path: str, prefix: str) -> int:
"""Compute the highest level where an __init__ file is found."""
if is_init_file(path):
path = os.path.dirname(path)
level = 0
for i in range(id.count(".")):
path = os.path.dirname(path)
if any(
fscache.isfile_case(os_path_join(path, f"__init__{extension}"), prefix)
for extension in PYTHON_EXTENSIONS
):
level = i + 1
return level
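# Worked example (hypothetical layout): for id "foo.bar.baz" at "foo/bar/baz.py",
# the loop inspects "foo/bar" (level 1) and then "foo" (level 2). If only
# "foo/bar/__init__.py" exists the result is 1; if "foo/__init__.py" exists the
# result is 2, regardless of what is in "foo/bar".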
def mypy_path() -> list[str]:
path_env = os.getenv("MYPYPATH")
if not path_env:
return []
return path_env.split(os.pathsep)
def default_lib_path(
data_dir: str, pyversion: tuple[int, int], custom_typeshed_dir: str | None
) -> list[str]:
"""Return default standard library search paths. Guaranteed to be normalised."""
data_dir = os.path.abspath(data_dir)
path: list[str] = []
if custom_typeshed_dir:
custom_typeshed_dir = os.path.abspath(custom_typeshed_dir)
typeshed_dir = os.path.join(custom_typeshed_dir, "stdlib")
mypy_extensions_dir = os.path.join(custom_typeshed_dir, "stubs", "mypy-extensions")
versions_file = os.path.join(typeshed_dir, "VERSIONS")
if not os.path.isdir(typeshed_dir) or not os.path.isfile(versions_file):
print(
"error: --custom-typeshed-dir does not point to a valid typeshed ({})".format(
custom_typeshed_dir
)
)
sys.exit(2)
else:
auto = os.path.join(data_dir, "stubs-auto")
if os.path.isdir(auto):
data_dir = auto
typeshed_dir = os.path.join(data_dir, "typeshed", "stdlib")
mypy_extensions_dir = os.path.join(data_dir, "typeshed", "stubs", "mypy-extensions")
path.append(typeshed_dir)
# Get mypy-extensions stubs from typeshed, since we treat it as an
# "internal" library, similar to typing and typing-extensions.
path.append(mypy_extensions_dir)
# Add fallback path that can be used if we have a broken installation.
if sys.platform != "win32":
path.append("/usr/local/lib/mypy")
if not path:
print(
"Could not resolve typeshed subdirectories. Your mypy install is broken.\n"
"Python executable is located at {}.\nMypy located at {}".format(
sys.executable, data_dir
),
file=sys.stderr,
)
sys.exit(1)
return path
@functools.lru_cache(maxsize=None)
def get_search_dirs(python_executable: str | None) -> tuple[list[str], list[str]]:
"""Find package directories for given python. Guaranteed to return absolute paths.
This runs a subprocess call, which generates a list of the directories in sys.path.
To avoid repeatedly calling a subprocess (which can be slow!) we
lru_cache the results.
"""
if python_executable is None:
return ([], [])
elif python_executable == sys.executable:
# Use running Python's package dirs
sys_path, site_packages = pyinfo.getsearchdirs()
else:
# Use subprocess to get the package directory of given Python
# executable
env = {**dict(os.environ), "PYTHONSAFEPATH": "1"}
try:
sys_path, site_packages = ast.literal_eval(
subprocess.check_output(
[python_executable, pyinfo.__file__, "getsearchdirs"],
env=env,
stderr=subprocess.PIPE,
).decode()
)
except subprocess.CalledProcessError as err:
print(err.stderr)
print(err.stdout)
raise
except OSError as err:
reason = os.strerror(err.errno)
raise CompileError(
[f"mypy: Invalid python executable '{python_executable}': {reason}"]
) from err
return sys_path, site_packages
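# Illustrative note (output shape assumed from pyinfo's repr-style printing): the
# child process writes a Python literal such as
#     (['/usr/lib/python3.12', ...], ['/usr/lib/python3.12/site-packages'])
# to stdout, which ast.literal_eval above turns back into the
# (sys_path, site_packages) pair.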
def compute_search_paths(
sources: list[BuildSource], options: Options, data_dir: str, alt_lib_path: str | None = None
) -> SearchPaths:
"""Compute the search paths as specified in PEP 561.
The following 4 members are created:
- User code (from `sources`)
- MYPYPATH (set either via config or environment variable)
- installed package directories (which will later be split into stub-only and inline)
- typeshed
"""
# Determine the default module search path.
lib_path = collections.deque(
default_lib_path(
data_dir, options.python_version, custom_typeshed_dir=options.custom_typeshed_dir
)
)
if options.use_builtins_fixtures:
# Use stub builtins (to speed up test cases and to make them easier to
# debug). This is a test-only feature, so assume our files are laid out
# as in the source tree.
# We also need to allow overriding where to look for it. Argh.
root_dir = os.getenv("MYPY_TEST_PREFIX", None)
if not root_dir:
root_dir = os.path.dirname(os.path.dirname(__file__))
root_dir = os.path.abspath(root_dir)
lib_path.appendleft(os.path.join(root_dir, "test-data", "unit", "lib-stub"))
# alt_lib_path is used by some tests to bypass the normal lib_path mechanics.
# If we don't have one, grab directories of source files.
python_path: list[str] = []
if not alt_lib_path:
for source in sources:
# Include directory of the program file in the module search path.
if source.base_dir:
dir = source.base_dir
if dir not in python_path:
python_path.append(dir)
# Do this even if running as a file, for sanity (mainly because with
# multiple builds, there could be a mix of files/modules, so it's easier
# to just define the semantics that we always add the current directory
# to the lib_path).
# TODO: Don't do this in some cases; for motivation see
# https://github.com/python/mypy/issues/4195#issuecomment-341915031
if options.bazel:
dir = "."
else:
dir = os.getcwd()
if dir not in lib_path:
python_path.insert(0, dir)
# Start with a MYPYPATH environment variable at the front of the mypy_path, if defined.
mypypath = mypy_path()
# Add a config-defined mypy path.
mypypath.extend(options.mypy_path)
# If provided, insert the caller-supplied extra module path to the
# beginning (highest priority) of the search path.
if alt_lib_path:
mypypath.insert(0, alt_lib_path)
sys_path, site_packages = get_search_dirs(options.python_executable)
# We only use site packages for this check
for site in site_packages:
assert site not in lib_path
if (
site in mypypath
or any(p.startswith(site + os.path.sep) for p in mypypath)
or (os.path.altsep and any(p.startswith(site + os.path.altsep) for p in mypypath))
):
print(f"{site} is in the MYPYPATH. Please remove it.", file=sys.stderr)
print(
"See https://mypy.readthedocs.io/en/stable/running_mypy.html"
"#how-mypy-handles-imports for more info",
file=sys.stderr,
)
sys.exit(1)
return SearchPaths(
python_path=tuple(reversed(python_path)),
mypy_path=tuple(mypypath),
# package_path and typeshed_path must be normalised and absolute via os.path.abspath
package_path=tuple(sys_path + site_packages),
typeshed_path=tuple(lib_path),
)
def load_stdlib_py_versions(custom_typeshed_dir: str | None) -> StdlibVersions:
"""Return dict with minimum and maximum Python versions of stdlib modules.
The contents look like
{..., 'secrets': ((3, 6), None), 'symbol': ((2, 7), (3, 9)), ...}
None means there is no maximum version.
"""
typeshed_dir = custom_typeshed_dir or os_path_join(os.path.dirname(__file__), "typeshed")
stdlib_dir = os_path_join(typeshed_dir, "stdlib")
result = {}
versions_path = os_path_join(stdlib_dir, "VERSIONS")
assert os.path.isfile(versions_path), (custom_typeshed_dir, versions_path, __file__)
with open(versions_path) as f:
for line in f:
line = line.split("#")[0].strip()
if line == "":
continue
module, version_range = line.split(":")
versions = version_range.split("-")
min_version = parse_version(versions[0])
max_version = (
parse_version(versions[1]) if len(versions) >= 2 and versions[1].strip() else None
)
result[module] = min_version, max_version
return result
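# Minimal sketch of how a typeshed VERSIONS entry maps onto the dict built above
# (the two lines below are illustrative examples, not the real file contents).
def _versions_line_example() -> dict[str, tuple[tuple[int, int], tuple[int, int] | None]]:
    example = {}
    for line in ["secrets: 3.6-", "symbol: 2.7-3.9  # removed in 3.10"]:
        line = line.split("#")[0].strip()
        module, version_range = line.split(":")
        versions = version_range.split("-")
        min_version = parse_version(versions[0])
        max_version = (
            parse_version(versions[1]) if len(versions) >= 2 and versions[1].strip() else None
        )
        example[module] = (min_version, max_version)
    return example  # {"secrets": ((3, 6), None), "symbol": ((2, 7), (3, 9))}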
def parse_version(version: str) -> tuple[int, int]:
major, minor = version.strip().split(".")
return int(major), int(minor)
def typeshed_py_version(options: Options) -> tuple[int, int]:
"""Return Python version used for checking whether module supports typeshed."""
# Typeshed no longer covers Python 3.x versions before 3.8, so 3.8 is
# the earliest we can support.
return max(options.python_version, (3, 8))
| algorandfoundation/puya | src/puyapy/_vendor/mypy/modulefinder.py | Python | NOASSERTION | 38,192 |
"""Basic introspection of modules."""
from __future__ import annotations
import importlib
import inspect
import os
import pkgutil
import queue
import sys
from multiprocessing import Queue, get_context
from types import ModuleType
class ModuleProperties:
# Note that all __init__ args must have default values
def __init__(
self,
name: str = "",
file: str | None = None,
path: list[str] | None = None,
all: list[str] | None = None,
is_c_module: bool = False,
subpackages: list[str] | None = None,
) -> None:
self.name = name # __name__ attribute
self.file = file # __file__ attribute
self.path = path # __path__ attribute
self.all = all # __all__ attribute
self.is_c_module = is_c_module
self.subpackages = subpackages or []
def is_c_module(module: ModuleType) -> bool:
if module.__dict__.get("__file__") is None:
# Could be a namespace package. These must be handled through
# introspection, since there is no source file.
return True
return os.path.splitext(module.__dict__["__file__"])[-1] in [".so", ".pyd", ".dll"]
def is_pyc_only(file: str | None) -> bool:
return bool(file and file.endswith(".pyc") and not os.path.exists(file[:-1]))
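# Illustrative examples (hypothetical modules): an extension module whose
# __file__ ends in ".so"/".pyd"/".dll" is reported as a C module, and so is a
# namespace package with no __file__ at all, since neither has Python source to
# inspect; is_pyc_only flags a lone "mod.pyc" whose "mod.py" has been removed.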
class InspectError(Exception):
pass
def get_package_properties(package_id: str) -> ModuleProperties:
"""Use runtime introspection to get information about a module/package."""
try:
package = importlib.import_module(package_id)
except BaseException as e:
raise InspectError(str(e)) from e
name = getattr(package, "__name__", package_id)
file = getattr(package, "__file__", None)
path: list[str] | None = getattr(package, "__path__", None)
if not isinstance(path, list):
path = None
pkg_all = getattr(package, "__all__", None)
if pkg_all is not None:
try:
pkg_all = list(pkg_all)
except Exception:
pkg_all = None
is_c = is_c_module(package)
if path is None:
# Object has no path; this means it's either a module inside a package
# (and thus no sub-packages), or it could be a C extension package.
if is_c:
# This is a C extension module, now get the list of all sub-packages
# using the inspect module
subpackages = [
package.__name__ + "." + name
for name, val in inspect.getmembers(package)
if inspect.ismodule(val) and val.__name__ == package.__name__ + "." + name
]
else:
# It's a module inside a package. There's nothing else to walk/yield.
subpackages = []
else:
all_packages = pkgutil.walk_packages(
path, prefix=package.__name__ + ".", onerror=lambda r: None
)
subpackages = [qualified_name for importer, qualified_name, ispkg in all_packages]
return ModuleProperties(
name=name, file=file, path=path, all=pkg_all, is_c_module=is_c, subpackages=subpackages
)
def worker(tasks: Queue[str], results: Queue[str | ModuleProperties], sys_path: list[str]) -> None:
"""The main loop of a worker introspection process."""
sys.path = sys_path
while True:
mod = tasks.get()
try:
prop = get_package_properties(mod)
except InspectError as e:
results.put(str(e))
continue
results.put(prop)
class ModuleInspect:
"""Perform runtime introspection of modules in a separate process.
Reuse the process for multiple modules for efficiency. However, if there is an
error, retry using a fresh process to avoid cross-contamination of state between
modules.
We use a separate process to isolate us from many side effects. For example, the
import of a module may kill the current process, and we want to recover from that.
Always use in a with statement for proper clean-up:
with ModuleInspect() as m:
p = m.get_package_properties('urllib.parse')
"""
def __init__(self) -> None:
self._start()
def _start(self) -> None:
if sys.platform == "linux":
ctx = get_context("forkserver")
else:
ctx = get_context("spawn")
self.tasks: Queue[str] = ctx.Queue()
self.results: Queue[ModuleProperties | str] = ctx.Queue()
self.proc = ctx.Process(target=worker, args=(self.tasks, self.results, sys.path))
self.proc.start()
self.counter = 0 # Number of successful roundtrips
def close(self) -> None:
"""Free any resources used."""
self.proc.terminate()
def get_package_properties(self, package_id: str) -> ModuleProperties:
"""Return some properties of a module/package using runtime introspection.
Raise InspectError if the target couldn't be imported.
"""
self.tasks.put(package_id)
res = self._get_from_queue()
if res is None:
# The process died; recover and report error.
self._start()
raise InspectError(f"Process died when importing {package_id!r}")
if isinstance(res, str):
# Error importing module
if self.counter > 0:
# Also try with a fresh process. Maybe one of the previous imports has
# corrupted some global state.
self.close()
self._start()
return self.get_package_properties(package_id)
raise InspectError(res)
self.counter += 1
return res
def _get_from_queue(self) -> ModuleProperties | str | None:
"""Get value from the queue.
Return the value read from the queue, or None if the process unexpectedly died.
"""
max_iter = 600
n = 0
while True:
if n == max_iter:
raise RuntimeError("Timeout waiting for subprocess")
try:
return self.results.get(timeout=0.05)
except queue.Empty:
if not self.proc.is_alive():
return None
n += 1
def __enter__(self) -> ModuleInspect:
return self
def __exit__(self, *args: object) -> None:
self.close()
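# Illustrative usage sketch (module name chosen arbitrarily):
#     with ModuleInspect() as inspector:
#         try:
#             props = inspector.get_package_properties("urllib")
#         except InspectError:
#             ...  # the import failed or the worker process died
#         else:
#             print(props.name, props.is_c_module, props.subpackages)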
| algorandfoundation/puya | src/puyapy/_vendor/mypy/moduleinspect.py | Python | NOASSERTION | 6,326 |
from __future__ import annotations
from typing import Callable
from mypy.nodes import TypeInfo
from mypy.types import Instance
from mypy.typestate import type_state
def calculate_mro(info: TypeInfo, obj_type: Callable[[], Instance] | None = None) -> None:
"""Calculate and set mro (method resolution order).
Raise MroError if the MRO cannot be determined.
"""
mro = linearize_hierarchy(info, obj_type)
assert mro, f"Could not produce a MRO at all for {info}"
info.mro = mro
# The property of falling back to Any is inherited.
info.fallback_to_any = any(baseinfo.fallback_to_any for baseinfo in info.mro)
type_state.reset_all_subtype_caches_for(info)
class MroError(Exception):
"""Raised if a consistent mro cannot be determined for a class."""
def linearize_hierarchy(
info: TypeInfo, obj_type: Callable[[], Instance] | None = None
) -> list[TypeInfo]:
# TODO describe
if info.mro:
return info.mro
bases = info.direct_base_classes()
if not bases and info.fullname != "builtins.object" and obj_type is not None:
# Probably an error, add a dummy `object` base class,
# otherwise MRO calculation may spuriously fail.
bases = [obj_type().type]
lin_bases = []
for base in bases:
assert base is not None, f"Cannot linearize bases for {info.fullname} {bases}"
lin_bases.append(linearize_hierarchy(base, obj_type))
lin_bases.append(bases)
return [info] + merge(lin_bases)
def merge(seqs: list[list[TypeInfo]]) -> list[TypeInfo]:
seqs = [s.copy() for s in seqs]
result: list[TypeInfo] = []
while True:
seqs = [s for s in seqs if s]
if not seqs:
return result
for seq in seqs:
head = seq[0]
if not [s for s in seqs if head in s[1:]]:
break
else:
raise MroError()
result.append(head)
for s in seqs:
if s[0] is head:
del s[0]
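# Worked example (plain class names instead of TypeInfo objects): for
#     class D: ...
#     class B(D): ...
#     class C(D): ...
#     class A(B, C): ...
# linearize_hierarchy(A) computes [A] + merge([[B, D, object], [C, D, object], [B, C]]).
# merge() repeatedly takes the first head that does not appear in any other
# sequence's tail (B, then C, then D, then object), giving the MRO
# [A, B, C, D, object]; if every remaining head appeared in some tail,
# MroError would be raised instead.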
| algorandfoundation/puya | src/puyapy/_vendor/mypy/mro.py | Python | NOASSERTION | 1,993 |
"""Abstract syntax tree node classes (i.e. parse tree)."""
from __future__ import annotations
import os
from abc import abstractmethod
from collections import defaultdict
from enum import Enum, unique
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Final,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
from typing_extensions import TypeAlias as _TypeAlias, TypeGuard
from mypy_extensions import trait
import mypy.strconv
from mypy.options import Options
from mypy.util import is_typeshed_file, short_type
from mypy.visitor import ExpressionVisitor, NodeVisitor, StatementVisitor
if TYPE_CHECKING:
from mypy.patterns import Pattern
class Context:
"""Base type for objects that are valid as error message locations."""
__slots__ = ("line", "column", "end_line", "end_column")
def __init__(self, line: int = -1, column: int = -1) -> None:
self.line = line
self.column = column
self.end_line: int | None = None
self.end_column: int | None = None
def set_line(
self,
target: Context | int,
column: int | None = None,
end_line: int | None = None,
end_column: int | None = None,
) -> None:
"""If target is a node, pull line (and column) information
into this node. If column is specified, this will override any column
information coming from a node.
"""
if isinstance(target, int):
self.line = target
else:
self.line = target.line
self.column = target.column
self.end_line = target.end_line
self.end_column = target.end_column
if column is not None:
self.column = column
if end_line is not None:
self.end_line = end_line
if end_column is not None:
self.end_column = end_column
if TYPE_CHECKING:
# break import cycle only needed for mypy
import mypy.types
T = TypeVar("T")
JsonDict: _TypeAlias = Dict[str, Any]
# Symbol table node kinds
#
# TODO rename to use more descriptive names
LDEF: Final = 0
GDEF: Final = 1
MDEF: Final = 2
# Placeholder for a name imported via 'from ... import'. Second phase of
# semantic analysis will replace this with the actual imported reference. This is
# needed so that we can detect whether a name has been imported during
# XXX what?
UNBOUND_IMPORTED: Final = 3
# RevealExpr node kinds
REVEAL_TYPE: Final = 0
REVEAL_LOCALS: Final = 1
LITERAL_YES: Final = 2
LITERAL_TYPE: Final = 1
LITERAL_NO: Final = 0
node_kinds: Final = {LDEF: "Ldef", GDEF: "Gdef", MDEF: "Mdef", UNBOUND_IMPORTED: "UnboundImported"}
inverse_node_kinds: Final = {_kind: _name for _name, _kind in node_kinds.items()}
implicit_module_attrs: Final = {
"__name__": "__builtins__.str",
"__doc__": None, # depends on Python version, see semanal.py
"__path__": None, # depends on if the module is a package
"__file__": "__builtins__.str",
"__package__": "__builtins__.str",
"__annotations__": None, # dict[str, Any] bounded in add_implicit_module_attrs()
"__spec__": None, # importlib.machinery.ModuleSpec bounded in add_implicit_module_attrs()
}
# These aliases exist because built-in class objects are not subscriptable.
# For example `list[int]` fails at runtime before Python 3.9; List[int] should be used instead.
type_aliases: Final = {
"typing.List": "builtins.list",
"typing.Dict": "builtins.dict",
"typing.Set": "builtins.set",
"typing.FrozenSet": "builtins.frozenset",
"typing.ChainMap": "collections.ChainMap",
"typing.Counter": "collections.Counter",
"typing.DefaultDict": "collections.defaultdict",
"typing.Deque": "collections.deque",
"typing.OrderedDict": "collections.OrderedDict",
# HACK: a lie in lieu of actual support for PEP 675
"typing.LiteralString": "builtins.str",
}
# This keeps track of the oldest supported Python version where the corresponding
# alias source is available.
type_aliases_source_versions: Final = {"typing.LiteralString": (3, 11)}
# This keeps track of aliases in `typing_extensions`, which we treat specially.
typing_extensions_aliases: Final = {
# See: https://github.com/python/mypy/issues/11528
"typing_extensions.OrderedDict": "collections.OrderedDict",
# HACK: a lie in lieu of actual support for PEP 675
"typing_extensions.LiteralString": "builtins.str",
}
reverse_builtin_aliases: Final = {
"builtins.list": "typing.List",
"builtins.dict": "typing.Dict",
"builtins.set": "typing.Set",
"builtins.frozenset": "typing.FrozenSet",
}
_nongen_builtins: Final = {"builtins.tuple": "typing.Tuple", "builtins.enumerate": ""}
_nongen_builtins.update((name, alias) for alias, name in type_aliases.items())
# Drop OrderedDict from this for backward compatibility
del _nongen_builtins["collections.OrderedDict"]
# HACK: consequence of hackily treating LiteralString as an alias for str
del _nongen_builtins["builtins.str"]
def get_nongen_builtins(python_version: tuple[int, int]) -> dict[str, str]:
# From Python 3.9, PEP 585 generic builtins are allowed
return _nongen_builtins if python_version < (3, 9) else {}
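# Illustrative example: get_nongen_builtins((3, 8)) still maps "builtins.list"
# to "typing.List", so annotations like `list[int]` can be flagged (with
# typing.List suggested instead) when targeting Python 3.8, while
# get_nongen_builtins((3, 9)) returns {} because PEP 585 makes the builtin
# generics subscriptable.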
RUNTIME_PROTOCOL_DECOS: Final = (
"typing.runtime_checkable",
"typing_extensions.runtime",
"typing_extensions.runtime_checkable",
)
LAMBDA_NAME: Final = "<lambda>"
class Node(Context):
"""Common base class for all non-type parse tree nodes."""
__slots__ = ()
def __str__(self) -> str:
ans = self.accept(mypy.strconv.StrConv(options=Options()))
if ans is None:
return repr(self)
return ans
def str_with_options(self, options: Options) -> str:
ans = self.accept(mypy.strconv.StrConv(options=options))
assert ans
return ans
def accept(self, visitor: NodeVisitor[T]) -> T:
raise RuntimeError("Not implemented", type(self))
@trait
class Statement(Node):
"""A statement node."""
__slots__ = ()
def accept(self, visitor: StatementVisitor[T]) -> T:
raise RuntimeError("Not implemented", type(self))
@trait
class Expression(Node):
"""An expression node."""
__slots__ = ()
def accept(self, visitor: ExpressionVisitor[T]) -> T:
raise RuntimeError("Not implemented", type(self))
class FakeExpression(Expression):
"""A dummy expression.
We need a dummy expression in one place, and can't instantiate Expression
because it is a trait and mypyc barfs.
"""
__slots__ = ()
# TODO:
# Lvalue = Union['NameExpr', 'MemberExpr', 'IndexExpr', 'SuperExpr', 'StarExpr'
# 'TupleExpr']; see #1783.
Lvalue: _TypeAlias = Expression
@trait
class SymbolNode(Node):
"""Nodes that can be stored in a symbol table."""
__slots__ = ()
@property
@abstractmethod
def name(self) -> str:
pass
# Fully qualified name
@property
@abstractmethod
def fullname(self) -> str:
pass
@abstractmethod
def serialize(self) -> JsonDict:
pass
@classmethod
def deserialize(cls, data: JsonDict) -> SymbolNode:
classname = data[".class"]
method = deserialize_map.get(classname)
if method is not None:
return method(data)
raise NotImplementedError(f"unexpected .class {classname}")
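# Illustrative round-trip (field values are made up): a serialized Var such as
#     {".class": "Var", "name": "x", "fullname": "m.x", "type": None, "flags": []}
# is dispatched through deserialize_map["Var"], i.e. Var.deserialize, and comes
# back as a Var node with fullname "m.x" and no declared type.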
# Items: fullname, related symbol table node, surrounding type (if any)
Definition: _TypeAlias = Tuple[str, "SymbolTableNode", Optional["TypeInfo"]]
class MypyFile(SymbolNode):
"""The abstract syntax tree of a single source file."""
__slots__ = (
"_fullname",
"path",
"defs",
"alias_deps",
"is_bom",
"names",
"imports",
"ignored_lines",
"skipped_lines",
"is_stub",
"is_cache_skeleton",
"is_partial_stub_package",
"plugin_deps",
"future_import_flags",
"_is_typeshed_file",
)
__match_args__ = ("name", "path", "defs")
# Fully qualified module name
_fullname: str
# Path to the file (empty string if not known)
path: str
# Top-level definitions and statements
defs: list[Statement]
# Type alias dependencies as mapping from target to set of alias full names
alias_deps: defaultdict[str, set[str]]
# Is there a UTF-8 BOM at the start?
is_bom: bool
names: SymbolTable
# All import nodes within the file (also ones within functions etc.)
imports: list[ImportBase]
# Lines on which to ignore certain errors when checking.
# If the value is empty, ignore all errors; otherwise, the list contains all
# error codes to ignore.
ignored_lines: dict[int, list[str]]
# Lines that were skipped during semantic analysis e.g. due to ALWAYS_FALSE, MYPY_FALSE,
# or platform/version checks. Those lines would not be type-checked.
skipped_lines: set[int]
# Is this file represented by a stub file (.pyi)?
is_stub: bool
# Is this loaded from the cache and thus missing the actual body of the file?
is_cache_skeleton: bool
# Does this represent an __init__.pyi stub with a module __getattr__
# (i.e. a partial stub package)? For such packages we suppress any missing
# module errors in addition to missing attribute errors.
is_partial_stub_package: bool
# Plugin-created dependencies
plugin_deps: dict[str, set[str]]
# Future imports defined in this file. Populated during semantic analysis.
future_import_flags: set[str]
_is_typeshed_file: bool | None
def __init__(
self,
defs: list[Statement],
imports: list[ImportBase],
is_bom: bool = False,
ignored_lines: dict[int, list[str]] | None = None,
) -> None:
super().__init__()
self.defs = defs
self.line = 1 # Dummy line number
self.column = 0 # Dummy column
self.imports = imports
self.is_bom = is_bom
self.alias_deps = defaultdict(set)
self.plugin_deps = {}
if ignored_lines:
self.ignored_lines = ignored_lines
else:
self.ignored_lines = {}
self.skipped_lines = set()
self.path = ""
self.is_stub = False
self.is_cache_skeleton = False
self.is_partial_stub_package = False
self.future_import_flags = set()
self._is_typeshed_file = None
def local_definitions(self) -> Iterator[Definition]:
"""Return all definitions within the module (including nested).
This doesn't include imported definitions.
"""
return local_definitions(self.names, self.fullname)
@property
def name(self) -> str:
return "" if not self._fullname else self._fullname.split(".")[-1]
@property
def fullname(self) -> str:
return self._fullname
def accept(self, visitor: NodeVisitor[T]) -> T:
return visitor.visit_mypy_file(self)
def is_package_init_file(self) -> bool:
return len(self.path) != 0 and os.path.basename(self.path).startswith("__init__.")
def is_future_flag_set(self, flag: str) -> bool:
return flag in self.future_import_flags
def is_typeshed_file(self, options: Options) -> bool:
# Cache result since this is called a lot
if self._is_typeshed_file is None:
self._is_typeshed_file = is_typeshed_file(options.abs_custom_typeshed_dir, self.path)
return self._is_typeshed_file
def serialize(self) -> JsonDict:
return {
".class": "MypyFile",
"_fullname": self._fullname,
"names": self.names.serialize(self._fullname),
"is_stub": self.is_stub,
"path": self.path,
"is_partial_stub_package": self.is_partial_stub_package,
"future_import_flags": list(self.future_import_flags),
}
@classmethod
def deserialize(cls, data: JsonDict) -> MypyFile:
assert data[".class"] == "MypyFile", data
tree = MypyFile([], [])
tree._fullname = data["_fullname"]
tree.names = SymbolTable.deserialize(data["names"])
tree.is_stub = data["is_stub"]
tree.path = data["path"]
tree.is_partial_stub_package = data["is_partial_stub_package"]
tree.is_cache_skeleton = True
tree.future_import_flags = set(data["future_import_flags"])
return tree
class ImportBase(Statement):
"""Base class for all import statements."""
__slots__ = ("is_unreachable", "is_top_level", "is_mypy_only", "assignments")
is_unreachable: bool # Set by semanal.SemanticAnalyzerPass1 if inside `if False` etc.
is_top_level: bool # Ditto if outside any class or def
is_mypy_only: bool # Ditto if inside `if TYPE_CHECKING` or `if MYPY`
# If an import replaces existing definitions, we construct dummy assignment
# statements that assign the imported names to the names in the current scope,
# for type checking purposes. Example:
#
# x = 1
# from m import x <-- add assignment representing "x = m.x"
assignments: list[AssignmentStmt]
def __init__(self) -> None:
super().__init__()
self.assignments = []
self.is_unreachable = False
self.is_top_level = False
self.is_mypy_only = False
class Import(ImportBase):
"""import m [as n]"""
__slots__ = ("ids",)
__match_args__ = ("ids",)
ids: list[tuple[str, str | None]] # (module id, as id)
def __init__(self, ids: list[tuple[str, str | None]]) -> None:
super().__init__()
self.ids = ids
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_import(self)
class ImportFrom(ImportBase):
"""from m import x [as y], ..."""
__slots__ = ("id", "names", "relative")
__match_args__ = ("id", "names", "relative")
id: str
relative: int
names: list[tuple[str, str | None]] # Tuples (name, as name)
def __init__(self, id: str, relative: int, names: list[tuple[str, str | None]]) -> None:
super().__init__()
self.id = id
self.names = names
self.relative = relative
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_import_from(self)
class ImportAll(ImportBase):
"""from m import *"""
__slots__ = ("id", "relative")
__match_args__ = ("id", "relative")
id: str
relative: int
def __init__(self, id: str, relative: int) -> None:
super().__init__()
self.id = id
self.relative = relative
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_import_all(self)
FUNCBASE_FLAGS: Final = ["is_property", "is_class", "is_static", "is_final"]
class FuncBase(Node):
"""Abstract base class for function-like nodes.
N.B: Although this has SymbolNode subclasses (FuncDef,
OverloadedFuncDef), avoid calling isinstance(..., FuncBase) on
something that is typed as SymbolNode. This is to work around
mypy bug #3603, in which mypy doesn't understand multiple
inheritance very well, and will assume that a SymbolNode
cannot be a FuncBase.
Instead, test against SYMBOL_FUNCBASE_TYPES, which enumerates
SymbolNode subclasses that are also FuncBase subclasses.
"""
__slots__ = (
"type",
"unanalyzed_type",
"info",
"is_property",
"is_class", # Uses "@classmethod" (explicit or implicit)
"is_static", # Uses "@staticmethod" (explicit or implicit)
"is_final", # Uses "@final"
"is_explicit_override", # Uses "@override"
"is_type_check_only", # Uses "@type_check_only"
"_fullname",
)
def __init__(self) -> None:
super().__init__()
# Type signature. This is usually CallableType or Overloaded, but it can be
# something else for decorated functions.
self.type: mypy.types.ProperType | None = None
# Original, not semantically analyzed type (used for reprocessing)
self.unanalyzed_type: mypy.types.ProperType | None = None
# If method, reference to TypeInfo
self.info = FUNC_NO_INFO
self.is_property = False
self.is_class = False
self.is_static = False
self.is_final = False
self.is_explicit_override = False
self.is_type_check_only = False
# Name with module prefix
self._fullname = ""
@property
@abstractmethod
def name(self) -> str:
pass
@property
def fullname(self) -> str:
return self._fullname
OverloadPart: _TypeAlias = Union["FuncDef", "Decorator"]
class OverloadedFuncDef(FuncBase, SymbolNode, Statement):
"""A logical node representing all the variants of a multi-declaration function.
A multi-declaration function is often an @overload, but can also be a
@property with a setter and/or a deleter.
This node has no explicit representation in the source program.
Overloaded variants must be consecutive in the source file.
"""
__slots__ = ("items", "unanalyzed_items", "impl")
items: list[OverloadPart]
unanalyzed_items: list[OverloadPart]
impl: OverloadPart | None
def __init__(self, items: list[OverloadPart]) -> None:
super().__init__()
self.items = items
self.unanalyzed_items = items.copy()
self.impl = None
if items:
# TODO: figure out how to reliably set end position (we don't know the impl here).
self.set_line(items[0].line, items[0].column)
@property
def name(self) -> str:
if self.items:
return self.items[0].name
else:
# This may happen for malformed overload
assert self.impl is not None
return self.impl.name
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_overloaded_func_def(self)
def serialize(self) -> JsonDict:
return {
".class": "OverloadedFuncDef",
"items": [i.serialize() for i in self.items],
"type": None if self.type is None else self.type.serialize(),
"fullname": self._fullname,
"impl": None if self.impl is None else self.impl.serialize(),
"flags": get_flags(self, FUNCBASE_FLAGS),
}
@classmethod
def deserialize(cls, data: JsonDict) -> OverloadedFuncDef:
assert data[".class"] == "OverloadedFuncDef"
res = OverloadedFuncDef(
[cast(OverloadPart, SymbolNode.deserialize(d)) for d in data["items"]]
)
if data.get("impl") is not None:
res.impl = cast(OverloadPart, SymbolNode.deserialize(data["impl"]))
# set line for empty overload items, as not set in __init__
if len(res.items) > 0:
res.set_line(res.impl.line)
if data.get("type") is not None:
typ = mypy.types.deserialize_type(data["type"])
assert isinstance(typ, mypy.types.ProperType)
res.type = typ
res._fullname = data["fullname"]
set_flags(res, data["flags"])
# NOTE: res.info will be set in the fixup phase.
return res
def is_dynamic(self) -> bool:
return all(item.is_dynamic() for item in self.items)
class Argument(Node):
"""A single argument in a FuncItem."""
__slots__ = ("variable", "type_annotation", "initializer", "kind", "pos_only")
__match_args__ = ("variable", "type_annotation", "initializer", "kind", "pos_only")
def __init__(
self,
variable: Var,
type_annotation: mypy.types.Type | None,
initializer: Expression | None,
kind: ArgKind,
pos_only: bool = False,
) -> None:
super().__init__()
self.variable = variable
self.type_annotation = type_annotation
self.initializer = initializer
self.kind = kind # must be an ARG_* constant
self.pos_only = pos_only
def set_line(
self,
target: Context | int,
column: int | None = None,
end_line: int | None = None,
end_column: int | None = None,
) -> None:
super().set_line(target, column, end_line, end_column)
if self.initializer and self.initializer.line < 0:
self.initializer.set_line(self.line, self.column, self.end_line, self.end_column)
self.variable.set_line(self.line, self.column, self.end_line, self.end_column)
# These specify the kind of a TypeParam
TYPE_VAR_KIND: Final = 0
PARAM_SPEC_KIND: Final = 1
TYPE_VAR_TUPLE_KIND: Final = 2
class TypeParam:
__slots__ = ("name", "kind", "upper_bound", "values")
def __init__(
self,
name: str,
kind: int,
upper_bound: mypy.types.Type | None,
values: list[mypy.types.Type],
) -> None:
self.name = name
self.kind = kind
self.upper_bound = upper_bound
self.values = values
FUNCITEM_FLAGS: Final = FUNCBASE_FLAGS + [
"is_overload",
"is_generator",
"is_coroutine",
"is_async_generator",
"is_awaitable_coroutine",
]
class FuncItem(FuncBase):
"""Base class for nodes usable as overloaded function items."""
__slots__ = (
"arguments", # Note that can be unset if deserialized (type is a lie!)
"arg_names", # Names of arguments
"arg_kinds", # Kinds of arguments
"min_args", # Minimum number of arguments
"max_pos", # Maximum number of positional arguments, -1 if no explicit
# limit (*args not included)
"type_args", # New-style type parameters (PEP 695)
"body", # Body of the function
"is_overload", # Is this an overload variant of function with more than
# one overload variant?
"is_generator", # Contains a yield statement?
"is_coroutine", # Defined using 'async def' syntax?
"is_async_generator", # Is an async def generator?
"is_awaitable_coroutine", # Decorated with '@{typing,asyncio}.coroutine'?
"expanded", # Variants of function with type variables with values expanded
)
__deletable__ = ("arguments", "max_pos", "min_args")
def __init__(
self,
arguments: list[Argument] | None = None,
body: Block | None = None,
typ: mypy.types.FunctionLike | None = None,
type_args: list[TypeParam] | None = None,
) -> None:
super().__init__()
self.arguments = arguments or []
self.arg_names = [None if arg.pos_only else arg.variable.name for arg in self.arguments]
self.arg_kinds: list[ArgKind] = [arg.kind for arg in self.arguments]
self.max_pos: int = self.arg_kinds.count(ARG_POS) + self.arg_kinds.count(ARG_OPT)
self.type_args: list[TypeParam] | None = type_args
self.body: Block = body or Block([])
self.type = typ
self.unanalyzed_type = typ
self.is_overload: bool = False
self.is_generator: bool = False
self.is_coroutine: bool = False
self.is_async_generator: bool = False
self.is_awaitable_coroutine: bool = False
self.expanded: list[FuncItem] = []
self.min_args = 0
for i in range(len(self.arguments)):
if self.arguments[i] is None and i < self.max_fixed_argc():
self.min_args = i + 1
def max_fixed_argc(self) -> int:
return self.max_pos
def is_dynamic(self) -> bool:
return self.type is None
FUNCDEF_FLAGS: Final = FUNCITEM_FLAGS + [
"is_decorated",
"is_conditional",
"is_trivial_body",
"is_mypy_only",
]
# Abstract status of a function
NOT_ABSTRACT: Final = 0
# Explicitly abstract (with @abstractmethod or overload without implementation)
IS_ABSTRACT: Final = 1
# Implicitly abstract: used for functions with trivial bodies defined in Protocols
IMPLICITLY_ABSTRACT: Final = 2
class FuncDef(FuncItem, SymbolNode, Statement):
"""Function definition.
This is a non-lambda function defined using 'def'.
"""
__slots__ = (
"_name",
"is_decorated",
"is_conditional",
"abstract_status",
"original_def",
"deco_line",
"is_trivial_body",
"is_mypy_only",
# Present only when a function is decorated with @typing.dataclass_transform or similar
"dataclass_transform_spec",
"docstring",
)
__match_args__ = ("name", "arguments", "type", "body")
# Note that all __init__ args must have default values
def __init__(
self,
name: str = "", # Function name
arguments: list[Argument] | None = None,
body: Block | None = None,
typ: mypy.types.FunctionLike | None = None,
type_args: list[TypeParam] | None = None,
) -> None:
super().__init__(arguments, body, typ, type_args)
self._name = name
self.is_decorated = False
self.is_conditional = False # Defined conditionally (within block)?
self.abstract_status = NOT_ABSTRACT
# Is this an abstract method with trivial body?
# Such methods can't be called via super().
self.is_trivial_body = False
# Original conditional definition
self.original_def: None | FuncDef | Var | Decorator = None
# Used for error reporting (to keep backward compatibility with pre-3.8)
self.deco_line: int | None = None
# Definitions that appear in if TYPE_CHECKING are marked with this flag.
self.is_mypy_only = False
self.dataclass_transform_spec: DataclassTransformSpec | None = None
self.docstring: str | None = None
@property
def name(self) -> str:
return self._name
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_func_def(self)
def serialize(self) -> JsonDict:
# We're deliberately omitting arguments and storing only arg_names and
# arg_kinds for space-saving reasons (arguments is not used in later
# stages of mypy).
# TODO: After a FuncDef is deserialized, the only time we use `arg_names`
# and `arg_kinds` is when `type` is None and we need to infer a type. Can
# we store the inferred type ahead of time?
return {
".class": "FuncDef",
"name": self._name,
"fullname": self._fullname,
"arg_names": self.arg_names,
"arg_kinds": [int(x.value) for x in self.arg_kinds],
"type": None if self.type is None else self.type.serialize(),
"flags": get_flags(self, FUNCDEF_FLAGS),
"abstract_status": self.abstract_status,
# TODO: Do we need expanded, original_def?
"dataclass_transform_spec": (
None
if self.dataclass_transform_spec is None
else self.dataclass_transform_spec.serialize()
),
}
@classmethod
def deserialize(cls, data: JsonDict) -> FuncDef:
assert data[".class"] == "FuncDef"
body = Block([])
ret = FuncDef(
data["name"],
[],
body,
(
None
if data["type"] is None
else cast(mypy.types.FunctionLike, mypy.types.deserialize_type(data["type"]))
),
)
ret._fullname = data["fullname"]
set_flags(ret, data["flags"])
# NOTE: ret.info is set in the fixup phase.
ret.arg_names = data["arg_names"]
ret.arg_kinds = [ArgKind(x) for x in data["arg_kinds"]]
ret.abstract_status = data["abstract_status"]
ret.dataclass_transform_spec = (
DataclassTransformSpec.deserialize(data["dataclass_transform_spec"])
if data["dataclass_transform_spec"] is not None
else None
)
# Leave these uninitialized so that future uses will trigger an error
del ret.arguments
del ret.max_pos
del ret.min_args
return ret
# All types that are both SymbolNodes and FuncBases. See the FuncBase
# docstring for the rationale.
SYMBOL_FUNCBASE_TYPES = (OverloadedFuncDef, FuncDef)
class Decorator(SymbolNode, Statement):
"""A decorated function.
A single Decorator object can include any number of function decorators.
"""
__slots__ = ("func", "decorators", "original_decorators", "var", "is_overload")
__match_args__ = ("decorators", "var", "func")
func: FuncDef # Decorated function
decorators: list[Expression] # Decorators (may be empty)
# Some decorators are removed by semanal, keep the original here.
original_decorators: list[Expression]
# TODO: This is mostly used for the type; consider replacing with a 'type' attribute
var: Var # Represents the decorated function obj
is_overload: bool
def __init__(self, func: FuncDef, decorators: list[Expression], var: Var) -> None:
super().__init__()
self.func = func
self.decorators = decorators
self.original_decorators = decorators.copy()
self.var = var
self.is_overload = False
@property
def name(self) -> str:
return self.func.name
@property
def fullname(self) -> str:
return self.func.fullname
@property
def is_final(self) -> bool:
return self.func.is_final
@property
def info(self) -> TypeInfo:
return self.func.info
@property
def type(self) -> mypy.types.Type | None:
return self.var.type
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_decorator(self)
def serialize(self) -> JsonDict:
return {
".class": "Decorator",
"func": self.func.serialize(),
"var": self.var.serialize(),
"is_overload": self.is_overload,
}
@classmethod
def deserialize(cls, data: JsonDict) -> Decorator:
assert data[".class"] == "Decorator"
dec = Decorator(FuncDef.deserialize(data["func"]), [], Var.deserialize(data["var"]))
dec.is_overload = data["is_overload"]
return dec
def is_dynamic(self) -> bool:
return self.func.is_dynamic()
VAR_FLAGS: Final = [
"is_self",
"is_cls",
"is_initialized_in_class",
"is_staticmethod",
"is_classmethod",
"is_property",
"is_settable_property",
"is_suppressed_import",
"is_classvar",
"is_abstract_var",
"is_final",
"final_unset_in_class",
"final_set_in_init",
"explicit_self_type",
"is_ready",
"is_inferred",
"invalid_partial_type",
"from_module_getattr",
"has_explicit_value",
"allow_incompatible_override",
]
class Var(SymbolNode):
"""A variable.
It can refer to global/local variable or a data attribute.
"""
__slots__ = (
"_name",
"_fullname",
"info",
"type",
"final_value",
"is_self",
"is_cls",
"is_ready",
"is_inferred",
"is_initialized_in_class",
"is_staticmethod",
"is_classmethod",
"is_property",
"is_settable_property",
"is_classvar",
"is_abstract_var",
"is_final",
"final_unset_in_class",
"final_set_in_init",
"is_suppressed_import",
"explicit_self_type",
"from_module_getattr",
"has_explicit_value",
"allow_incompatible_override",
"invalid_partial_type",
)
__match_args__ = ("name", "type", "final_value")
def __init__(self, name: str, type: mypy.types.Type | None = None) -> None:
super().__init__()
self._name = name # Name without module prefix
# TODO: Should be Optional[str]
self._fullname = "" # Name with module prefix
# TODO: Should be Optional[TypeInfo]
self.info = VAR_NO_INFO
self.type: mypy.types.Type | None = type # Declared or inferred type, or None
# Is this the first argument to an ordinary method (usually "self")?
self.is_self = False
# Is this the first argument to a classmethod (typically "cls")?
self.is_cls = False
self.is_ready = True # If inferred, is the inferred type available?
self.is_inferred = self.type is None
# Is this initialized explicitly to a non-None value in class body?
self.is_initialized_in_class = False
self.is_staticmethod = False
self.is_classmethod = False
self.is_property = False
self.is_settable_property = False
self.is_classvar = False
self.is_abstract_var = False
# Set to true when this variable refers to a module we were unable to
# parse for some reason (eg a silenced module)
self.is_suppressed_import = False
# Was this "variable" (rather a constant) defined as Final[...]?
self.is_final = False
# If constant value is a simple literal,
# store the literal value (unboxed) for the benefit of
# tools like mypyc.
self.final_value: int | float | complex | bool | str | None = None
# Where the value was set (only for class attributes)
self.final_unset_in_class = False
self.final_set_in_init = False
# This is True for a variable that was declared on self with an explicit type:
# class C:
# def __init__(self) -> None:
# self.x: int
# This case is important because this defines a new Var, even if there is one
# present in a superclass (without explicit type this doesn't create a new Var).
# See SemanticAnalyzer.analyze_member_lvalue() for details.
self.explicit_self_type = False
# If True, this is an implicit Var created due to module-level __getattr__.
self.from_module_getattr = False
# Var can be created with an explicit value `a = 1` or without one `a: int`,
# we need a way to tell which one is which.
self.has_explicit_value = False
# If True, subclasses can override this with an incompatible type.
self.allow_incompatible_override = False
# If True, this means we didn't manage to infer a full type and fell back to
# something like list[Any]. We may decide to not use such types as context.
self.invalid_partial_type = False
@property
def name(self) -> str:
return self._name
@property
def fullname(self) -> str:
return self._fullname
def accept(self, visitor: NodeVisitor[T]) -> T:
return visitor.visit_var(self)
def serialize(self) -> JsonDict:
# TODO: Leave default values out?
# NOTE: Sometimes self.is_ready is False here, but we don't care.
data: JsonDict = {
".class": "Var",
"name": self._name,
"fullname": self._fullname,
"type": None if self.type is None else self.type.serialize(),
"flags": get_flags(self, VAR_FLAGS),
}
if self.final_value is not None:
data["final_value"] = self.final_value
return data
@classmethod
def deserialize(cls, data: JsonDict) -> Var:
assert data[".class"] == "Var"
name = data["name"]
type = None if data["type"] is None else mypy.types.deserialize_type(data["type"])
v = Var(name, type)
v.is_ready = False # Override True default set in __init__
v._fullname = data["fullname"]
set_flags(v, data["flags"])
v.final_value = data.get("final_value")
return v
class ClassDef(Statement):
"""Class definition"""
__slots__ = (
"name",
"_fullname",
"defs",
"type_args",
"type_vars",
"base_type_exprs",
"removed_base_type_exprs",
"info",
"metaclass",
"decorators",
"keywords",
"analyzed",
"has_incompatible_baseclass",
"deco_line",
"docstring",
"removed_statements",
)
__match_args__ = ("name", "defs")
name: str # Name of the class without module prefix
_fullname: str # Fully qualified name of the class
defs: Block
# New-style type parameters (PEP 695), unanalyzed
type_args: list[TypeParam] | None
# Semantically analyzed type parameters (all syntax variants)
type_vars: list[mypy.types.TypeVarLikeType]
# Base class expressions (not semantically analyzed -- can be arbitrary expressions)
base_type_exprs: list[Expression]
# Special base classes like Generic[...] get moved here during semantic analysis
removed_base_type_exprs: list[Expression]
info: TypeInfo # Related TypeInfo
metaclass: Expression | None
decorators: list[Expression]
keywords: dict[str, Expression]
analyzed: Expression | None
has_incompatible_baseclass: bool
# Used by special forms like NamedTuple and TypedDict to store invalid statements
removed_statements: list[Statement]
def __init__(
self,
name: str,
defs: Block,
type_vars: list[mypy.types.TypeVarLikeType] | None = None,
base_type_exprs: list[Expression] | None = None,
metaclass: Expression | None = None,
keywords: list[tuple[str, Expression]] | None = None,
type_args: list[TypeParam] | None = None,
) -> None:
super().__init__()
self.name = name
self._fullname = ""
self.defs = defs
self.type_vars = type_vars or []
self.type_args = type_args
self.base_type_exprs = base_type_exprs or []
self.removed_base_type_exprs = []
self.info = CLASSDEF_NO_INFO
self.metaclass = metaclass
self.decorators = []
self.keywords = dict(keywords) if keywords else {}
self.analyzed = None
self.has_incompatible_baseclass = False
# Used for error reporting (to keep backward compatibility with pre-3.8)
self.deco_line: int | None = None
self.docstring: str | None = None
self.removed_statements = []
@property
def fullname(self) -> str:
return self._fullname
@fullname.setter
def fullname(self, v: str) -> None:
self._fullname = v
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_class_def(self)
def is_generic(self) -> bool:
return self.info.is_generic()
def serialize(self) -> JsonDict:
# Not serialized: defs, base_type_exprs, metaclass, decorators,
# analyzed (for named tuples etc.)
return {
".class": "ClassDef",
"name": self.name,
"fullname": self.fullname,
"type_vars": [v.serialize() for v in self.type_vars],
}
@classmethod
def deserialize(cls, data: JsonDict) -> ClassDef:
assert data[".class"] == "ClassDef"
res = ClassDef(
data["name"],
Block([]),
# https://github.com/python/mypy/issues/12257
[
cast(mypy.types.TypeVarLikeType, mypy.types.deserialize_type(v))
for v in data["type_vars"]
],
)
res.fullname = data["fullname"]
return res
class GlobalDecl(Statement):
"""Declaration global x, y, ..."""
__slots__ = ("names",)
__match_args__ = ("names",)
names: list[str]
def __init__(self, names: list[str]) -> None:
super().__init__()
self.names = names
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_global_decl(self)
class NonlocalDecl(Statement):
"""Declaration nonlocal x, y, ..."""
__slots__ = ("names",)
__match_args__ = ("names",)
names: list[str]
def __init__(self, names: list[str]) -> None:
super().__init__()
self.names = names
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_nonlocal_decl(self)
class Block(Statement):
__slots__ = ("body", "is_unreachable")
__match_args__ = ("body", "is_unreachable")
def __init__(self, body: list[Statement]) -> None:
super().__init__()
self.body = body
# True if we can determine that this block is not executed during semantic
# analysis. For example, this applies to blocks that are protected by
# something like "if PY3:" when using Python 2. However, some code is
# only considered unreachable during type checking and this is not true
# in those cases.
self.is_unreachable = False
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_block(self)
# Statements
class ExpressionStmt(Statement):
"""An expression as a statement, such as print(s)."""
__slots__ = ("expr",)
__match_args__ = ("expr",)
expr: Expression
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_expression_stmt(self)
class AssignmentStmt(Statement):
"""Assignment statement.
The same node class is used for single assignment, multiple assignment
(e.g. x, y = z) and chained assignment (e.g. x = y = z), assignments
that define new names, and assignments with explicit types ("# type: t"
or "x: t [= ...]").
An lvalue can be NameExpr, TupleExpr, ListExpr, MemberExpr, or IndexExpr.
"""
__slots__ = (
"lvalues",
"rvalue",
"type",
"unanalyzed_type",
"new_syntax",
"is_alias_def",
"is_final_def",
"invalid_recursive_alias",
)
__match_args__ = ("lvalues", "rvalues", "type")
lvalues: list[Lvalue]
# This is a TempNode if and only if no rvalue (x: t).
rvalue: Expression
# Declared type in a comment, may be None.
type: mypy.types.Type | None
# Original, not semantically analyzed type in annotation (used for reprocessing)
unanalyzed_type: mypy.types.Type | None
# This indicates usage of PEP 526 type annotation syntax in assignment.
new_syntax: bool
# Does this assignment define a type alias?
is_alias_def: bool
# Is this a final definition?
# Final attributes can't be re-assigned once set, and can't be overridden
# in a subclass. This flag is not set if an attempted declaration was found to
# be invalid during semantic analysis. It is still set to `True` if
# a final declaration overrides another final declaration (this is checked
# during type checking when MROs are known).
is_final_def: bool
# Stop further processing of this assignment, to prevent flipping back and forth
# during semantic analysis passes.
invalid_recursive_alias: bool
def __init__(
self,
lvalues: list[Lvalue],
rvalue: Expression,
type: mypy.types.Type | None = None,
new_syntax: bool = False,
) -> None:
super().__init__()
self.lvalues = lvalues
self.rvalue = rvalue
self.type = type
self.unanalyzed_type = type
self.new_syntax = new_syntax
self.is_alias_def = False
self.is_final_def = False
self.invalid_recursive_alias = False
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_assignment_stmt(self)
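# Illustrative mapping (informal): the source line `x: int = 1` becomes an
# AssignmentStmt whose lvalues are [NameExpr("x")], rvalue is IntExpr(1),
# type/unanalyzed_type hold the `int` annotation and new_syntax is True;
# a bare `x: int` uses a TempNode as the rvalue, as noted above.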
class OperatorAssignmentStmt(Statement):
"""Operator assignment statement such as x += 1"""
__slots__ = ("op", "lvalue", "rvalue")
__match_args__ = ("lvalue", "op", "rvalue")
op: str # TODO: Enum?
lvalue: Lvalue
rvalue: Expression
def __init__(self, op: str, lvalue: Lvalue, rvalue: Expression) -> None:
super().__init__()
self.op = op
self.lvalue = lvalue
self.rvalue = rvalue
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_operator_assignment_stmt(self)
class WhileStmt(Statement):
__slots__ = ("expr", "body", "else_body")
__match_args__ = ("expr", "body", "else_body")
expr: Expression
body: Block
else_body: Block | None
def __init__(self, expr: Expression, body: Block, else_body: Block | None) -> None:
super().__init__()
self.expr = expr
self.body = body
self.else_body = else_body
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_while_stmt(self)
class ForStmt(Statement):
__slots__ = (
"index",
"index_type",
"unanalyzed_index_type",
"inferred_item_type",
"inferred_iterator_type",
"expr",
"body",
"else_body",
"is_async",
)
__match_args__ = ("index", "index_type", "expr", "body", "else_body")
# Index variables
index: Lvalue
# Type given by type comments for index, can be None
index_type: mypy.types.Type | None
# Original, not semantically analyzed type in annotation (used for reprocessing)
unanalyzed_index_type: mypy.types.Type | None
# Inferred iterable item type
inferred_item_type: mypy.types.Type | None
# Inferred iterator type
inferred_iterator_type: mypy.types.Type | None
# Expression to iterate
expr: Expression
body: Block
else_body: Block | None
is_async: bool # True if `async for ...` (PEP 492, Python 3.5)
def __init__(
self,
index: Lvalue,
expr: Expression,
body: Block,
else_body: Block | None,
index_type: mypy.types.Type | None = None,
) -> None:
super().__init__()
self.index = index
self.index_type = index_type
self.unanalyzed_index_type = index_type
self.inferred_item_type = None
self.inferred_iterator_type = None
self.expr = expr
self.body = body
self.else_body = else_body
self.is_async = False
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_for_stmt(self)
class ReturnStmt(Statement):
__slots__ = ("expr",)
__match_args__ = ("expr",)
expr: Expression | None
def __init__(self, expr: Expression | None) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_return_stmt(self)
class AssertStmt(Statement):
__slots__ = ("expr", "msg")
__match_args__ = ("expr", "msg")
expr: Expression
msg: Expression | None
def __init__(self, expr: Expression, msg: Expression | None = None) -> None:
super().__init__()
self.expr = expr
self.msg = msg
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_assert_stmt(self)
class DelStmt(Statement):
__slots__ = ("expr",)
__match_args__ = ("expr",)
expr: Lvalue
def __init__(self, expr: Lvalue) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_del_stmt(self)
class BreakStmt(Statement):
__slots__ = ()
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_break_stmt(self)
class ContinueStmt(Statement):
__slots__ = ()
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_continue_stmt(self)
class PassStmt(Statement):
__slots__ = ()
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_pass_stmt(self)
class IfStmt(Statement):
__slots__ = ("expr", "body", "else_body")
__match_args__ = ("expr", "body", "else_body")
expr: list[Expression]
body: list[Block]
else_body: Block | None
def __init__(self, expr: list[Expression], body: list[Block], else_body: Block | None) -> None:
super().__init__()
self.expr = expr
self.body = body
self.else_body = else_body
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_if_stmt(self)
class RaiseStmt(Statement):
__slots__ = ("expr", "from_expr")
__match_args__ = ("expr", "from_expr")
# Plain 'raise' is a valid statement.
expr: Expression | None
from_expr: Expression | None
def __init__(self, expr: Expression | None, from_expr: Expression | None) -> None:
super().__init__()
self.expr = expr
self.from_expr = from_expr
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_raise_stmt(self)
class TryStmt(Statement):
__slots__ = ("body", "types", "vars", "handlers", "else_body", "finally_body", "is_star")
__match_args__ = ("body", "types", "vars", "handlers", "else_body", "finally_body", "is_star")
body: Block # Try body
# Plain 'except:' also possible
types: list[Expression | None] # Except type expressions
vars: list[NameExpr | None] # Except variable names
handlers: list[Block] # Except bodies
else_body: Block | None
finally_body: Block | None
# Whether this is try ... except* (added in Python 3.11)
is_star: bool
def __init__(
self,
body: Block,
vars: list[NameExpr | None],
types: list[Expression | None],
handlers: list[Block],
else_body: Block | None,
finally_body: Block | None,
) -> None:
super().__init__()
self.body = body
self.vars = vars
self.types = types
self.handlers = handlers
self.else_body = else_body
self.finally_body = finally_body
self.is_star = False
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_try_stmt(self)
class WithStmt(Statement):
__slots__ = ("expr", "target", "unanalyzed_type", "analyzed_types", "body", "is_async")
__match_args__ = ("expr", "target", "body")
expr: list[Expression]
target: list[Lvalue | None]
# Type given by type comments for target, can be None
unanalyzed_type: mypy.types.Type | None
# Semantically analyzed types from type comment (TypeList type expanded)
analyzed_types: list[mypy.types.Type]
body: Block
is_async: bool # True if `async with ...` (PEP 492, Python 3.5)
def __init__(
self,
expr: list[Expression],
target: list[Lvalue | None],
body: Block,
target_type: mypy.types.Type | None = None,
) -> None:
super().__init__()
self.expr = expr
self.target = target
self.unanalyzed_type = target_type
self.analyzed_types = []
self.body = body
self.is_async = False
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_with_stmt(self)
class MatchStmt(Statement):
__slots__ = ("subject", "patterns", "guards", "bodies")
__match_args__ = ("subject", "patterns", "guards", "bodies")
subject: Expression
patterns: list[Pattern]
guards: list[Expression | None]
bodies: list[Block]
def __init__(
self,
subject: Expression,
patterns: list[Pattern],
guards: list[Expression | None],
bodies: list[Block],
) -> None:
super().__init__()
assert len(patterns) == len(guards) == len(bodies)
self.subject = subject
self.patterns = patterns
self.guards = guards
self.bodies = bodies
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_match_stmt(self)
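# Illustrative sketch (not normative): a statement like `match s:` with the two
# cases `case [x] if x: ...` and `case _: ...` produces parallel lists of length
# two, with guards roughly [<x expr>, None] because the second case has no guard.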
class TypeAliasStmt(Statement):
__slots__ = ("name", "type_args", "value", "invalid_recursive_alias")
__match_args__ = ("name", "type_args", "value")
name: NameExpr
type_args: list[TypeParam]
value: LambdaExpr # Return value will get translated into a type
invalid_recursive_alias: bool
def __init__(self, name: NameExpr, type_args: list[TypeParam], value: LambdaExpr) -> None:
super().__init__()
self.name = name
self.type_args = type_args
self.value = value
self.invalid_recursive_alias = False
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_type_alias_stmt(self)
# Expressions
class IntExpr(Expression):
"""Integer literal"""
__slots__ = ("value",)
__match_args__ = ("value",)
value: int # 0 by default
def __init__(self, value: int) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_int_expr(self)
# How mypy uses StrExpr and BytesExpr:
#
# b'x' -> BytesExpr
# 'x', u'x' -> StrExpr
class StrExpr(Expression):
"""String literal"""
__slots__ = ("value",)
__match_args__ = ("value",)
value: str # '' by default
def __init__(self, value: str) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_str_expr(self)
def is_StrExpr_list(seq: list[Expression]) -> TypeGuard[list[StrExpr]]:
return all(isinstance(item, StrExpr) for item in seq)
class BytesExpr(Expression):
"""Bytes literal"""
__slots__ = ("value",)
__match_args__ = ("value",)
# Note: we deliberately do NOT use bytes here because it ends up
# unnecessarily complicating a lot of the result logic. For example,
# we'd have to worry about converting the bytes into a format we can
# easily serialize/deserialize to and from JSON, would have to worry
# about turning the bytes into a human-readable representation in
# error messages...
#
# It's more convenient to just store the human-readable representation
# from the very start.
value: str
def __init__(self, value: str) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_bytes_expr(self)
class FloatExpr(Expression):
"""Float literal"""
__slots__ = ("value",)
__match_args__ = ("value",)
value: float # 0.0 by default
def __init__(self, value: float) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_float_expr(self)
class ComplexExpr(Expression):
"""Complex literal"""
__slots__ = ("value",)
__match_args__ = ("value",)
value: complex
def __init__(self, value: complex) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_complex_expr(self)
class EllipsisExpr(Expression):
"""Ellipsis (...)"""
__slots__ = ()
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_ellipsis(self)
class StarExpr(Expression):
"""Star expression"""
__slots__ = ("expr", "valid")
__match_args__ = ("expr", "valid")
expr: Expression
valid: bool
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
        # Whether this starred expression is used in a tuple/list as an lvalue
self.valid = False
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_star_expr(self)
class RefExpr(Expression):
"""Abstract base class for name-like constructs"""
__slots__ = (
"kind",
"node",
"_fullname",
"is_new_def",
"is_inferred_def",
"is_alias_rvalue",
"type_guard",
"type_is",
)
def __init__(self) -> None:
super().__init__()
# LDEF/GDEF/MDEF/... (None if not available)
self.kind: int | None = None
# Var, FuncDef or TypeInfo that describes this
self.node: SymbolNode | None = None
# Fully qualified name (or name if not global)
self._fullname = ""
# Does this define a new name?
self.is_new_def = False
# Does this define a new name with inferred type?
#
# For members, after semantic analysis, this does not take base
# classes into consideration at all; the type checker deals with these.
self.is_inferred_def = False
        # Does this expression appear as an rvalue of a valid type alias definition?
self.is_alias_rvalue = False
# Cache type guard from callable_type.type_guard
self.type_guard: mypy.types.Type | None = None
# And same for TypeIs
self.type_is: mypy.types.Type | None = None
@property
def fullname(self) -> str:
return self._fullname
@fullname.setter
def fullname(self, v: str) -> None:
self._fullname = v
class NameExpr(RefExpr):
"""Name expression
This refers to a local name, global name or a module.
"""
__slots__ = ("name", "is_special_form")
__match_args__ = ("name", "node")
def __init__(self, name: str) -> None:
super().__init__()
self.name = name # Name referred to
        # Is this an l.h.s. of a special form assignment like typed dict or type variable?
self.is_special_form = False
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_name_expr(self)
def serialize(self) -> JsonDict:
assert False, f"Serializing NameExpr: {self}"
class MemberExpr(RefExpr):
"""Member access expression x.y"""
__slots__ = ("expr", "name", "def_var")
__match_args__ = ("expr", "name", "node")
def __init__(self, expr: Expression, name: str) -> None:
super().__init__()
self.expr = expr
self.name = name
# The variable node related to a definition through 'self.x = <initializer>'.
# The nodes of other kinds of member expressions are resolved during type checking.
self.def_var: Var | None = None
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_member_expr(self)
# Kinds of arguments
@unique
class ArgKind(Enum):
# Positional argument
ARG_POS = 0
# Positional, optional argument (functions only, not calls)
ARG_OPT = 1
# *arg argument
ARG_STAR = 2
# Keyword argument x=y in call, or keyword-only function arg
ARG_NAMED = 3
# **arg argument
ARG_STAR2 = 4
# In an argument list, keyword-only and also optional
ARG_NAMED_OPT = 5
def is_positional(self, star: bool = False) -> bool:
return self == ARG_POS or self == ARG_OPT or (star and self == ARG_STAR)
def is_named(self, star: bool = False) -> bool:
return self == ARG_NAMED or self == ARG_NAMED_OPT or (star and self == ARG_STAR2)
def is_required(self) -> bool:
return self == ARG_POS or self == ARG_NAMED
def is_optional(self) -> bool:
return self == ARG_OPT or self == ARG_NAMED_OPT
def is_star(self) -> bool:
return self == ARG_STAR or self == ARG_STAR2
ARG_POS: Final = ArgKind.ARG_POS
ARG_OPT: Final = ArgKind.ARG_OPT
ARG_STAR: Final = ArgKind.ARG_STAR
ARG_NAMED: Final = ArgKind.ARG_NAMED
ARG_STAR2: Final = ArgKind.ARG_STAR2
ARG_NAMED_OPT: Final = ArgKind.ARG_NAMED_OPT
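# Illustrative sketch (an assumption about how kinds line up with a signature, not
# normative): a definition like `def f(a, b=1, *args, c, d=2, **kw)` would get
# arg_kinds roughly [ARG_POS, ARG_OPT, ARG_STAR, ARG_NAMED, ARG_NAMED_OPT, ARG_STAR2].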
class CallExpr(Expression):
"""Call expression.
This can also represent several special forms that are syntactically calls
such as cast(...) and None # type: ....
"""
__slots__ = ("callee", "args", "arg_kinds", "arg_names", "analyzed")
__match_args__ = ("callee", "args", "arg_kinds", "arg_names")
def __init__(
self,
callee: Expression,
args: list[Expression],
arg_kinds: list[ArgKind],
arg_names: list[str | None],
analyzed: Expression | None = None,
) -> None:
super().__init__()
if not arg_names:
arg_names = [None] * len(args)
self.callee = callee
self.args = args
self.arg_kinds = arg_kinds # ARG_ constants
# Each name can be None if not a keyword argument.
self.arg_names: list[str | None] = arg_names
# If not None, the node that represents the meaning of the CallExpr. For
# cast(...) this is a CastExpr.
self.analyzed = analyzed
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_call_expr(self)
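# Illustrative sketch (not normative): a call like `f(1, *rest, key=2)` would be
# represented roughly with args=[1, rest, 2], arg_kinds=[ARG_POS, ARG_STAR, ARG_NAMED]
# and arg_names=[None, None, "key"].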
class YieldFromExpr(Expression):
__slots__ = ("expr",)
__match_args__ = ("expr",)
expr: Expression
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_yield_from_expr(self)
class YieldExpr(Expression):
__slots__ = ("expr",)
__match_args__ = ("expr",)
expr: Expression | None
def __init__(self, expr: Expression | None) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_yield_expr(self)
class IndexExpr(Expression):
"""Index expression x[y].
Also wraps type application such as List[int] as a special form.
"""
__slots__ = ("base", "index", "method_type", "analyzed")
__match_args__ = ("base", "index")
base: Expression
index: Expression
# Inferred __getitem__ method type
method_type: mypy.types.Type | None
# If not None, this is actually semantically a type application
# Class[type, ...] or a type alias initializer.
analyzed: TypeApplication | TypeAliasExpr | None
def __init__(self, base: Expression, index: Expression) -> None:
super().__init__()
self.base = base
self.index = index
self.method_type = None
self.analyzed = None
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_index_expr(self)
class UnaryExpr(Expression):
"""Unary operation"""
__slots__ = ("op", "expr", "method_type")
__match_args__ = ("op", "expr")
op: str # TODO: Enum?
expr: Expression
# Inferred operator method type
method_type: mypy.types.Type | None
def __init__(self, op: str, expr: Expression) -> None:
super().__init__()
self.op = op
self.expr = expr
self.method_type = None
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_unary_expr(self)
class AssignmentExpr(Expression):
"""Assignment expressions in Python 3.8+, like "a := 2"."""
__slots__ = ("target", "value")
__match_args__ = ("target", "value")
def __init__(self, target: Expression, value: Expression) -> None:
super().__init__()
self.target = target
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_assignment_expr(self)
class OpExpr(Expression):
"""Binary operation.
The dot (.), [] and comparison operators have more specific nodes.
"""
__slots__ = (
"op",
"left",
"right",
"method_type",
"right_always",
"right_unreachable",
"analyzed",
)
__match_args__ = ("left", "op", "right")
op: str # TODO: Enum?
left: Expression
right: Expression
    # Inferred type of the operator method (when relevant).
method_type: mypy.types.Type | None
# Per static analysis only: Is the right side going to be evaluated every time?
right_always: bool
# Per static analysis only: Is the right side unreachable?
right_unreachable: bool
# Used for expressions that represent a type "X | Y" in some contexts
analyzed: TypeAliasExpr | None
def __init__(
self, op: str, left: Expression, right: Expression, analyzed: TypeAliasExpr | None = None
) -> None:
super().__init__()
self.op = op
self.left = left
self.right = right
self.method_type = None
self.right_always = False
self.right_unreachable = False
self.analyzed = analyzed
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_op_expr(self)
class ComparisonExpr(Expression):
"""Comparison expression (e.g. a < b > c < d)."""
__slots__ = ("operators", "operands", "method_types")
__match_args__ = ("operands", "operators")
operators: list[str]
operands: list[Expression]
# Inferred type for the operator methods (when relevant; None for 'is').
method_types: list[mypy.types.Type | None]
def __init__(self, operators: list[str], operands: list[Expression]) -> None:
super().__init__()
self.operators = operators
self.operands = operands
self.method_types = []
def pairwise(self) -> Iterator[tuple[str, Expression, Expression]]:
"""If this comparison expr is "a < b is c == d", yields the sequence
("<", a, b), ("is", b, c), ("==", c, d)
"""
for i, operator in enumerate(self.operators):
yield operator, self.operands[i], self.operands[i + 1]
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_comparison_expr(self)
class SliceExpr(Expression):
"""Slice expression (e.g. 'x:y', 'x:', '::2' or ':').
This is only valid as index in index expressions.
"""
__slots__ = ("begin_index", "end_index", "stride")
__match_args__ = ("begin_index", "end_index", "stride")
begin_index: Expression | None
end_index: Expression | None
stride: Expression | None
def __init__(
self,
begin_index: Expression | None,
end_index: Expression | None,
stride: Expression | None,
) -> None:
super().__init__()
self.begin_index = begin_index
self.end_index = end_index
self.stride = stride
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_slice_expr(self)
class CastExpr(Expression):
"""Cast expression cast(type, expr)."""
__slots__ = ("expr", "type")
__match_args__ = ("expr", "type")
expr: Expression
type: mypy.types.Type
def __init__(self, expr: Expression, typ: mypy.types.Type) -> None:
super().__init__()
self.expr = expr
self.type = typ
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_cast_expr(self)
class AssertTypeExpr(Expression):
"""Represents a typing.assert_type(expr, type) call."""
__slots__ = ("expr", "type")
__match_args__ = ("expr", "type")
expr: Expression
type: mypy.types.Type
def __init__(self, expr: Expression, typ: mypy.types.Type) -> None:
super().__init__()
self.expr = expr
self.type = typ
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_assert_type_expr(self)
class RevealExpr(Expression):
"""Reveal type expression reveal_type(expr) or reveal_locals() expression."""
__slots__ = ("expr", "kind", "local_nodes", "is_imported")
__match_args__ = ("expr", "kind", "local_nodes", "is_imported")
expr: Expression | None
kind: int
local_nodes: list[Var] | None
def __init__(
self,
kind: int,
expr: Expression | None = None,
local_nodes: list[Var] | None = None,
is_imported: bool = False,
) -> None:
super().__init__()
self.expr = expr
self.kind = kind
self.local_nodes = local_nodes
self.is_imported = is_imported
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_reveal_expr(self)
class SuperExpr(Expression):
"""Expression super().name"""
__slots__ = ("name", "info", "call")
__match_args__ = ("name", "call", "info")
name: str
info: TypeInfo | None # Type that contains this super expression
call: CallExpr # The expression super(...)
def __init__(self, name: str, call: CallExpr) -> None:
super().__init__()
self.name = name
self.call = call
self.info = None
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_super_expr(self)
class LambdaExpr(FuncItem, Expression):
"""Lambda expression"""
__match_args__ = ("arguments", "arg_names", "arg_kinds", "body")
@property
def name(self) -> str:
return LAMBDA_NAME
def expr(self) -> Expression:
"""Return the expression (the body) of the lambda."""
ret = self.body.body[-1]
assert isinstance(ret, ReturnStmt)
expr = ret.expr
assert expr is not None # lambda can't have empty body
return expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_lambda_expr(self)
def is_dynamic(self) -> bool:
return False
class ListExpr(Expression):
"""List literal expression [...]."""
__slots__ = ("items",)
__match_args__ = ("items",)
items: list[Expression]
def __init__(self, items: list[Expression]) -> None:
super().__init__()
self.items = items
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_list_expr(self)
class DictExpr(Expression):
"""Dictionary literal expression {key: value, ...}."""
__slots__ = ("items",)
__match_args__ = ("items",)
items: list[tuple[Expression | None, Expression]]
def __init__(self, items: list[tuple[Expression | None, Expression]]) -> None:
super().__init__()
self.items = items
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_dict_expr(self)
class TupleExpr(Expression):
"""Tuple literal expression (..., ...)
Also lvalue sequences (..., ...) and [..., ...]"""
__slots__ = ("items",)
__match_args__ = ("items",)
items: list[Expression]
def __init__(self, items: list[Expression]) -> None:
super().__init__()
self.items = items
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_tuple_expr(self)
class SetExpr(Expression):
"""Set literal expression {value, ...}."""
__slots__ = ("items",)
__match_args__ = ("items",)
items: list[Expression]
def __init__(self, items: list[Expression]) -> None:
super().__init__()
self.items = items
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_set_expr(self)
class GeneratorExpr(Expression):
"""Generator expression ... for ... in ... [ for ... in ... ] [ if ... ]."""
__slots__ = ("left_expr", "sequences", "condlists", "is_async", "indices")
__match_args__ = ("left_expr", "indices", "sequences", "condlists")
left_expr: Expression
sequences: list[Expression]
condlists: list[list[Expression]]
is_async: list[bool]
indices: list[Lvalue]
def __init__(
self,
left_expr: Expression,
indices: list[Lvalue],
sequences: list[Expression],
condlists: list[list[Expression]],
is_async: list[bool],
) -> None:
super().__init__()
self.left_expr = left_expr
self.sequences = sequences
self.condlists = condlists
self.indices = indices
self.is_async = is_async
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_generator_expr(self)
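# Illustrative sketch (not normative): for `(x + y for x in a if x for y in b)`,
# indices is roughly [x, y], sequences is [a, b] and condlists is [[x], []],
# i.e. one (possibly empty) list of conditions per `for` clause.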
class ListComprehension(Expression):
"""List comprehension (e.g. [x + 1 for x in a])"""
__slots__ = ("generator",)
__match_args__ = ("generator",)
generator: GeneratorExpr
def __init__(self, generator: GeneratorExpr) -> None:
super().__init__()
self.generator = generator
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_list_comprehension(self)
class SetComprehension(Expression):
"""Set comprehension (e.g. {x + 1 for x in a})"""
__slots__ = ("generator",)
__match_args__ = ("generator",)
generator: GeneratorExpr
def __init__(self, generator: GeneratorExpr) -> None:
super().__init__()
self.generator = generator
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_set_comprehension(self)
class DictionaryComprehension(Expression):
"""Dictionary comprehension (e.g. {k: v for k, v in a}"""
__slots__ = ("key", "value", "sequences", "condlists", "is_async", "indices")
__match_args__ = ("key", "value", "indices", "sequences", "condlists")
key: Expression
value: Expression
sequences: list[Expression]
condlists: list[list[Expression]]
is_async: list[bool]
indices: list[Lvalue]
def __init__(
self,
key: Expression,
value: Expression,
indices: list[Lvalue],
sequences: list[Expression],
condlists: list[list[Expression]],
is_async: list[bool],
) -> None:
super().__init__()
self.key = key
self.value = value
self.sequences = sequences
self.condlists = condlists
self.indices = indices
self.is_async = is_async
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_dictionary_comprehension(self)
class ConditionalExpr(Expression):
"""Conditional expression (e.g. x if y else z)"""
__slots__ = ("cond", "if_expr", "else_expr")
__match_args__ = ("if_expr", "cond", "else_expr")
cond: Expression
if_expr: Expression
else_expr: Expression
def __init__(self, cond: Expression, if_expr: Expression, else_expr: Expression) -> None:
super().__init__()
self.cond = cond
self.if_expr = if_expr
self.else_expr = else_expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_conditional_expr(self)
class TypeApplication(Expression):
"""Type application expr[type, ...]"""
__slots__ = ("expr", "types")
__match_args__ = ("expr", "types")
expr: Expression
types: list[mypy.types.Type]
def __init__(self, expr: Expression, types: list[mypy.types.Type]) -> None:
super().__init__()
self.expr = expr
self.types = types
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_type_application(self)
# Variance of a type variable. For example, T in the definition of
# List[T] is invariant, so List[int] is not a subtype of List[object],
# and also List[object] is not a subtype of List[int].
#
# The T in Iterable[T] is covariant, so Iterable[int] is a subtype of
# Iterable[object], but not vice versa.
#
# If T is contravariant in Foo[T], Foo[object] is a subtype of
# Foo[int], but not vice versa.
INVARIANT: Final = 0
COVARIANT: Final = 1
CONTRAVARIANT: Final = 2
VARIANCE_NOT_READY: Final = 3 # Variance hasn't been inferred (using Python 3.12 syntax)
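# Illustrative sketch (not normative): with the classic TypeVar syntax these map
# roughly as TypeVar("T") -> INVARIANT, TypeVar("T_co", covariant=True) -> COVARIANT,
# TypeVar("T_contra", contravariant=True) -> CONTRAVARIANT, while VARIANCE_NOT_READY
# is reserved for the Python 3.12 `class C[T]: ...` style where variance is inferred.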
class TypeVarLikeExpr(SymbolNode, Expression):
"""Base class for TypeVarExpr, ParamSpecExpr and TypeVarTupleExpr.
Note that they are constructed by the semantic analyzer.
"""
__slots__ = ("_name", "_fullname", "upper_bound", "default", "variance", "is_new_style")
_name: str
_fullname: str
# Upper bound: only subtypes of upper_bound are valid as values. By default
# this is 'object', meaning no restriction.
upper_bound: mypy.types.Type
# Default: used to resolve the TypeVar if the default is not explicitly given.
# By default this is 'AnyType(TypeOfAny.from_omitted_generics)'. See PEP 696.
default: mypy.types.Type
# Variance of the type variable. Invariant is the default.
# TypeVar(..., covariant=True) defines a covariant type variable.
# TypeVar(..., contravariant=True) defines a contravariant type
# variable.
variance: int
def __init__(
self,
name: str,
fullname: str,
upper_bound: mypy.types.Type,
default: mypy.types.Type,
variance: int = INVARIANT,
is_new_style: bool = False,
line: int = -1,
) -> None:
super().__init__(line=line)
self._name = name
self._fullname = fullname
self.upper_bound = upper_bound
self.default = default
self.variance = variance
self.is_new_style = is_new_style
@property
def name(self) -> str:
return self._name
@property
def fullname(self) -> str:
return self._fullname
class TypeVarExpr(TypeVarLikeExpr):
"""Type variable expression TypeVar(...).
This is also used to represent type variables in symbol tables.
A type variable is not valid as a type unless bound in a TypeVarLikeScope.
That happens within:
1. a generic class that uses the type variable as a type argument or
2. a generic function that refers to the type variable in its signature.
"""
__slots__ = ("values",)
__match_args__ = ("name", "values", "upper_bound", "default")
# Value restriction: only types in the list are valid as values. If the
# list is empty, there is no restriction.
values: list[mypy.types.Type]
def __init__(
self,
name: str,
fullname: str,
values: list[mypy.types.Type],
upper_bound: mypy.types.Type,
default: mypy.types.Type,
variance: int = INVARIANT,
is_new_style: bool = False,
line: int = -1,
) -> None:
super().__init__(name, fullname, upper_bound, default, variance, is_new_style, line=line)
self.values = values
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_type_var_expr(self)
def serialize(self) -> JsonDict:
return {
".class": "TypeVarExpr",
"name": self._name,
"fullname": self._fullname,
"values": [t.serialize() for t in self.values],
"upper_bound": self.upper_bound.serialize(),
"default": self.default.serialize(),
"variance": self.variance,
}
@classmethod
def deserialize(cls, data: JsonDict) -> TypeVarExpr:
assert data[".class"] == "TypeVarExpr"
return TypeVarExpr(
data["name"],
data["fullname"],
[mypy.types.deserialize_type(v) for v in data["values"]],
mypy.types.deserialize_type(data["upper_bound"]),
mypy.types.deserialize_type(data["default"]),
data["variance"],
)
class ParamSpecExpr(TypeVarLikeExpr):
__slots__ = ()
__match_args__ = ("name", "upper_bound", "default")
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_paramspec_expr(self)
def serialize(self) -> JsonDict:
return {
".class": "ParamSpecExpr",
"name": self._name,
"fullname": self._fullname,
"upper_bound": self.upper_bound.serialize(),
"default": self.default.serialize(),
"variance": self.variance,
}
@classmethod
def deserialize(cls, data: JsonDict) -> ParamSpecExpr:
assert data[".class"] == "ParamSpecExpr"
return ParamSpecExpr(
data["name"],
data["fullname"],
mypy.types.deserialize_type(data["upper_bound"]),
mypy.types.deserialize_type(data["default"]),
data["variance"],
)
class TypeVarTupleExpr(TypeVarLikeExpr):
"""Type variable tuple expression TypeVarTuple(...)."""
__slots__ = "tuple_fallback"
tuple_fallback: mypy.types.Instance
__match_args__ = ("name", "upper_bound", "default")
def __init__(
self,
name: str,
fullname: str,
upper_bound: mypy.types.Type,
tuple_fallback: mypy.types.Instance,
default: mypy.types.Type,
variance: int = INVARIANT,
is_new_style: bool = False,
line: int = -1,
) -> None:
super().__init__(name, fullname, upper_bound, default, variance, is_new_style, line=line)
self.tuple_fallback = tuple_fallback
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_type_var_tuple_expr(self)
def serialize(self) -> JsonDict:
return {
".class": "TypeVarTupleExpr",
"name": self._name,
"fullname": self._fullname,
"upper_bound": self.upper_bound.serialize(),
"tuple_fallback": self.tuple_fallback.serialize(),
"default": self.default.serialize(),
"variance": self.variance,
}
@classmethod
def deserialize(cls, data: JsonDict) -> TypeVarTupleExpr:
assert data[".class"] == "TypeVarTupleExpr"
return TypeVarTupleExpr(
data["name"],
data["fullname"],
mypy.types.deserialize_type(data["upper_bound"]),
mypy.types.Instance.deserialize(data["tuple_fallback"]),
mypy.types.deserialize_type(data["default"]),
data["variance"],
)
class TypeAliasExpr(Expression):
"""Type alias expression (rvalue)."""
__slots__ = ("node",)
__match_args__ = ("node",)
node: TypeAlias
def __init__(self, node: TypeAlias) -> None:
super().__init__()
self.node = node
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_type_alias_expr(self)
class NamedTupleExpr(Expression):
"""Named tuple expression namedtuple(...) or NamedTuple(...)."""
__slots__ = ("info", "is_typed")
__match_args__ = ("info",)
# The class representation of this named tuple (its tuple_type attribute contains
# the tuple item types)
info: TypeInfo
is_typed: bool # whether this class was created with typing(_extensions).NamedTuple
def __init__(self, info: TypeInfo, is_typed: bool = False) -> None:
super().__init__()
self.info = info
self.is_typed = is_typed
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_namedtuple_expr(self)
class TypedDictExpr(Expression):
"""Typed dict expression TypedDict(...)."""
__slots__ = ("info",)
__match_args__ = ("info",)
# The class representation of this typed dict
info: TypeInfo
def __init__(self, info: TypeInfo) -> None:
super().__init__()
self.info = info
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_typeddict_expr(self)
class EnumCallExpr(Expression):
"""Named tuple expression Enum('name', 'val1 val2 ...')."""
__slots__ = ("info", "items", "values")
__match_args__ = ("info", "items", "values")
# The class representation of this enumerated type
info: TypeInfo
# The item names (for debugging)
items: list[str]
values: list[Expression | None]
def __init__(self, info: TypeInfo, items: list[str], values: list[Expression | None]) -> None:
super().__init__()
self.info = info
self.items = items
self.values = values
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_enum_call_expr(self)
class PromoteExpr(Expression):
"""Ducktype class decorator expression _promote(...)."""
__slots__ = ("type",)
type: mypy.types.ProperType
def __init__(self, type: mypy.types.ProperType) -> None:
super().__init__()
self.type = type
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit__promote_expr(self)
class NewTypeExpr(Expression):
"""NewType expression NewType(...)."""
__slots__ = ("name", "old_type", "info")
__match_args__ = ("name", "old_type", "info")
name: str
# The base type (the second argument to NewType)
old_type: mypy.types.Type | None
# The synthesized class representing the new type (inherits old_type)
info: TypeInfo | None
def __init__(
self, name: str, old_type: mypy.types.Type | None, line: int, column: int
) -> None:
super().__init__(line=line, column=column)
self.name = name
self.old_type = old_type
self.info = None
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_newtype_expr(self)
class AwaitExpr(Expression):
"""Await expression (await ...)."""
__slots__ = ("expr",)
__match_args__ = ("expr",)
expr: Expression
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_await_expr(self)
# Constants
class TempNode(Expression):
"""Temporary dummy node used during type checking.
This node is not present in the original program; it is just an artifact
of the type checker implementation. It only represents an opaque node with
some fixed type.
"""
__slots__ = ("type", "no_rhs")
type: mypy.types.Type
# Is this TempNode used to indicate absence of a right hand side in an annotated assignment?
# (e.g. for 'x: int' the rvalue is TempNode(AnyType(TypeOfAny.special_form), no_rhs=True))
no_rhs: bool
def __init__(
self, typ: mypy.types.Type, no_rhs: bool = False, *, context: Context | None = None
) -> None:
"""Construct a dummy node; optionally borrow line/column from context object."""
super().__init__()
self.type = typ
self.no_rhs = no_rhs
if context is not None:
self.line = context.line
self.column = context.column
def __repr__(self) -> str:
return "TempNode:%d(%s)" % (self.line, str(self.type))
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_temp_node(self)
# Special attributes not collected as protocol members by Python 3.12
# See typing._SPECIAL_NAMES
EXCLUDED_PROTOCOL_ATTRIBUTES: Final = frozenset(
{
"__abstractmethods__",
"__annotations__",
"__dict__",
"__doc__",
"__init__",
"__module__",
"__new__",
"__slots__",
"__subclasshook__",
"__weakref__",
"__class_getitem__", # Since Python 3.9
}
)
class TypeInfo(SymbolNode):
"""The type structure of a single class.
Each TypeInfo corresponds one-to-one to a ClassDef, which
represents the AST of the class.
In type-theory terms, this is a "type constructor", and if the
class is generic then it will be a type constructor of higher kind.
Where the class is used in an actual type, it's in the form of an
Instance, which amounts to a type application of the tycon to
the appropriate number of arguments.
"""
__slots__ = (
"_fullname",
"module_name",
"defn",
"mro",
"_mro_refs",
"bad_mro",
"is_final",
"declared_metaclass",
"metaclass_type",
"names",
"is_abstract",
"is_protocol",
"runtime_protocol",
"abstract_attributes",
"deletable_attributes",
"slots",
"assuming",
"assuming_proper",
"inferring",
"is_enum",
"fallback_to_any",
"meta_fallback_to_any",
"type_vars",
"has_param_spec_type",
"bases",
"_promote",
"tuple_type",
"special_alias",
"is_named_tuple",
"typeddict_type",
"is_newtype",
"is_intersection",
"metadata",
"alt_promote",
"has_type_var_tuple_type",
"type_var_tuple_prefix",
"type_var_tuple_suffix",
"self_type",
"dataclass_transform_spec",
"is_type_check_only",
)
_fullname: str # Fully qualified name
# Fully qualified name for the module this type was defined in. This
# information is also in the fullname, but is harder to extract in the
# case of nested class definitions.
module_name: str
defn: ClassDef # Corresponding ClassDef
# Method Resolution Order: the order of looking up attributes. The first
    # value always refers to this class.
mro: list[TypeInfo]
# Used to stash the names of the mro classes temporarily between
# deserialization and fixup. See deserialize() for why.
_mro_refs: list[str] | None
bad_mro: bool # Could not construct full MRO
is_final: bool
declared_metaclass: mypy.types.Instance | None
metaclass_type: mypy.types.Instance | None
names: SymbolTable # Names defined directly in this type
is_abstract: bool # Does the class have any abstract attributes?
is_protocol: bool # Is this a protocol class?
runtime_protocol: bool # Does this protocol support isinstance checks?
# List of names of abstract attributes together with their abstract status.
# The abstract status must be one of `NOT_ABSTRACT`, `IS_ABSTRACT`, `IMPLICITLY_ABSTRACT`.
abstract_attributes: list[tuple[str, int]]
deletable_attributes: list[str] # Used by mypyc only
# Does this type have concrete `__slots__` defined?
# If class does not have `__slots__` defined then it is `None`,
# if it has empty `__slots__` then it is an empty set.
slots: set[str] | None
# The attributes 'assuming' and 'assuming_proper' represent structural subtype matrices.
#
# In languages with structural subtyping, one can keep a global subtype matrix like this:
# . A B C .
# A 1 0 0
# B 1 1 1
# C 1 0 1
# .
    # where 1 indicates that the type in the corresponding row is a subtype of the type
    # in the corresponding column. This matrix typically starts filled with all 1's and
# a typechecker tries to "disprove" every subtyping relation using atomic (or nominal) types.
# However, we don't want to keep this huge global state. Instead, we keep the subtype
    # information in the form of a list of pairs (subtype, supertype) shared by all Instances
    # with a given supertype's TypeInfo. When we enter a subtype check we push a pair onto this
    # list, thus assuming that we started with 1 in the corresponding matrix element. Such an
    # algorithm lets us handle recursive and mutually recursive protocols and other kinds of
    # complex situations.
#
    # If concurrent/parallel type checking is added in the future,
# then there should be one matrix per thread/process to avoid false negatives
# during the type checking phase.
assuming: list[tuple[mypy.types.Instance, mypy.types.Instance]]
assuming_proper: list[tuple[mypy.types.Instance, mypy.types.Instance]]
# Ditto for temporary 'inferring' stack of recursive constraint inference.
# It contains Instances of protocol types that appeared as an argument to
# constraints.infer_constraints(). We need 'inferring' to avoid infinite recursion for
# recursive and mutually recursive protocols.
#
    # We make 'assuming' and 'inferring' attributes here instead of passing them as kwargs,
    # since that would require passing them through many dozens of calls. In particular,
# there is a dependency infer_constraint -> is_subtype -> is_callable_subtype ->
# -> infer_constraints.
inferring: list[mypy.types.Instance]
# 'inferring' and 'assuming' can't be made sets, since we need to use
# is_same_type to correctly treat unions.
# Classes inheriting from Enum shadow their true members with a __getattr__, so we
# have to treat them as a special case.
is_enum: bool
# If true, any unknown attributes should have type 'Any' instead
# of generating a type error. This would be true if there is a
# base class with type 'Any', but other use cases may be
# possible. This is similar to having __getattr__ that returns Any
# (and __setattr__), but without the __getattr__ method.
fallback_to_any: bool
# Same as above but for cases where metaclass has type Any. This will suppress
# all attribute errors only for *class object* access.
meta_fallback_to_any: bool
# Information related to type annotations.
# Generic type variable names (full names)
type_vars: list[str]
# Whether this class has a ParamSpec type variable
has_param_spec_type: bool
# Direct base classes.
bases: list[mypy.types.Instance]
# Another type which this type will be treated as a subtype of,
# even though it's not a subclass in Python. The non-standard
# `@_promote` decorator introduces this, and there are also
# several builtin examples, in particular `int` -> `float`.
_promote: list[mypy.types.ProperType]
# This is used for promoting native integer types such as 'i64' to
# 'int'. (_promote is used for the other direction.) This only
    # supports one-step promotions (e.g., i64 -> int, not
    # i64 -> int -> float), and this isn't used to promote in joins.
#
# This results in some unintuitive results, such as that even
# though i64 is compatible with int and int is compatible with
# float, i64 is *not* compatible with float.
alt_promote: mypy.types.Instance | None
# Representation of a Tuple[...] base class, if the class has any
# (e.g., for named tuples). If this is not None, the actual Type
# object used for this class is not an Instance but a TupleType;
# the corresponding Instance is set as the fallback type of the
# tuple type.
tuple_type: mypy.types.TupleType | None
# Is this a named tuple type?
is_named_tuple: bool
# If this class is defined by the TypedDict type constructor,
# then this is not None.
typeddict_type: mypy.types.TypedDictType | None
# Is this a newtype type?
is_newtype: bool
# Is this a synthesized intersection type?
is_intersection: bool
# This is a dictionary that will be serialized and un-serialized as is.
# It is useful for plugins to add their data to save in the cache.
metadata: dict[str, JsonDict]
# Store type alias representing this type (for named tuples and TypedDicts).
# Although definitions of these types are stored in symbol tables as TypeInfo,
    # when a type analyzer finds them, it should construct a TupleType or
# a TypedDict type. However, we can't use the plain types, since if the definition
# is recursive, this will create an actual recursive structure of types (i.e. as
# internal Python objects) causing infinite recursions everywhere during type checking.
# To overcome this, we create a TypeAlias node, that will point to these types.
# We store this node in the `special_alias` attribute, because it must be the same node
# in case we are doing multiple semantic analysis passes.
special_alias: TypeAlias | None
# Shared type variable for typing.Self in this class (if used, otherwise None).
self_type: mypy.types.TypeVarType | None
# Added if the corresponding class is directly decorated with `typing.dataclass_transform`
dataclass_transform_spec: DataclassTransformSpec | None
# Is set to `True` when class is decorated with `@typing.type_check_only`
is_type_check_only: bool
FLAGS: Final = [
"is_abstract",
"is_enum",
"fallback_to_any",
"meta_fallback_to_any",
"is_named_tuple",
"is_newtype",
"is_protocol",
"runtime_protocol",
"is_final",
"is_intersection",
]
def __init__(self, names: SymbolTable, defn: ClassDef, module_name: str) -> None:
"""Initialize a TypeInfo."""
super().__init__()
self._fullname = defn.fullname
self.names = names
self.defn = defn
self.module_name = module_name
self.type_vars = []
self.has_param_spec_type = False
self.has_type_var_tuple_type = False
self.bases = []
self.mro = []
self._mro_refs = None
self.bad_mro = False
self.declared_metaclass = None
self.metaclass_type = None
self.is_abstract = False
self.abstract_attributes = []
self.deletable_attributes = []
self.slots = None
self.assuming = []
self.assuming_proper = []
self.inferring = []
self.is_protocol = False
self.runtime_protocol = False
self.type_var_tuple_prefix: int | None = None
self.type_var_tuple_suffix: int | None = None
self.add_type_vars()
self.is_final = False
self.is_enum = False
self.fallback_to_any = False
self.meta_fallback_to_any = False
self._promote = []
self.alt_promote = None
self.tuple_type = None
self.special_alias = None
self.is_named_tuple = False
self.typeddict_type = None
self.is_newtype = False
self.is_intersection = False
self.metadata = {}
self.self_type = None
self.dataclass_transform_spec = None
self.is_type_check_only = False
def add_type_vars(self) -> None:
self.has_type_var_tuple_type = False
if self.defn.type_vars:
for i, vd in enumerate(self.defn.type_vars):
if isinstance(vd, mypy.types.ParamSpecType):
self.has_param_spec_type = True
if isinstance(vd, mypy.types.TypeVarTupleType):
assert not self.has_type_var_tuple_type
self.has_type_var_tuple_type = True
self.type_var_tuple_prefix = i
self.type_var_tuple_suffix = len(self.defn.type_vars) - i - 1
self.type_vars.append(vd.name)
@property
def name(self) -> str:
"""Short name."""
return self.defn.name
@property
def fullname(self) -> str:
return self._fullname
def is_generic(self) -> bool:
"""Is the type generic (i.e. does it have type variables)?"""
return len(self.type_vars) > 0
def get(self, name: str) -> SymbolTableNode | None:
for cls in self.mro:
n = cls.names.get(name)
if n:
return n
return None
def get_containing_type_info(self, name: str) -> TypeInfo | None:
for cls in self.mro:
if name in cls.names:
return cls
return None
@property
def protocol_members(self) -> list[str]:
# Protocol members are names of all attributes/methods defined in a protocol
# and in all its supertypes (except for 'object').
members: set[str] = set()
assert self.mro, "This property can be only accessed after MRO is (re-)calculated"
for base in self.mro[:-1]: # we skip "object" since everyone implements it
if base.is_protocol:
for name, node in base.names.items():
if isinstance(node.node, (TypeAlias, TypeVarExpr, MypyFile)):
# These are auxiliary definitions (and type aliases are prohibited).
continue
if name in EXCLUDED_PROTOCOL_ATTRIBUTES:
continue
members.add(name)
return sorted(members)
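    # Illustrative sketch (not normative): for `class P(Protocol): x: int` with a
    # method `def f(self) -> None: ...`, protocol_members would be roughly
    # ["f", "x"] (sorted, with dunders from EXCLUDED_PROTOCOL_ATTRIBUTES filtered out).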
def __getitem__(self, name: str) -> SymbolTableNode:
n = self.get(name)
if n:
return n
else:
raise KeyError(name)
def __repr__(self) -> str:
return f"<TypeInfo {self.fullname}>"
def __bool__(self) -> bool:
# We defined this here instead of just overriding it in
# FakeInfo so that mypyc can generate a direct call instead of
# using the generic bool handling.
return not isinstance(self, FakeInfo)
def has_readable_member(self, name: str) -> bool:
return self.get(name) is not None
def get_method(self, name: str) -> FuncBase | Decorator | None:
for cls in self.mro:
if name in cls.names:
node = cls.names[name].node
if isinstance(node, FuncBase):
return node
elif isinstance(node, Decorator): # Two `if`s make `mypyc` happy
return node
else:
return None
return None
def calculate_metaclass_type(self) -> mypy.types.Instance | None:
declared = self.declared_metaclass
if declared is not None and not declared.type.has_base("builtins.type"):
return declared
if self._fullname == "builtins.type":
return mypy.types.Instance(self, [])
candidates = [
s.declared_metaclass
for s in self.mro
if s.declared_metaclass is not None and s.declared_metaclass.type is not None
]
for c in candidates:
if all(other.type in c.type.mro for other in candidates):
return c
return None
def is_metaclass(self) -> bool:
return (
self.has_base("builtins.type")
or self.fullname == "abc.ABCMeta"
or self.fallback_to_any
)
def has_base(self, fullname: str) -> bool:
"""Return True if type has a base type with the specified name.
This can be either via extension or via implementation.
"""
for cls in self.mro:
if cls.fullname == fullname:
return True
return False
def direct_base_classes(self) -> list[TypeInfo]:
"""Return a direct base classes.
Omit base classes of other base classes.
"""
return [base.type for base in self.bases]
def update_tuple_type(self, typ: mypy.types.TupleType) -> None:
"""Update tuple_type and special_alias as needed."""
self.tuple_type = typ
alias = TypeAlias.from_tuple_type(self)
if not self.special_alias:
self.special_alias = alias
else:
self.special_alias.target = alias.target
def update_typeddict_type(self, typ: mypy.types.TypedDictType) -> None:
"""Update typeddict_type and special_alias as needed."""
self.typeddict_type = typ
alias = TypeAlias.from_typeddict_type(self)
if not self.special_alias:
self.special_alias = alias
else:
self.special_alias.target = alias.target
def __str__(self) -> str:
"""Return a string representation of the type.
This includes the most important information about the type.
"""
options = Options()
return self.dump(
str_conv=mypy.strconv.StrConv(options=options),
type_str_conv=mypy.types.TypeStrVisitor(options=options),
)
def dump(
self, str_conv: mypy.strconv.StrConv, type_str_conv: mypy.types.TypeStrVisitor
) -> str:
"""Return a string dump of the contents of the TypeInfo."""
base: str = ""
def type_str(typ: mypy.types.Type) -> str:
return typ.accept(type_str_conv)
head = "TypeInfo" + str_conv.format_id(self)
if self.bases:
base = f"Bases({', '.join(type_str(base) for base in self.bases)})"
mro = "Mro({})".format(
", ".join(item.fullname + str_conv.format_id(item) for item in self.mro)
)
names = []
for name in sorted(self.names):
description = name + str_conv.format_id(self.names[name].node)
node = self.names[name].node
if isinstance(node, Var) and node.type:
description += f" ({type_str(node.type)})"
names.append(description)
items = [f"Name({self.fullname})", base, mro, ("Names", names)]
if self.declared_metaclass:
items.append(f"DeclaredMetaclass({type_str(self.declared_metaclass)})")
if self.metaclass_type:
items.append(f"MetaclassType({type_str(self.metaclass_type)})")
return mypy.strconv.dump_tagged(items, head, str_conv=str_conv)
def serialize(self) -> JsonDict:
# NOTE: This is where all ClassDefs originate, so there shouldn't be duplicates.
data = {
".class": "TypeInfo",
"module_name": self.module_name,
"fullname": self.fullname,
"names": self.names.serialize(self.fullname),
"defn": self.defn.serialize(),
"abstract_attributes": self.abstract_attributes,
"type_vars": self.type_vars,
"has_param_spec_type": self.has_param_spec_type,
"bases": [b.serialize() for b in self.bases],
"mro": [c.fullname for c in self.mro],
"_promote": [p.serialize() for p in self._promote],
"alt_promote": None if self.alt_promote is None else self.alt_promote.serialize(),
"declared_metaclass": (
None if self.declared_metaclass is None else self.declared_metaclass.serialize()
),
"metaclass_type": (
None if self.metaclass_type is None else self.metaclass_type.serialize()
),
"tuple_type": None if self.tuple_type is None else self.tuple_type.serialize(),
"typeddict_type": (
None if self.typeddict_type is None else self.typeddict_type.serialize()
),
"flags": get_flags(self, TypeInfo.FLAGS),
"metadata": self.metadata,
"slots": sorted(self.slots) if self.slots is not None else None,
"deletable_attributes": self.deletable_attributes,
"self_type": self.self_type.serialize() if self.self_type is not None else None,
"dataclass_transform_spec": (
self.dataclass_transform_spec.serialize()
if self.dataclass_transform_spec is not None
else None
),
}
return data
@classmethod
def deserialize(cls, data: JsonDict) -> TypeInfo:
names = SymbolTable.deserialize(data["names"])
defn = ClassDef.deserialize(data["defn"])
module_name = data["module_name"]
ti = TypeInfo(names, defn, module_name)
ti._fullname = data["fullname"]
# TODO: Is there a reason to reconstruct ti.subtypes?
ti.abstract_attributes = [(attr[0], attr[1]) for attr in data["abstract_attributes"]]
ti.type_vars = data["type_vars"]
ti.has_param_spec_type = data["has_param_spec_type"]
ti.bases = [mypy.types.Instance.deserialize(b) for b in data["bases"]]
_promote = []
for p in data["_promote"]:
t = mypy.types.deserialize_type(p)
assert isinstance(t, mypy.types.ProperType)
_promote.append(t)
ti._promote = _promote
ti.alt_promote = (
None
if data["alt_promote"] is None
else mypy.types.Instance.deserialize(data["alt_promote"])
)
ti.declared_metaclass = (
None
if data["declared_metaclass"] is None
else mypy.types.Instance.deserialize(data["declared_metaclass"])
)
ti.metaclass_type = (
None
if data["metaclass_type"] is None
else mypy.types.Instance.deserialize(data["metaclass_type"])
)
# NOTE: ti.mro will be set in the fixup phase based on these
# names. The reason we need to store the mro instead of just
# recomputing it from base classes has to do with a subtle
# point about fine-grained incremental: the cache files might
# not be loaded until after a class in the mro has changed its
# bases, which causes the mro to change. If we recomputed our
# mro, we would compute the *new* mro, which leaves us with no
# way to detect that the mro has changed! Thus we need to make
# sure to load the original mro so that once the class is
# rechecked, it can tell that the mro has changed.
ti._mro_refs = data["mro"]
ti.tuple_type = (
None
if data["tuple_type"] is None
else mypy.types.TupleType.deserialize(data["tuple_type"])
)
ti.typeddict_type = (
None
if data["typeddict_type"] is None
else mypy.types.TypedDictType.deserialize(data["typeddict_type"])
)
ti.metadata = data["metadata"]
ti.slots = set(data["slots"]) if data["slots"] is not None else None
ti.deletable_attributes = data["deletable_attributes"]
set_flags(ti, data["flags"])
st = data["self_type"]
ti.self_type = mypy.types.TypeVarType.deserialize(st) if st is not None else None
if data.get("dataclass_transform_spec") is not None:
ti.dataclass_transform_spec = DataclassTransformSpec.deserialize(
data["dataclass_transform_spec"]
)
return ti
class FakeInfo(TypeInfo):
__slots__ = ("msg",)
# types.py defines a single instance of this class, called types.NOT_READY.
# This instance is used as a temporary placeholder in the process of de-serialization
# of 'Instance' types. The de-serialization happens in two steps: In the first step,
# Instance.type is set to NOT_READY. In the second step (in fixup.py) it is replaced by
# an actual TypeInfo. If you see the assertion error below, then most probably something
# went wrong during the second step and an 'Instance' that raised this error was not fixed.
# Note:
# 'None' is not used as a dummy value for two reasons:
# 1. This will require around 80-100 asserts to make 'mypy --strict-optional mypy'
# pass cleanly.
# 2. If NOT_READY value is accidentally used somewhere, it will be obvious where the value
# is from, whereas a 'None' value could come from anywhere.
#
# Additionally, this serves as a more general-purpose placeholder
# for missing TypeInfos in a number of places where the excuses
# for not being Optional are a little weaker.
#
# TypeInfo defines a __bool__ method that returns False for FakeInfo
# so that it can be conveniently tested against in the same way that it
# would be if things were properly optional.
def __init__(self, msg: str) -> None:
self.msg = msg
def __getattribute__(self, attr: str) -> type:
# Handle __class__ so that isinstance still works...
if attr == "__class__":
return object.__getattribute__(self, attr) # type: ignore[no-any-return]
raise AssertionError(object.__getattribute__(self, "msg"))
VAR_NO_INFO: Final[TypeInfo] = FakeInfo("Var is lacking info")
CLASSDEF_NO_INFO: Final[TypeInfo] = FakeInfo("ClassDef is lacking info")
FUNC_NO_INFO: Final[TypeInfo] = FakeInfo("FuncBase for non-methods lack info")
MISSING_FALLBACK: Final = FakeInfo("fallback can't be filled out until semanal")
class TypeAlias(SymbolNode):
"""
A symbol node representing a type alias.
Type alias is a static concept, in contrast to variables with types
like Type[...]. Namely:
* type aliases
- can be used in type context (annotations)
- cannot be re-assigned
* variables with type Type[...]
- cannot be used in type context
- but can be re-assigned
An alias can be defined only by an assignment to a name (not any other lvalues).
Such assignment defines an alias by default. To define a variable,
an explicit Type[...] annotation is required. As an exception,
    at non-global scope a non-subscripted rvalue creates a variable even without
an annotation. This exception exists to accommodate the common use case of
class-valued attributes. See SemanticAnalyzerPass2.check_and_set_up_type_alias
for details.
Aliases can be generic. We use bound type variables for generic aliases, similar
to classes. Essentially, type aliases work as macros that expand textually.
    The definition and expansion rules are as follows:
    1. An alias targeting a generic class without explicit variables acts as
the given class (this doesn't apply to TypedDict, Tuple and Callable, which
are not proper classes but special type constructors):
A = List
AA = List[Any]
x: A # same as List[Any]
x: A[int] # same as List[int]
x: AA # same as List[Any]
x: AA[int] # Error!
C = Callable # Same as Callable[..., Any]
T = Tuple # Same as Tuple[Any, ...]
2. An alias using explicit type variables in its rvalue expects
replacements (type arguments) for these variables. If missing, they
are treated as Any, like for other generics:
B = List[Tuple[T, T]]
x: B # same as List[Tuple[Any, Any]]
x: B[int] # same as List[Tuple[int, int]]
def f(x: B[T]) -> T: ... # without T, Any would be used here
3. An alias can be defined using other aliases. In the definition
rvalue the Any substitution doesn't happen for top level unsubscripted
generic classes:
A = List
B = A # here A is expanded to List, _not_ List[Any],
# to match the Python runtime behaviour
x: B[int] # same as List[int]
C = List[A] # this expands to List[List[Any]]
AA = List[T]
D = AA # here AA expands to List[Any]
x: D[int] # Error!
Note: the fact that we support aliases like `A = List` means that the target
type will be initially an instance type with wrong number of type arguments.
Such instances are all fixed either during or after main semantic analysis passes.
We therefore store the difference between `List` and `List[Any]` rvalues (targets)
using the `no_args` flag. See also TypeAliasExpr.no_args.
Meaning of other fields:
target: The target type. For generic aliases contains bound type variables
as nested types (currently TypeVar and ParamSpec are supported).
_fullname: Qualified name of this type alias. This is used in particular
to track fine grained dependencies from aliases.
alias_tvars: Type variables used to define this alias.
normalized: Used to distinguish between `A = List`, and `A = list`. Both
are internally stored using `builtins.list` (because `typing.List` is
itself an alias), while the second cannot be subscripted because of
Python runtime limitation.
line and column: Line and column on the original alias definition.
eager: If True, immediately expand alias when referred to (useful for aliases
within functions that can't be looked up from the symbol table)
"""
__slots__ = (
"target",
"_fullname",
"alias_tvars",
"no_args",
"normalized",
"_is_recursive",
"eager",
"tvar_tuple_index",
"python_3_12_type_alias",
)
__match_args__ = ("name", "target", "alias_tvars", "no_args")
def __init__(
self,
target: mypy.types.Type,
fullname: str,
line: int,
column: int,
*,
alias_tvars: list[mypy.types.TypeVarLikeType] | None = None,
no_args: bool = False,
normalized: bool = False,
eager: bool = False,
python_3_12_type_alias: bool = False,
) -> None:
self._fullname = fullname
self.target = target
if alias_tvars is None:
alias_tvars = []
self.alias_tvars = alias_tvars
self.no_args = no_args
self.normalized = normalized
# This attribute is manipulated by TypeAliasType. If non-None,
# it is the cached value.
self._is_recursive: bool | None = None
self.eager = eager
self.python_3_12_type_alias = python_3_12_type_alias
self.tvar_tuple_index = None
for i, t in enumerate(alias_tvars):
if isinstance(t, mypy.types.TypeVarTupleType):
self.tvar_tuple_index = i
super().__init__(line, column)
@classmethod
def from_tuple_type(cls, info: TypeInfo) -> TypeAlias:
"""Generate an alias to the tuple type described by a given TypeInfo.
NOTE: this doesn't set type alias type variables (for generic tuple types),
they must be set by the caller (when fully analyzed).
"""
assert info.tuple_type
# TODO: is it possible to refactor this to set the correct type vars here?
return TypeAlias(
info.tuple_type.copy_modified(
# Create an Instance similar to fill_typevars().
fallback=mypy.types.Instance(
info, mypy.types.type_vars_as_args(info.defn.type_vars)
)
),
info.fullname,
info.line,
info.column,
)
@classmethod
def from_typeddict_type(cls, info: TypeInfo) -> TypeAlias:
"""Generate an alias to the TypedDict type described by a given TypeInfo.
NOTE: this doesn't set type alias type variables (for generic TypedDicts),
they must be set by the caller (when fully analyzed).
"""
assert info.typeddict_type
# TODO: is it possible to refactor this to set the correct type vars here?
return TypeAlias(
info.typeddict_type.copy_modified(
# Create an Instance similar to fill_typevars().
fallback=mypy.types.Instance(
info, mypy.types.type_vars_as_args(info.defn.type_vars)
)
),
info.fullname,
info.line,
info.column,
)
@property
def name(self) -> str:
return self._fullname.split(".")[-1]
@property
def fullname(self) -> str:
return self._fullname
@property
def has_param_spec_type(self) -> bool:
return any(isinstance(v, mypy.types.ParamSpecType) for v in self.alias_tvars)
def serialize(self) -> JsonDict:
data: JsonDict = {
".class": "TypeAlias",
"fullname": self._fullname,
"target": self.target.serialize(),
"alias_tvars": [v.serialize() for v in self.alias_tvars],
"no_args": self.no_args,
"normalized": self.normalized,
"line": self.line,
"column": self.column,
"python_3_12_type_alias": self.python_3_12_type_alias,
}
return data
def accept(self, visitor: NodeVisitor[T]) -> T:
return visitor.visit_type_alias(self)
@classmethod
def deserialize(cls, data: JsonDict) -> TypeAlias:
assert data[".class"] == "TypeAlias"
fullname = data["fullname"]
alias_tvars = [mypy.types.deserialize_type(v) for v in data["alias_tvars"]]
assert all(isinstance(t, mypy.types.TypeVarLikeType) for t in alias_tvars)
target = mypy.types.deserialize_type(data["target"])
no_args = data["no_args"]
normalized = data["normalized"]
line = data["line"]
column = data["column"]
python_3_12_type_alias = data["python_3_12_type_alias"]
return cls(
target,
fullname,
line,
column,
alias_tvars=cast(List[mypy.types.TypeVarLikeType], alias_tvars),
no_args=no_args,
normalized=normalized,
python_3_12_type_alias=python_3_12_type_alias,
)
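# Editor's sketch (not part of mypy): constructing a simple, non-generic alias
# node directly. AnyType(TypeOfAny.special_form) is just a convenient stand-in
# target here; real aliases are created by the semantic analyzer.
def _demo_type_alias_node() -> None:
    target = mypy.types.AnyType(mypy.types.TypeOfAny.special_form)
    alias = TypeAlias(target, "pkg.mod.MyAlias", line=1, column=0)
    assert alias.name == "MyAlias"
    assert alias.fullname == "pkg.mod.MyAlias"
    assert not alias.has_param_spec_type  # no alias_tvars were given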
class PlaceholderNode(SymbolNode):
"""Temporary symbol node that will later become a real SymbolNode.
These are only present during semantic analysis when using the new
semantic analyzer. These are created if some essential dependencies
of a definition are not yet complete.
A typical use is for names imported from a module which is still
incomplete (within an import cycle):
from m import f # Initially may create PlaceholderNode
This is particularly important if the imported name shadows a name from
an enclosing scope or builtins:
from m import int # Placeholder avoids mixups with builtins.int
Another case where this is useful is when there is another definition
or assignment:
from m import f
def f() -> None: ...
In the above example, the presence of PlaceholderNode allows us to
handle the second definition as a redefinition.
They are also used to create PlaceholderType instances for types
that refer to incomplete types. Example:
class C(Sequence[C]): ...
We create a PlaceholderNode (with becomes_typeinfo=True) for C so
that the type C in Sequence[C] can be bound.
Attributes:
fullname: Full name of the PlaceholderNode.
node: AST node that contains the definition that caused this to
be created. This is useful for tracking order of incomplete definitions
and for debugging.
becomes_typeinfo: If True, this refers to something that could later
become a TypeInfo. It can't be used with type variables, in
particular, as this would cause issues with class type variable
detection.
The long-term purpose of placeholder nodes/types is to evolve into
something that can support general recursive types.
"""
__slots__ = ("_fullname", "node", "becomes_typeinfo")
def __init__(
self, fullname: str, node: Node, line: int, *, becomes_typeinfo: bool = False
) -> None:
self._fullname = fullname
self.node = node
self.becomes_typeinfo = becomes_typeinfo
self.line = line
@property
def name(self) -> str:
return self._fullname.split(".")[-1]
@property
def fullname(self) -> str:
return self._fullname
def serialize(self) -> JsonDict:
assert False, "PlaceholderNode can't be serialized"
def accept(self, visitor: NodeVisitor[T]) -> T:
return visitor.visit_placeholder_node(self)
class SymbolTableNode:
"""Description of a name binding in a symbol table.
These are only used as values in module (global), function (local)
and class symbol tables (see SymbolTable). The name that is bound is
the key in SymbolTable.
Symbol tables don't contain direct references to AST nodes primarily
because there can be multiple symbol table references to a single
AST node (due to imports and aliases), and different references can
behave differently. This class describes the unique properties of
each reference.
The most fundamental attribute is 'node', which is the AST node that
the name refers to.
The kind is usually one of LDEF, GDEF or MDEF, depending on the scope
of the definition. These three kinds can usually be used
interchangeably and the difference between local, global and class
scopes is mostly descriptive, with no semantic significance.
However, some tools that consume mypy ASTs may care about these so
they should be correct.
Attributes:
node: AST node of definition. Among others, this can be one of
FuncDef, Var, TypeInfo, TypeVarExpr or MypyFile -- or None
for cross_ref that hasn't been fixed up yet.
kind: Kind of node. Possible values:
- LDEF: local definition
- GDEF: global (module-level) definition
- MDEF: class member definition
- UNBOUND_IMPORTED: temporary kind for imported names (we
don't know the final kind yet)
module_public: If False, this name won't be imported via
'from <module> import *'. This has no effect on names within
classes.
module_hidden: If True, the name will be never exported (needed for
stub files)
cross_ref: For deserialized MypyFile nodes, the referenced module
name; for other nodes, optionally the name of the referenced object.
implicit: Was this defined by assignment to self attribute?
plugin_generated: Was this symbol generated by a plugin?
(And therefore needs to be removed in aststrip.)
no_serialize: Do not serialize this node if True. This is used to prevent
keys in the cache that refer to modules on which this file does not
depend. Currently this can happen if there is a module not in build
used e.g. like this:
import a.b.c # type: ignore
This will add a submodule symbol to parent module `a` symbol table,
but `a.b` is _not_ added as its dependency. Therefore, we should
not serialize these symbols as they may not be found during fixup
phase, instead they will be re-added during subsequent patch parents
phase.
TODO: Refactor build.py to make dependency tracking more transparent
and/or refactor look-up functions to not require parent patching.
NOTE: No other attributes should be added to this class unless they
are shared by all node kinds.
"""
__slots__ = (
"kind",
"node",
"module_public",
"module_hidden",
"cross_ref",
"implicit",
"plugin_generated",
"no_serialize",
)
def __init__(
self,
kind: int,
node: SymbolNode | None,
module_public: bool = True,
implicit: bool = False,
module_hidden: bool = False,
*,
plugin_generated: bool = False,
no_serialize: bool = False,
) -> None:
self.kind = kind
self.node = node
self.module_public = module_public
self.implicit = implicit
self.module_hidden = module_hidden
self.cross_ref: str | None = None
self.plugin_generated = plugin_generated
self.no_serialize = no_serialize
@property
def fullname(self) -> str | None:
if self.node is not None:
return self.node.fullname
else:
return None
@property
def type(self) -> mypy.types.Type | None:
node = self.node
if isinstance(node, (Var, SYMBOL_FUNCBASE_TYPES)) and node.type is not None:
return node.type
elif isinstance(node, Decorator):
return node.var.type
else:
return None
def copy(self) -> SymbolTableNode:
new = SymbolTableNode(
self.kind, self.node, self.module_public, self.implicit, self.module_hidden
)
new.cross_ref = self.cross_ref
return new
def __str__(self) -> str:
s = f"{node_kinds[self.kind]}/{short_type(self.node)}"
if isinstance(self.node, SymbolNode):
s += f" ({self.node.fullname})"
# Include declared type of variables and functions.
if self.type is not None:
s += f" : {self.type}"
if self.cross_ref:
s += f" cross_ref:{self.cross_ref}"
return s
def serialize(self, prefix: str, name: str) -> JsonDict:
"""Serialize a SymbolTableNode.
Args:
prefix: full name of the containing module or class; or None
name: name of this object relative to the containing object
"""
data: JsonDict = {".class": "SymbolTableNode", "kind": node_kinds[self.kind]}
if self.module_hidden:
data["module_hidden"] = True
if not self.module_public:
data["module_public"] = False
if self.implicit:
data["implicit"] = True
if self.plugin_generated:
data["plugin_generated"] = True
if isinstance(self.node, MypyFile):
data["cross_ref"] = self.node.fullname
else:
assert self.node is not None, f"{prefix}:{name}"
if prefix is not None:
fullname = self.node.fullname
if (
"." in fullname
and fullname != prefix + "." + name
and not (isinstance(self.node, Var) and self.node.from_module_getattr)
):
assert not isinstance(
self.node, PlaceholderNode
), f"Definition of {fullname} is unexpectedly incomplete"
data["cross_ref"] = fullname
return data
data["node"] = self.node.serialize()
return data
@classmethod
def deserialize(cls, data: JsonDict) -> SymbolTableNode:
assert data[".class"] == "SymbolTableNode"
kind = inverse_node_kinds[data["kind"]]
if "cross_ref" in data:
# This will be fixed up later.
stnode = SymbolTableNode(kind, None)
stnode.cross_ref = data["cross_ref"]
else:
assert "node" in data, data
node = SymbolNode.deserialize(data["node"])
stnode = SymbolTableNode(kind, node)
if "module_hidden" in data:
stnode.module_hidden = data["module_hidden"]
if "module_public" in data:
stnode.module_public = data["module_public"]
if "implicit" in data:
stnode.implicit = data["implicit"]
if "plugin_generated" in data:
stnode.plugin_generated = data["plugin_generated"]
return stnode
class SymbolTable(Dict[str, SymbolTableNode]):
"""Static representation of a namespace dictionary.
This is used for module, class and function namespaces.
"""
__slots__ = ()
def __str__(self) -> str:
a: list[str] = []
for key, value in self.items():
# Filter out the implicit import of builtins.
if isinstance(value, SymbolTableNode):
if (
value.fullname != "builtins"
and (value.fullname or "").split(".")[-1] not in implicit_module_attrs
):
a.append(" " + str(key) + " : " + str(value))
else:
a.append(" <invalid item>")
a = sorted(a)
a.insert(0, "SymbolTable(")
a[-1] += ")"
return "\n".join(a)
def copy(self) -> SymbolTable:
return SymbolTable([(key, node.copy()) for key, node in self.items()])
def serialize(self, fullname: str) -> JsonDict:
data: JsonDict = {".class": "SymbolTable"}
for key, value in self.items():
# Skip __builtins__: it's a reference to the builtins
# module that gets added to every module by
# SemanticAnalyzerPass2.visit_file(), but it shouldn't be
# accessed by users of the module.
if key == "__builtins__" or value.no_serialize:
continue
data[key] = value.serialize(fullname, key)
return data
@classmethod
def deserialize(cls, data: JsonDict) -> SymbolTable:
assert data[".class"] == "SymbolTable"
st = SymbolTable()
for key, value in data.items():
if key != ".class":
st[key] = SymbolTableNode.deserialize(value)
return st
class DataclassTransformSpec:
"""Specifies how a dataclass-like transform should be applied. The fields here are based on the
parameters accepted by `typing.dataclass_transform`."""
__slots__ = (
"eq_default",
"order_default",
"kw_only_default",
"frozen_default",
"field_specifiers",
)
def __init__(
self,
*,
eq_default: bool | None = None,
order_default: bool | None = None,
kw_only_default: bool | None = None,
field_specifiers: tuple[str, ...] | None = None,
# Specified outside of PEP 681:
# frozen_default was added to CPython in https://github.com/python/cpython/pull/99958 citing
# positive discussion in typing-sig
frozen_default: bool | None = None,
) -> None:
self.eq_default = eq_default if eq_default is not None else True
self.order_default = order_default if order_default is not None else False
self.kw_only_default = kw_only_default if kw_only_default is not None else False
self.frozen_default = frozen_default if frozen_default is not None else False
self.field_specifiers = field_specifiers if field_specifiers is not None else ()
def serialize(self) -> JsonDict:
return {
"eq_default": self.eq_default,
"order_default": self.order_default,
"kw_only_default": self.kw_only_default,
"frozen_default": self.frozen_default,
"field_specifiers": list(self.field_specifiers),
}
@classmethod
def deserialize(cls, data: JsonDict) -> DataclassTransformSpec:
return DataclassTransformSpec(
eq_default=data.get("eq_default"),
order_default=data.get("order_default"),
kw_only_default=data.get("kw_only_default"),
frozen_default=data.get("frozen_default"),
field_specifiers=tuple(data.get("field_specifiers", [])),
)
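# Editor's sketch (not part of mypy): DataclassTransformSpec round-trips through
# its JSON form, with unspecified parameters falling back to the defaults
# applied in __init__ (eq=True, order/kw_only/frozen=False).
def _demo_dataclass_transform_spec_roundtrip() -> None:
    spec = DataclassTransformSpec(order_default=True, field_specifiers=("attrs.field",))
    copy = DataclassTransformSpec.deserialize(spec.serialize())
    assert copy.eq_default and copy.order_default and not copy.frozen_default
    assert copy.field_specifiers == ("attrs.field",)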
def get_flags(node: Node, names: list[str]) -> list[str]:
return [name for name in names if getattr(node, name)]
def set_flags(node: Node, flags: list[str]) -> None:
for name in flags:
setattr(node, name, True)
def get_member_expr_fullname(expr: MemberExpr) -> str | None:
"""Return the qualified name representation of a member expression.
Return a string of form foo.bar, foo.bar.baz, or similar, or None if the
argument cannot be represented in this form.
"""
initial: str | None = None
if isinstance(expr.expr, NameExpr):
initial = expr.expr.name
elif isinstance(expr.expr, MemberExpr):
initial = get_member_expr_fullname(expr.expr)
if initial is None:
return None
return f"{initial}.{expr.name}"
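# Editor's sketch (not part of mypy): get_member_expr_fullname flattens nested
# member accesses into a dotted name, and gives up (returns None) when the
# innermost expression is not a plain NameExpr.
def _demo_member_expr_fullname() -> None:
    expr = MemberExpr(MemberExpr(NameExpr("foo"), "bar"), "baz")
    assert get_member_expr_fullname(expr) == "foo.bar.baz"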
deserialize_map: Final = {
key: obj.deserialize
for key, obj in globals().items()
if type(obj) is not FakeInfo
and isinstance(obj, type)
and issubclass(obj, SymbolNode)
and obj is not SymbolNode
}
def check_arg_kinds(
arg_kinds: list[ArgKind], nodes: list[T], fail: Callable[[str, T], None]
) -> None:
is_var_arg = False
is_kw_arg = False
seen_named = False
seen_opt = False
for kind, node in zip(arg_kinds, nodes):
if kind == ARG_POS:
if is_var_arg or is_kw_arg or seen_named or seen_opt:
fail(
"Required positional args may not appear after default, named or var args",
node,
)
break
elif kind == ARG_OPT:
if is_var_arg or is_kw_arg or seen_named:
fail("Positional default args may not appear after named or var args", node)
break
seen_opt = True
elif kind == ARG_STAR:
if is_var_arg or is_kw_arg or seen_named:
fail("Var args may not appear after named or var args", node)
break
is_var_arg = True
elif kind == ARG_NAMED or kind == ARG_NAMED_OPT:
seen_named = True
if is_kw_arg:
fail("A **kwargs argument must be the last argument", node)
break
elif kind == ARG_STAR2:
if is_kw_arg:
fail("You may only have one **kwargs argument", node)
break
is_kw_arg = True
def check_arg_names(
names: Sequence[str | None],
nodes: list[T],
fail: Callable[[str, T], None],
description: str = "function definition",
) -> None:
seen_names: set[str | None] = set()
for name, node in zip(names, nodes):
if name is not None and name in seen_names:
fail(f'Duplicate argument "{name}" in {description}', node)
break
seen_names.add(name)
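# Editor's sketch (not part of mypy): the two checkers above report problems via
# a caller-supplied callback instead of raising, so a tiny collector is enough
# to observe them.
def _demo_arg_checks() -> None:
    problems: list[str] = []
    check_arg_kinds([ARG_OPT, ARG_POS], ["a", "b"], lambda msg, ctx: problems.append(msg))
    check_arg_names(["x", "y", "x"], ["a", "b", "c"], lambda msg, ctx: problems.append(msg))
    assert problems == [
        "Required positional args may not appear after default, named or var args",
        'Duplicate argument "x" in function definition',
    ]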
def is_class_var(expr: NameExpr) -> bool:
"""Return whether the expression is ClassVar[...]"""
if isinstance(expr.node, Var):
return expr.node.is_classvar
return False
def is_final_node(node: SymbolNode | None) -> bool:
"""Check whether `node` corresponds to a final attribute."""
return isinstance(node, (Var, FuncDef, OverloadedFuncDef, Decorator)) and node.is_final
def local_definitions(
names: SymbolTable, name_prefix: str, info: TypeInfo | None = None
) -> Iterator[Definition]:
"""Iterate over local definitions (not imported) in a symbol table.
Recursively iterate over class members and nested classes.
"""
# TODO: What should the name be? Or maybe remove it?
for name, symnode in names.items():
shortname = name
if "-redef" in name:
# Restore original name from mangled name of multiply defined function
shortname = name.split("-redef")[0]
fullname = name_prefix + "." + shortname
node = symnode.node
if node and node.fullname == fullname:
yield fullname, symnode, info
if isinstance(node, TypeInfo):
yield from local_definitions(node.names, fullname, node)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/nodes.py
|
Python
|
NOASSERTION
| 137,130 |
"""Information about Python operators"""
from __future__ import annotations
from typing import Final
# Map from binary operator id to related method name (in Python 3).
op_methods: Final = {
"+": "__add__",
"-": "__sub__",
"*": "__mul__",
"/": "__truediv__",
"%": "__mod__",
"divmod": "__divmod__",
"//": "__floordiv__",
"**": "__pow__",
"@": "__matmul__",
"&": "__and__",
"|": "__or__",
"^": "__xor__",
"<<": "__lshift__",
">>": "__rshift__",
"==": "__eq__",
"!=": "__ne__",
"<": "__lt__",
">=": "__ge__",
">": "__gt__",
"<=": "__le__",
"in": "__contains__",
}
op_methods_to_symbols: Final = {v: k for (k, v) in op_methods.items()}
ops_falling_back_to_cmp: Final = {"__ne__", "__eq__", "__lt__", "__le__", "__gt__", "__ge__"}
ops_with_inplace_method: Final = {
"+",
"-",
"*",
"/",
"%",
"//",
"**",
"@",
"&",
"|",
"^",
"<<",
">>",
}
inplace_operator_methods: Final = {"__i" + op_methods[op][2:] for op in ops_with_inplace_method}
reverse_op_methods: Final = {
"__add__": "__radd__",
"__sub__": "__rsub__",
"__mul__": "__rmul__",
"__truediv__": "__rtruediv__",
"__mod__": "__rmod__",
"__divmod__": "__rdivmod__",
"__floordiv__": "__rfloordiv__",
"__pow__": "__rpow__",
"__matmul__": "__rmatmul__",
"__and__": "__rand__",
"__or__": "__ror__",
"__xor__": "__rxor__",
"__lshift__": "__rlshift__",
"__rshift__": "__rrshift__",
"__eq__": "__eq__",
"__ne__": "__ne__",
"__lt__": "__gt__",
"__ge__": "__le__",
"__gt__": "__lt__",
"__le__": "__ge__",
}
reverse_op_method_names: Final = set(reverse_op_methods.values())
# Suppose we have some class A. When we do A() + A(), Python will only check
# the output of A().__add__(A()) and skip calling the __radd__ method entirely.
# This shortcut is used only for the following methods:
op_methods_that_shortcut: Final = {
"__add__",
"__sub__",
"__mul__",
"__truediv__",
"__mod__",
"__divmod__",
"__floordiv__",
"__pow__",
"__matmul__",
"__and__",
"__or__",
"__xor__",
"__lshift__",
"__rshift__",
}
normal_from_reverse_op: Final = {m: n for n, m in reverse_op_methods.items()}
reverse_op_method_set: Final = set(reverse_op_methods.values())
unary_op_methods: Final = {"-": "__neg__", "+": "__pos__", "~": "__invert__"}
int_op_to_method: Final = {
"==": int.__eq__,
"is": int.__eq__,
"<": int.__lt__,
"<=": int.__le__,
"!=": int.__ne__,
"is not": int.__ne__,
">": int.__gt__,
">=": int.__ge__,
}
flip_ops: Final = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
neg_ops: Final = {
"==": "!=",
"!=": "==",
"is": "is not",
"is not": "is",
"<": ">=",
"<=": ">",
">": "<=",
">=": "<",
}
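# Editor's sketch (not part of mypy): a few relationships between the tables
# above, e.g. how in-place and reflected method names are derived from the
# plain operator methods.
def _demo_operator_tables() -> None:
    assert op_methods["+"] == "__add__"
    assert reverse_op_methods[op_methods["+"]] == "__radd__"
    assert "__iadd__" in inplace_operator_methods
    assert op_methods_to_symbols["__contains__"] == "in"
    assert neg_ops["<"] == ">="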
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/operators.py
|
Python
|
NOASSERTION
| 2,866 |
from __future__ import annotations
import pprint
import re
import sys
import sysconfig
from typing import Any, Callable, Final, Mapping, Pattern
from mypy import defaults
from mypy.errorcodes import ErrorCode, error_codes
from mypy.util import get_class_descriptors, replace_object_state
class BuildType:
STANDARD: Final = 0
MODULE: Final = 1
PROGRAM_TEXT: Final = 2
PER_MODULE_OPTIONS: Final = {
# Please keep this list sorted
"allow_redefinition",
"allow_untyped_globals",
"always_false",
"always_true",
"check_untyped_defs",
"debug_cache",
"disable_error_code",
"disabled_error_codes",
"disallow_any_decorated",
"disallow_any_explicit",
"disallow_any_expr",
"disallow_any_generics",
"disallow_any_unimported",
"disallow_incomplete_defs",
"disallow_subclassing_any",
"disallow_untyped_calls",
"disallow_untyped_decorators",
"disallow_untyped_defs",
"enable_error_code",
"enabled_error_codes",
"extra_checks",
"follow_imports_for_stubs",
"follow_imports",
"ignore_errors",
"ignore_missing_imports",
"implicit_optional",
"implicit_reexport",
"local_partial_types",
"mypyc",
"strict_concatenate",
"strict_equality",
"strict_optional",
"warn_no_return",
"warn_return_any",
"warn_unreachable",
"warn_unused_ignores",
}
OPTIONS_AFFECTING_CACHE: Final = (
PER_MODULE_OPTIONS
| {
"platform",
"bazel",
"old_type_inference",
"plugins",
"disable_bytearray_promotion",
"disable_memoryview_promotion",
}
) - {"debug_cache"}
# Features that are currently (or were recently) incomplete/experimental
TYPE_VAR_TUPLE: Final = "TypeVarTuple"
UNPACK: Final = "Unpack"
PRECISE_TUPLE_TYPES: Final = "PreciseTupleTypes"
NEW_GENERIC_SYNTAX: Final = "NewGenericSyntax"
INLINE_TYPEDDICT: Final = "InlineTypedDict"
INCOMPLETE_FEATURES: Final = frozenset((PRECISE_TUPLE_TYPES, INLINE_TYPEDDICT))
COMPLETE_FEATURES: Final = frozenset((TYPE_VAR_TUPLE, UNPACK, NEW_GENERIC_SYNTAX))
class Options:
"""Options collected from flags."""
def __init__(self) -> None:
# Cache for clone_for_module()
self._per_module_cache: dict[str, Options] | None = None
# -- build options --
self.build_type = BuildType.STANDARD
self.python_version: tuple[int, int] = sys.version_info[:2]
# The executable used to search for PEP 561 packages. If this is None,
# then mypy does not search for PEP 561 packages.
self.python_executable: str | None = sys.executable
# When cross compiling to emscripten, we need to rely on MACHDEP because
# sys.platform is the host build platform, not emscripten.
MACHDEP = sysconfig.get_config_var("MACHDEP")
if MACHDEP == "emscripten":
self.platform = MACHDEP
else:
self.platform = sys.platform
self.custom_typing_module: str | None = None
self.custom_typeshed_dir: str | None = None
# The abspath() version of the above, we compute it once as an optimization.
self.abs_custom_typeshed_dir: str | None = None
self.mypy_path: list[str] = []
self.report_dirs: dict[str, str] = {}
# Show errors in PEP 561 packages/site-packages modules
self.no_silence_site_packages = False
self.no_site_packages = False
self.ignore_missing_imports = False
# Is ignore_missing_imports set in a per-module section
self.ignore_missing_imports_per_module = False
self.follow_imports = "normal" # normal|silent|skip|error
# Whether to respect the follow_imports setting even for stub files.
# Intended to be used for disabling specific stubs.
self.follow_imports_for_stubs = False
# PEP 420 namespace packages
# This allows definitions of packages without __init__.py and allows packages to span
# multiple directories. This flag affects both import discovery and the association of
# input files/modules/packages to the relevant file and fully qualified module name.
self.namespace_packages = True
# Use current directory and MYPYPATH to determine fully qualified module names of files
# passed by automatically considering their subdirectories as packages. This is only
# relevant if namespace packages are enabled, since otherwise examining __init__.py's is
# sufficient to determine module names for files. As a possible alternative, add a single
# top-level __init__.py to your packages.
self.explicit_package_bases = False
# File names, directory names or subpaths to avoid checking
self.exclude: list[str] = []
# disallow_any options
self.disallow_any_generics = False
self.disallow_any_unimported = False
self.disallow_any_expr = False
self.disallow_any_decorated = False
self.disallow_any_explicit = False
# Disallow calling untyped functions from typed ones
self.disallow_untyped_calls = False
# Always allow untyped calls for functions coming from modules/packages
# in this list (each item effectively acts as a prefix match)
self.untyped_calls_exclude: list[str] = []
# Disallow defining untyped (or incompletely typed) functions
self.disallow_untyped_defs = False
# Disallow defining incompletely typed functions
self.disallow_incomplete_defs = False
# Type check unannotated functions
self.check_untyped_defs = False
# Disallow decorating typed functions with untyped decorators
self.disallow_untyped_decorators = False
# Disallow subclassing values of type 'Any'
self.disallow_subclassing_any = False
# Also check typeshed for missing annotations
self.warn_incomplete_stub = False
# Warn about casting an expression to its inferred type
self.warn_redundant_casts = False
# Warn about falling off the end of a function returning non-None
self.warn_no_return = True
# Warn about returning objects of type Any when the function is
# declared with a precise type
self.warn_return_any = False
# Warn about unused '# type: ignore' comments
self.warn_unused_ignores = False
# Warn about unused '[mypy-<pattern>]' or '[[tool.mypy.overrides]]' config sections
self.warn_unused_configs = False
# Files in which to ignore all non-fatal errors
self.ignore_errors = False
# Apply strict None checking
self.strict_optional = True
# Show "note: In function "foo":" messages.
self.show_error_context = False
# Use nicer output (when possible).
self.color_output = True
self.error_summary = True
# Assume arguments with default values of None are Optional
self.implicit_optional = False
# Don't re-export names unless they are imported with `from ... as ...`
self.implicit_reexport = True
# Suppress toplevel errors caused by missing annotations
self.allow_untyped_globals = False
# Allow variable to be redefined with an arbitrary type in the same block
# and the same nesting level as the initialization
self.allow_redefinition = False
# Prohibit equality, identity, and container checks for non-overlapping types.
# This makes 1 == '1', 1 in ['1'], and 1 is '1' errors.
self.strict_equality = False
# Deprecated, use extra_checks instead.
self.strict_concatenate = False
# Enable additional checks that are technically correct but impractical.
self.extra_checks = False
# Report an error for any branches inferred to be unreachable as a result of
# type analysis.
self.warn_unreachable = False
# Variable names considered True
self.always_true: list[str] = []
# Variable names considered False
self.always_false: list[str] = []
# Error codes to disable
self.disable_error_code: list[str] = []
self.disabled_error_codes: set[ErrorCode] = set()
# Error codes to enable
self.enable_error_code: list[str] = []
self.enabled_error_codes: set[ErrorCode] = set()
# Use script name instead of __main__
self.scripts_are_modules = False
# Config file name
self.config_file: str | None = None
# A filename containing a JSON mapping from filenames to
# mtime/size/hash arrays, used to avoid having to recalculate
# source hashes as often.
self.quickstart_file: str | None = None
# A comma-separated list of files/directories for mypy to type check;
# supports globbing
self.files: list[str] | None = None
# A list of packages for mypy to type check
self.packages: list[str] | None = None
# A list of modules for mypy to type check
self.modules: list[str] | None = None
# Write junit.xml to given file
self.junit_xml: str | None = None
self.junit_format: str = "global" # global|per_file
# Caching and incremental checking options
self.incremental = True
self.cache_dir = defaults.CACHE_DIR
self.sqlite_cache = False
self.debug_cache = False
self.skip_version_check = False
self.skip_cache_mtime_checks = False
self.fine_grained_incremental = False
# Include fine-grained dependencies in written cache files
self.cache_fine_grained = False
# Read cache files in fine-grained incremental mode (cache must include dependencies)
self.use_fine_grained_cache = False
# Run tree.serialize() even if cache generation is disabled
self.debug_serialize = False
# Tune certain behaviors when being used as a front-end to mypyc. Set per-module
# in modules being compiled. Not in the config file or command line.
self.mypyc = False
# An internal flag to modify some type-checking logic while
# running inspections (e.g. don't expand function definitions).
# Not in the config file or command line.
self.inspections = False
# Disable the memory optimization of freeing ASTs when
# possible. This isn't exposed as a command line option
# because it is intended for software integrating with
# mypy. (Like mypyc.)
self.preserve_asts = False
# If True, function and class docstrings will be extracted and retained.
# This isn't exposed as a command line option
# because it is intended for software integrating with
# mypy. (Like stubgen.)
self.include_docstrings = False
# Paths of user plugins
self.plugins: list[str] = []
# Per-module options (raw)
self.per_module_options: dict[str, dict[str, object]] = {}
self._glob_options: list[tuple[str, Pattern[str]]] = []
self.unused_configs: set[str] = set()
# -- development options --
self.verbosity = 0 # More verbose messages (for troubleshooting)
self.pdb = False
self.show_traceback = False
self.raise_exceptions = False
self.dump_type_stats = False
self.dump_inference_stats = False
self.dump_build_stats = False
self.enable_incomplete_feature: list[str] = []
self.timing_stats: str | None = None
self.line_checking_stats: str | None = None
# -- test options --
# Stop after the semantic analysis phase
self.semantic_analysis_only = False
# Use stub builtins fixtures to speed up tests
self.use_builtins_fixtures = False
# This should only be set when running certain mypy tests.
# Use this sparingly to avoid tests diverging from non-test behavior.
self.test_env = False
# -- experimental options --
self.shadow_file: list[list[str]] | None = None
self.show_column_numbers: bool = False
self.show_error_end: bool = False
self.hide_error_codes = False
self.show_error_code_links = False
# Use soft word wrap and show trimmed source snippets with error location markers.
self.pretty = False
self.dump_graph = False
self.dump_deps = False
self.logical_deps = False
# If True, partial types can't span a module top level and a function
self.local_partial_types = False
# Some behaviors are changed when using Bazel (https://bazel.build).
self.bazel = False
# If True, export inferred types for all expressions as BuildResult.types
self.export_types = False
# List of package roots -- directories under these are packages even
# if they don't have __init__.py.
self.package_root: list[str] = []
self.cache_map: dict[str, tuple[str, str]] = {}
# Don't properly free objects on exit, just kill the current process.
self.fast_exit = True
# fast path for finding modules from source set
self.fast_module_lookup = False
# Allow empty function bodies even if it is not safe, used for testing only.
self.allow_empty_bodies = False
# Used to transform source code before parsing if not None
# TODO: Make the type precise (AnyStr -> AnyStr)
self.transform_source: Callable[[Any], Any] | None = None
# Print full path to each file in the report.
self.show_absolute_path: bool = False
# Install missing stub packages if True
self.install_types = False
# Install missing stub packages in non-interactive mode (don't prompt for
# confirmation, and don't show any errors)
self.non_interactive = False
# When we encounter errors that may cause many additional errors,
# skip most errors after this many messages have been reported.
# -1 means unlimited.
self.many_errors_threshold = defaults.MANY_ERRORS_THRESHOLD
# Disable new experimental type inference algorithm.
self.old_type_inference = False
# Deprecated reverse version of the above, do not use.
self.new_type_inference = False
# Export line-level, limited, fine-grained dependency information in cache data
# (undocumented feature).
self.export_ref_info = False
self.disable_bytearray_promotion = False
self.disable_memoryview_promotion = False
self.force_uppercase_builtins = False
self.force_union_syntax = False
# Sets custom output format
self.output: str | None = None
def use_lowercase_names(self) -> bool:
if self.python_version >= (3, 9):
return not self.force_uppercase_builtins
return False
def use_or_syntax(self) -> bool:
if self.python_version >= (3, 10):
return not self.force_union_syntax
return False
def use_star_unpack(self) -> bool:
return self.python_version >= (3, 11)
# To avoid breaking plugin compatibility, keep providing new_semantic_analyzer
@property
def new_semantic_analyzer(self) -> bool:
return True
def snapshot(self) -> dict[str, object]:
"""Produce a comparable snapshot of this Option"""
# Under mypyc, we don't have a __dict__, so we need to do worse things.
d = dict(getattr(self, "__dict__", ()))
for k in get_class_descriptors(Options):
if hasattr(self, k) and k != "new_semantic_analyzer":
d[k] = getattr(self, k)
# Remove private attributes from snapshot
d = {k: v for k, v in d.items() if not k.startswith("_")}
return d
def __repr__(self) -> str:
return f"Options({pprint.pformat(self.snapshot())})"
def process_error_codes(self, *, error_callback: Callable[[str], Any]) -> None:
# Process `--enable-error-code` and `--disable-error-code` flags
disabled_codes = set(self.disable_error_code)
enabled_codes = set(self.enable_error_code)
valid_error_codes = set(error_codes.keys())
invalid_codes = (enabled_codes | disabled_codes) - valid_error_codes
if invalid_codes:
error_callback(f"Invalid error code(s): {', '.join(sorted(invalid_codes))}")
self.disabled_error_codes |= {error_codes[code] for code in disabled_codes}
self.enabled_error_codes |= {error_codes[code] for code in enabled_codes}
# Enabling an error code always overrides disabling
self.disabled_error_codes -= self.enabled_error_codes
def process_incomplete_features(
self, *, error_callback: Callable[[str], Any], warning_callback: Callable[[str], Any]
) -> None:
# Validate incomplete features.
for feature in self.enable_incomplete_feature:
if feature not in INCOMPLETE_FEATURES | COMPLETE_FEATURES:
error_callback(f"Unknown incomplete feature: {feature}")
if feature in COMPLETE_FEATURES:
warning_callback(f"Warning: {feature} is already enabled by default")
def apply_changes(self, changes: dict[str, object]) -> Options:
# Note: effects of this method *must* be idempotent.
new_options = Options()
# Under mypyc, we don't have a __dict__, so we need to do worse things.
replace_object_state(new_options, self, copy_dict=True)
for key, value in changes.items():
setattr(new_options, key, value)
if changes.get("ignore_missing_imports"):
# This is the only option for which a per-module and a global
# option sometimes behave differently.
new_options.ignore_missing_imports_per_module = True
# These two act as overrides, so apply them when cloning.
# Similar to global codes enabling overrides disabling, so we start from latter.
new_options.disabled_error_codes = self.disabled_error_codes.copy()
new_options.enabled_error_codes = self.enabled_error_codes.copy()
for code_str in new_options.disable_error_code:
code = error_codes[code_str]
new_options.disabled_error_codes.add(code)
new_options.enabled_error_codes.discard(code)
for code_str in new_options.enable_error_code:
code = error_codes[code_str]
new_options.enabled_error_codes.add(code)
new_options.disabled_error_codes.discard(code)
return new_options
def compare_stable(self, other_snapshot: dict[str, object]) -> bool:
"""Compare options in a way that is stable for snapshot() -> apply_changes() roundtrip.
This is needed because apply_changes() has non-trivial effects for some flags, so
Options().apply_changes(options.snapshot()) may result in a (slightly) different object.
"""
return (
Options().apply_changes(self.snapshot()).snapshot()
== Options().apply_changes(other_snapshot).snapshot()
)
def build_per_module_cache(self) -> None:
self._per_module_cache = {}
# Config precedence is as follows:
# 1. Concrete section names: foo.bar.baz
# 2. "Unstructured" glob patterns: foo.*.baz, in the order
# they appear in the file (last wins)
# 3. "Well-structured" wildcard patterns: foo.bar.*, in specificity order.
# Since structured configs inherit from structured configs above them in the hierarchy,
# we need to process per-module configs in a careful order.
# We have to process foo.* before foo.bar.* before foo.bar,
# and we need to apply *.bar to foo.bar but not to foo.bar.*.
# To do this, process all well-structured glob configs before non-glob configs and
# exploit the fact that foo.* sorts earlier ASCIIbetically (unicodebetically?)
# than foo.bar.*.
# (A section being "processed last" results in its config "winning".)
# Unstructured glob configs are stored and are all checked for each module.
unstructured_glob_keys = [k for k in self.per_module_options.keys() if "*" in k[:-1]]
structured_keys = [k for k in self.per_module_options.keys() if "*" not in k[:-1]]
wildcards = sorted(k for k in structured_keys if k.endswith(".*"))
concrete = [k for k in structured_keys if not k.endswith(".*")]
for glob in unstructured_glob_keys:
self._glob_options.append((glob, self.compile_glob(glob)))
# We (for ease of implementation) treat unstructured glob
# sections as used if any real modules use them or if any
# concrete config sections use them. This means we need to
# track which get used while constructing.
self.unused_configs = set(unstructured_glob_keys)
for key in wildcards + concrete:
# Find what the options for this key would be, just based
# on inheriting from parent configs.
options = self.clone_for_module(key)
# And then update it with its per-module options.
self._per_module_cache[key] = options.apply_changes(self.per_module_options[key])
# Add the more structured sections into unused configs, since
# they only count as used if actually used by a real module.
self.unused_configs.update(structured_keys)
def clone_for_module(self, module: str) -> Options:
"""Create an Options object that incorporates per-module options.
NOTE: Once this method is called all Options objects should be
considered read-only, else the caching might be incorrect.
"""
if self._per_module_cache is None:
self.build_per_module_cache()
assert self._per_module_cache is not None
# If the module just directly has a config entry, use it.
if module in self._per_module_cache:
self.unused_configs.discard(module)
return self._per_module_cache[module]
# If not, search for glob paths at all the parents. So if we are looking for
# options for foo.bar.baz, we search foo.bar.baz.*, foo.bar.*, foo.*,
# in that order, looking for an entry.
# This is technically quadratic in the length of the path, but module paths
# don't actually get all that long.
options = self
path = module.split(".")
for i in range(len(path), 0, -1):
key = ".".join(path[:i] + ["*"])
if key in self._per_module_cache:
self.unused_configs.discard(key)
options = self._per_module_cache[key]
break
# OK and *now* we need to look for unstructured glob matches.
# We only do this for concrete modules, not structured wildcards.
if not module.endswith(".*"):
for key, pattern in self._glob_options:
if pattern.match(module):
self.unused_configs.discard(key)
options = options.apply_changes(self.per_module_options[key])
# We could update the cache to directly point to modules once
# they have been looked up, but in testing this made things
# slower and not faster, so we don't bother.
return options
def compile_glob(self, s: str) -> Pattern[str]:
# Compile one of the glob patterns to a regex so that '.*' can
# match *zero or more* module sections. This means we compile
# '.*' into '(\..*)?'.
parts = s.split(".")
expr = re.escape(parts[0]) if parts[0] != "*" else ".*"
for part in parts[1:]:
expr += re.escape("." + part) if part != "*" else r"(\..*)?"
return re.compile(expr + "\\Z")
def select_options_affecting_cache(self) -> Mapping[str, object]:
result: dict[str, object] = {}
for opt in OPTIONS_AFFECTING_CACHE:
val = getattr(self, opt)
if opt in ("disabled_error_codes", "enabled_error_codes"):
val = sorted([code.code for code in val])
result[opt] = val
return result
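# Editor's sketch (not part of mypy): how per-module config sections are matched.
# compile_glob turns 'foo.*' into a regex where '.*' may match zero or more
# trailing module sections, and clone_for_module applies matching sections.
def _demo_per_module_options() -> None:
    opts = Options()
    pattern = opts.compile_glob("foo.*")
    assert pattern.match("foo") and pattern.match("foo.bar.baz")
    assert not pattern.match("foobar")
    opts.per_module_options = {"foo.*": {"ignore_errors": True}}
    assert opts.clone_for_module("foo.bar").ignore_errors
    assert not opts.clone_for_module("other").ignore_errors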
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/options.py
|
Python
|
NOASSERTION
| 24,365 |
from __future__ import annotations
from mypy.errors import Errors
from mypy.nodes import MypyFile
from mypy.options import Options
def parse(
source: str | bytes,
fnam: str,
module: str | None,
errors: Errors,
options: Options,
raise_on_error: bool = False,
) -> MypyFile:
"""Parse a source file, without doing any semantic analysis.
Return the parse tree. Parse errors are reported via the given 'errors'
object; if raise_on_error is true and any errors were reported, an error is raised after parsing.
The python_version (major, minor) option determines the Python syntax variant.
"""
if options.transform_source is not None:
source = options.transform_source(source)
import mypy.fastparse
tree = mypy.fastparse.parse(source, fnam=fnam, module=module, errors=errors, options=options)
if raise_on_error and errors.is_errors():
errors.raise_error()
return tree
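# Editor's sketch (not part of mypy): parsing a small snippet into a MypyFile
# without any semantic analysis; this assumes the modern Errors(options)
# constructor, and collects any parse errors on that Errors object.
def _demo_parse() -> None:
    options = Options()
    errors = Errors(options)
    tree = parse("x = 1\n", fnam="<demo>", module="demo", errors=errors, options=options)
    assert isinstance(tree, MypyFile)
    assert not errors.is_errors()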
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/parse.py
|
Python
|
NOASSERTION
| 913 |
from __future__ import annotations
from enum import Enum
from mypy import checker, errorcodes
from mypy.messages import MessageBuilder
from mypy.nodes import (
AssertStmt,
AssignmentExpr,
AssignmentStmt,
BreakStmt,
ClassDef,
Context,
ContinueStmt,
DictionaryComprehension,
Expression,
ExpressionStmt,
ForStmt,
FuncDef,
FuncItem,
GeneratorExpr,
GlobalDecl,
IfStmt,
Import,
ImportFrom,
LambdaExpr,
ListExpr,
Lvalue,
MatchStmt,
MypyFile,
NameExpr,
NonlocalDecl,
RaiseStmt,
ReturnStmt,
StarExpr,
SymbolTable,
TryStmt,
TupleExpr,
TypeAliasStmt,
WhileStmt,
WithStmt,
implicit_module_attrs,
)
from mypy.options import Options
from mypy.patterns import AsPattern, StarredPattern
from mypy.reachability import ALWAYS_TRUE, infer_pattern_value
from mypy.traverser import ExtendedTraverserVisitor
from mypy.types import Type, UninhabitedType
class BranchState:
"""BranchState contains information about variable definition at the end of a branching statement.
`if` and `match` are examples of branching statements.
`may_be_defined` contains variables that were defined in only some branches.
`must_be_defined` contains variables that were defined in all branches.
"""
def __init__(
self,
must_be_defined: set[str] | None = None,
may_be_defined: set[str] | None = None,
skipped: bool = False,
) -> None:
if may_be_defined is None:
may_be_defined = set()
if must_be_defined is None:
must_be_defined = set()
self.may_be_defined = set(may_be_defined)
self.must_be_defined = set(must_be_defined)
self.skipped = skipped
def copy(self) -> BranchState:
return BranchState(
must_be_defined=set(self.must_be_defined),
may_be_defined=set(self.may_be_defined),
skipped=self.skipped,
)
class BranchStatement:
def __init__(self, initial_state: BranchState | None = None) -> None:
if initial_state is None:
initial_state = BranchState()
self.initial_state = initial_state
self.branches: list[BranchState] = [
BranchState(
must_be_defined=self.initial_state.must_be_defined,
may_be_defined=self.initial_state.may_be_defined,
)
]
def copy(self) -> BranchStatement:
result = BranchStatement(self.initial_state)
result.branches = [b.copy() for b in self.branches]
return result
def next_branch(self) -> None:
self.branches.append(
BranchState(
must_be_defined=self.initial_state.must_be_defined,
may_be_defined=self.initial_state.may_be_defined,
)
)
def record_definition(self, name: str) -> None:
assert len(self.branches) > 0
self.branches[-1].must_be_defined.add(name)
self.branches[-1].may_be_defined.discard(name)
def delete_var(self, name: str) -> None:
assert len(self.branches) > 0
self.branches[-1].must_be_defined.discard(name)
self.branches[-1].may_be_defined.discard(name)
def record_nested_branch(self, state: BranchState) -> None:
assert len(self.branches) > 0
current_branch = self.branches[-1]
if state.skipped:
current_branch.skipped = True
return
current_branch.must_be_defined.update(state.must_be_defined)
current_branch.may_be_defined.update(state.may_be_defined)
current_branch.may_be_defined.difference_update(current_branch.must_be_defined)
def skip_branch(self) -> None:
assert len(self.branches) > 0
self.branches[-1].skipped = True
def is_possibly_undefined(self, name: str) -> bool:
assert len(self.branches) > 0
return name in self.branches[-1].may_be_defined
def is_undefined(self, name: str) -> bool:
assert len(self.branches) > 0
branch = self.branches[-1]
return name not in branch.may_be_defined and name not in branch.must_be_defined
def is_defined_in_a_branch(self, name: str) -> bool:
assert len(self.branches) > 0
for b in self.branches:
if name in b.must_be_defined or name in b.may_be_defined:
return True
return False
def done(self) -> BranchState:
# First, compute all vars, including skipped branches. We include skipped branches
# because our goal is to capture all variables that semantic analyzer would
# consider defined.
all_vars = set()
for b in self.branches:
all_vars.update(b.may_be_defined)
all_vars.update(b.must_be_defined)
# For the rest of the things, we only care about branches that weren't skipped.
non_skipped_branches = [b for b in self.branches if not b.skipped]
if non_skipped_branches:
must_be_defined = non_skipped_branches[0].must_be_defined
for b in non_skipped_branches[1:]:
must_be_defined.intersection_update(b.must_be_defined)
else:
must_be_defined = set()
# Everything that wasn't defined in all branches but was defined
# in at least one branch should be in `may_be_defined`!
may_be_defined = all_vars.difference(must_be_defined)
return BranchState(
must_be_defined=must_be_defined,
may_be_defined=may_be_defined,
skipped=len(non_skipped_branches) == 0,
)
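# Editor's sketch (not part of mypy): a BranchStatement modelling an if/else
# where only the first branch defines "x"; done() then classifies "x" as
# possibly (but not necessarily) defined.
def _demo_branch_statement() -> None:
    stmt = BranchStatement()
    stmt.record_definition("x")  # if ...: x = 1
    stmt.next_branch()           # else: pass
    state = stmt.done()
    assert "x" in state.may_be_defined
    assert "x" not in state.must_be_defined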
class ScopeType(Enum):
Global = 1
Class = 2
Func = 3
Generator = 4
class Scope:
def __init__(self, stmts: list[BranchStatement], scope_type: ScopeType) -> None:
self.branch_stmts: list[BranchStatement] = stmts
self.scope_type = scope_type
self.undefined_refs: dict[str, set[NameExpr]] = {}
def copy(self) -> Scope:
result = Scope([s.copy() for s in self.branch_stmts], self.scope_type)
result.undefined_refs = self.undefined_refs.copy()
return result
def record_undefined_ref(self, o: NameExpr) -> None:
if o.name not in self.undefined_refs:
self.undefined_refs[o.name] = set()
self.undefined_refs[o.name].add(o)
def pop_undefined_ref(self, name: str) -> set[NameExpr]:
return self.undefined_refs.pop(name, set())
class DefinedVariableTracker:
"""DefinedVariableTracker manages the state and scope for the UndefinedVariablesVisitor."""
def __init__(self) -> None:
# There's always at least one scope. Within each scope, there's at least one "global" BranchStatement.
self.scopes: list[Scope] = [Scope([BranchStatement()], ScopeType.Global)]
# disable_branch_skip is used to disable skipping a branch due to a return/raise/etc. This is useful
# in things like try/except/finally statements.
self.disable_branch_skip = False
def copy(self) -> DefinedVariableTracker:
result = DefinedVariableTracker()
result.scopes = [s.copy() for s in self.scopes]
result.disable_branch_skip = self.disable_branch_skip
return result
def _scope(self) -> Scope:
assert len(self.scopes) > 0
return self.scopes[-1]
def enter_scope(self, scope_type: ScopeType) -> None:
assert len(self._scope().branch_stmts) > 0
initial_state = None
if scope_type == ScopeType.Generator:
# Generators are special because they inherit the outer scope.
initial_state = self._scope().branch_stmts[-1].branches[-1]
self.scopes.append(Scope([BranchStatement(initial_state)], scope_type))
def exit_scope(self) -> None:
self.scopes.pop()
def in_scope(self, scope_type: ScopeType) -> bool:
return self._scope().scope_type == scope_type
def start_branch_statement(self) -> None:
assert len(self._scope().branch_stmts) > 0
self._scope().branch_stmts.append(
BranchStatement(self._scope().branch_stmts[-1].branches[-1])
)
def next_branch(self) -> None:
assert len(self._scope().branch_stmts) > 1
self._scope().branch_stmts[-1].next_branch()
def end_branch_statement(self) -> None:
assert len(self._scope().branch_stmts) > 1
result = self._scope().branch_stmts.pop().done()
self._scope().branch_stmts[-1].record_nested_branch(result)
def skip_branch(self) -> None:
# Only skip branch if we're outside of "root" branch statement.
if len(self._scope().branch_stmts) > 1 and not self.disable_branch_skip:
self._scope().branch_stmts[-1].skip_branch()
def record_definition(self, name: str) -> None:
assert len(self.scopes) > 0
assert len(self.scopes[-1].branch_stmts) > 0
self._scope().branch_stmts[-1].record_definition(name)
def delete_var(self, name: str) -> None:
assert len(self.scopes) > 0
assert len(self.scopes[-1].branch_stmts) > 0
self._scope().branch_stmts[-1].delete_var(name)
def record_undefined_ref(self, o: NameExpr) -> None:
"""Records an undefined reference. These can later be retrieved via `pop_undefined_ref`."""
assert len(self.scopes) > 0
self._scope().record_undefined_ref(o)
def pop_undefined_ref(self, name: str) -> set[NameExpr]:
"""If name has previously been reported as undefined, the NameExpr that was called will be returned."""
assert len(self.scopes) > 0
return self._scope().pop_undefined_ref(name)
def is_possibly_undefined(self, name: str) -> bool:
assert len(self._scope().branch_stmts) > 0
# A variable is undefined if it's in a set of `may_be_defined` but not in `must_be_defined`.
return self._scope().branch_stmts[-1].is_possibly_undefined(name)
def is_defined_in_different_branch(self, name: str) -> bool:
"""This will return true if a variable is defined in a branch that's not the current branch."""
assert len(self._scope().branch_stmts) > 0
stmt = self._scope().branch_stmts[-1]
if not stmt.is_undefined(name):
return False
for stmt in self._scope().branch_stmts:
if stmt.is_defined_in_a_branch(name):
return True
return False
def is_undefined(self, name: str) -> bool:
assert len(self._scope().branch_stmts) > 0
return self._scope().branch_stmts[-1].is_undefined(name)
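# Editor's sketch (not part of mypy): the tracker combines the branch machinery
# above; after a branching statement where "x" was defined in only one branch,
# it reports "x" as possibly undefined in the enclosing scope.
def _demo_tracker() -> None:
    tracker = DefinedVariableTracker()
    tracker.start_branch_statement()
    tracker.record_definition("x")
    tracker.next_branch()
    tracker.end_branch_statement()
    assert tracker.is_possibly_undefined("x")
    assert not tracker.is_undefined("x")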
class Loop:
def __init__(self) -> None:
self.has_break = False
class PossiblyUndefinedVariableVisitor(ExtendedTraverserVisitor):
"""Detects the following cases:
- A variable that's defined only part of the time.
- If a variable is used before definition
An example of a partial definition:
if foo():
x = 1
print(x) # Error: "x" may be undefined.
Example of a used before definition:
x = y
y: int = 2
Note that this code does not detect variables not defined in any of the branches -- that is
handled by the semantic analyzer.
"""
def __init__(
self,
msg: MessageBuilder,
type_map: dict[Expression, Type],
options: Options,
names: SymbolTable,
) -> None:
self.msg = msg
self.type_map = type_map
self.options = options
self.builtins = SymbolTable()
builtins_mod = names.get("__builtins__", None)
if builtins_mod:
assert isinstance(builtins_mod.node, MypyFile)
self.builtins = builtins_mod.node.names
self.loops: list[Loop] = []
self.try_depth = 0
self.tracker = DefinedVariableTracker()
for name in implicit_module_attrs:
self.tracker.record_definition(name)
def var_used_before_def(self, name: str, context: Context) -> None:
if self.msg.errors.is_error_code_enabled(errorcodes.USED_BEFORE_DEF):
self.msg.var_used_before_def(name, context)
def variable_may_be_undefined(self, name: str, context: Context) -> None:
if self.msg.errors.is_error_code_enabled(errorcodes.POSSIBLY_UNDEFINED):
self.msg.variable_may_be_undefined(name, context)
def process_definition(self, name: str) -> None:
# Was this name previously used? If yes, it's a used-before-definition error.
if not self.tracker.in_scope(ScopeType.Class):
refs = self.tracker.pop_undefined_ref(name)
for ref in refs:
if self.loops:
self.variable_may_be_undefined(name, ref)
else:
self.var_used_before_def(name, ref)
else:
# Errors in class scopes are caught by the semantic analyzer.
pass
self.tracker.record_definition(name)
def visit_global_decl(self, o: GlobalDecl) -> None:
for name in o.names:
self.process_definition(name)
super().visit_global_decl(o)
def visit_nonlocal_decl(self, o: NonlocalDecl) -> None:
for name in o.names:
self.process_definition(name)
super().visit_nonlocal_decl(o)
def process_lvalue(self, lvalue: Lvalue | None) -> None:
if isinstance(lvalue, NameExpr):
self.process_definition(lvalue.name)
elif isinstance(lvalue, StarExpr):
self.process_lvalue(lvalue.expr)
elif isinstance(lvalue, (ListExpr, TupleExpr)):
for item in lvalue.items:
self.process_lvalue(item)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
for lvalue in o.lvalues:
self.process_lvalue(lvalue)
super().visit_assignment_stmt(o)
def visit_assignment_expr(self, o: AssignmentExpr) -> None:
o.value.accept(self)
self.process_lvalue(o.target)
def visit_if_stmt(self, o: IfStmt) -> None:
for e in o.expr:
e.accept(self)
self.tracker.start_branch_statement()
for b in o.body:
if b.is_unreachable:
continue
b.accept(self)
self.tracker.next_branch()
if o.else_body:
if not o.else_body.is_unreachable:
o.else_body.accept(self)
else:
self.tracker.skip_branch()
self.tracker.end_branch_statement()
def visit_match_stmt(self, o: MatchStmt) -> None:
o.subject.accept(self)
self.tracker.start_branch_statement()
for i in range(len(o.patterns)):
pattern = o.patterns[i]
pattern.accept(self)
guard = o.guards[i]
if guard is not None:
guard.accept(self)
if not o.bodies[i].is_unreachable:
o.bodies[i].accept(self)
else:
self.tracker.skip_branch()
is_catchall = infer_pattern_value(pattern) == ALWAYS_TRUE
if not is_catchall:
self.tracker.next_branch()
self.tracker.end_branch_statement()
def visit_func_def(self, o: FuncDef) -> None:
self.process_definition(o.name)
super().visit_func_def(o)
def visit_func(self, o: FuncItem) -> None:
if o.is_dynamic() and not self.options.check_untyped_defs:
return
args = o.arguments or []
# Process initializers (defaults) outside the function scope.
for arg in args:
if arg.initializer is not None:
arg.initializer.accept(self)
self.tracker.enter_scope(ScopeType.Func)
for arg in args:
self.process_definition(arg.variable.name)
super().visit_var(arg.variable)
o.body.accept(self)
self.tracker.exit_scope()
def visit_generator_expr(self, o: GeneratorExpr) -> None:
self.tracker.enter_scope(ScopeType.Generator)
for idx in o.indices:
self.process_lvalue(idx)
super().visit_generator_expr(o)
self.tracker.exit_scope()
def visit_dictionary_comprehension(self, o: DictionaryComprehension) -> None:
self.tracker.enter_scope(ScopeType.Generator)
for idx in o.indices:
self.process_lvalue(idx)
super().visit_dictionary_comprehension(o)
self.tracker.exit_scope()
def visit_for_stmt(self, o: ForStmt) -> None:
o.expr.accept(self)
self.process_lvalue(o.index)
o.index.accept(self)
self.tracker.start_branch_statement()
loop = Loop()
self.loops.append(loop)
o.body.accept(self)
self.tracker.next_branch()
self.tracker.end_branch_statement()
if o.else_body is not None:
# If the loop has a `break` inside, `else` is executed conditionally.
            # If the loop doesn't have a `break`, either the function will return or
            # the `else` will execute.
has_break = loop.has_break
if has_break:
self.tracker.start_branch_statement()
self.tracker.next_branch()
o.else_body.accept(self)
if has_break:
self.tracker.end_branch_statement()
self.loops.pop()
def visit_return_stmt(self, o: ReturnStmt) -> None:
super().visit_return_stmt(o)
self.tracker.skip_branch()
def visit_lambda_expr(self, o: LambdaExpr) -> None:
self.tracker.enter_scope(ScopeType.Func)
super().visit_lambda_expr(o)
self.tracker.exit_scope()
def visit_assert_stmt(self, o: AssertStmt) -> None:
super().visit_assert_stmt(o)
if checker.is_false_literal(o.expr):
self.tracker.skip_branch()
def visit_raise_stmt(self, o: RaiseStmt) -> None:
super().visit_raise_stmt(o)
self.tracker.skip_branch()
def visit_continue_stmt(self, o: ContinueStmt) -> None:
super().visit_continue_stmt(o)
self.tracker.skip_branch()
def visit_break_stmt(self, o: BreakStmt) -> None:
super().visit_break_stmt(o)
if self.loops:
self.loops[-1].has_break = True
self.tracker.skip_branch()
def visit_expression_stmt(self, o: ExpressionStmt) -> None:
if isinstance(self.type_map.get(o.expr, None), (UninhabitedType, type(None))):
self.tracker.skip_branch()
super().visit_expression_stmt(o)
def visit_try_stmt(self, o: TryStmt) -> None:
"""
Note that finding undefined vars in `finally` requires different handling from
the rest of the code. In particular, we want to disallow skipping branches due to jump
statements in except/else clauses for finally but not for other cases. Imagine a case like:
def f() -> int:
try:
x = 1
except:
                # This jump statement needs to be handled differently depending on
                # whether or not we're processing `finally`.
return 0
finally:
# `x` may be undefined here.
pass
# `x` is always defined here.
return x
"""
self.try_depth += 1
if o.finally_body is not None:
# In order to find undefined vars in `finally`, we need to
# process try/except with branch skipping disabled. However, for the rest of the code
# after finally, we need to process try/except with branch skipping enabled.
# Therefore, we need to process try/finally twice.
# Because processing is not idempotent, we should make a copy of the tracker.
old_tracker = self.tracker.copy()
self.tracker.disable_branch_skip = True
self.process_try_stmt(o)
self.tracker = old_tracker
self.process_try_stmt(o)
self.try_depth -= 1
def process_try_stmt(self, o: TryStmt) -> None:
"""
        Processes a try statement by decomposing it into the following:
if ...:
body
else_body
elif ...:
except 1
elif ...:
except 2
else:
except n
finally
"""
self.tracker.start_branch_statement()
o.body.accept(self)
if o.else_body is not None:
o.else_body.accept(self)
if len(o.handlers) > 0:
assert len(o.handlers) == len(o.vars) == len(o.types)
for i in range(len(o.handlers)):
self.tracker.next_branch()
exc_type = o.types[i]
if exc_type is not None:
exc_type.accept(self)
var = o.vars[i]
if var is not None:
self.process_definition(var.name)
var.accept(self)
o.handlers[i].accept(self)
if var is not None:
self.tracker.delete_var(var.name)
self.tracker.end_branch_statement()
if o.finally_body is not None:
o.finally_body.accept(self)
def visit_while_stmt(self, o: WhileStmt) -> None:
o.expr.accept(self)
self.tracker.start_branch_statement()
loop = Loop()
self.loops.append(loop)
o.body.accept(self)
has_break = loop.has_break
if not checker.is_true_literal(o.expr):
# If this is a loop like `while True`, we can consider the body to be
# a single branch statement (we're guaranteed that the body is executed at least once).
# If not, call next_branch() to make all variables defined there conditional.
self.tracker.next_branch()
self.tracker.end_branch_statement()
if o.else_body is not None:
# If the loop has a `break` inside, `else` is executed conditionally.
            # If the loop doesn't have a `break`, either the function will return or
            # the `else` will execute.
if has_break:
self.tracker.start_branch_statement()
self.tracker.next_branch()
if o.else_body:
o.else_body.accept(self)
if has_break:
self.tracker.end_branch_statement()
self.loops.pop()
def visit_as_pattern(self, o: AsPattern) -> None:
if o.name is not None:
self.process_lvalue(o.name)
super().visit_as_pattern(o)
def visit_starred_pattern(self, o: StarredPattern) -> None:
if o.capture is not None:
self.process_lvalue(o.capture)
super().visit_starred_pattern(o)
def visit_name_expr(self, o: NameExpr) -> None:
if o.name in self.builtins and self.tracker.in_scope(ScopeType.Global):
return
if self.tracker.is_possibly_undefined(o.name):
# A variable is only defined in some branches.
self.variable_may_be_undefined(o.name, o)
# We don't want to report the error on the same variable multiple times.
self.tracker.record_definition(o.name)
elif self.tracker.is_defined_in_different_branch(o.name):
# A variable is defined in one branch but used in a different branch.
if self.loops or self.try_depth > 0:
# If we're in a loop or in a try, we can't be sure that this variable
# is undefined. Report it as "may be undefined".
self.variable_may_be_undefined(o.name, o)
else:
self.var_used_before_def(o.name, o)
elif self.tracker.is_undefined(o.name):
# A variable is undefined. It could be due to two things:
# 1. A variable is just totally undefined
# 2. The variable is defined later in the code.
# Case (1) will be caught by semantic analyzer. Case (2) is a forward ref that should
# be caught by this visitor. Save the ref for later, so that if we see a definition,
# we know it's a used-before-definition scenario.
self.tracker.record_undefined_ref(o)
super().visit_name_expr(o)
def visit_with_stmt(self, o: WithStmt) -> None:
for expr, idx in zip(o.expr, o.target):
expr.accept(self)
self.process_lvalue(idx)
o.body.accept(self)
def visit_class_def(self, o: ClassDef) -> None:
self.process_definition(o.name)
self.tracker.enter_scope(ScopeType.Class)
super().visit_class_def(o)
self.tracker.exit_scope()
def visit_import(self, o: Import) -> None:
for mod, alias in o.ids:
if alias is not None:
self.tracker.record_definition(alias)
else:
# When you do `import x.y`, only `x` becomes defined.
names = mod.split(".")
if names:
# `names` should always be nonempty, but we don't want mypy
# to crash on invalid code.
self.tracker.record_definition(names[0])
super().visit_import(o)
def visit_import_from(self, o: ImportFrom) -> None:
for mod, alias in o.names:
name = alias
if name is None:
name = mod
self.tracker.record_definition(name)
super().visit_import_from(o)
def visit_type_alias_stmt(self, o: TypeAliasStmt) -> None:
# Type alias target may contain forward references
self.tracker.record_definition(o.name.name)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/partially_defined.py
|
Python
|
NOASSERTION
| 25,562 |
"""Classes for representing match statement patterns."""
from __future__ import annotations
from typing import TypeVar
from mypy_extensions import trait
from mypy.nodes import Expression, NameExpr, Node, RefExpr
from mypy.visitor import PatternVisitor
T = TypeVar("T")
@trait
class Pattern(Node):
"""A pattern node."""
__slots__ = ()
def accept(self, visitor: PatternVisitor[T]) -> T:
raise RuntimeError("Not implemented", type(self))
class AsPattern(Pattern):
"""The pattern <pattern> as <name>"""
    # The Python AST, and therefore also our AST, merges capture, wildcard and as patterns
    # into one for easier handling.
    # If pattern is None, this is a capture pattern. If name and pattern are both None, this
    # is a wildcard pattern.
    # Having only name be None should not happen, but it also won't break anything.
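    # Illustrative mapping (an informal sketch, not from the original source):
    #     case x:            ->  AsPattern(pattern=None, name=NameExpr("x"))          # capture
    #     case _:            ->  AsPattern(pattern=None, name=None)                   # wildcard
    #     case [1, 2] as y:  ->  AsPattern(pattern=SequencePattern(...), name=NameExpr("y"))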
pattern: Pattern | None
name: NameExpr | None
def __init__(self, pattern: Pattern | None, name: NameExpr | None) -> None:
super().__init__()
self.pattern = pattern
self.name = name
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_as_pattern(self)
class OrPattern(Pattern):
"""The pattern <pattern> | <pattern> | ..."""
patterns: list[Pattern]
def __init__(self, patterns: list[Pattern]) -> None:
super().__init__()
self.patterns = patterns
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_or_pattern(self)
class ValuePattern(Pattern):
"""The pattern x.y (or x.y.z, ...)"""
expr: Expression
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_value_pattern(self)
class SingletonPattern(Pattern):
# This can be exactly True, False or None
value: bool | None
def __init__(self, value: bool | None) -> None:
super().__init__()
self.value = value
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_singleton_pattern(self)
class SequencePattern(Pattern):
"""The pattern [<pattern>, ...]"""
patterns: list[Pattern]
def __init__(self, patterns: list[Pattern]) -> None:
super().__init__()
self.patterns = patterns
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_sequence_pattern(self)
class StarredPattern(Pattern):
# None corresponds to *_ in a list pattern. It will match multiple items but won't bind them to
# a name.
capture: NameExpr | None
def __init__(self, capture: NameExpr | None) -> None:
super().__init__()
self.capture = capture
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_starred_pattern(self)
class MappingPattern(Pattern):
keys: list[Expression]
values: list[Pattern]
rest: NameExpr | None
def __init__(
self, keys: list[Expression], values: list[Pattern], rest: NameExpr | None
) -> None:
super().__init__()
assert len(keys) == len(values)
self.keys = keys
self.values = values
self.rest = rest
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_mapping_pattern(self)
class ClassPattern(Pattern):
"""The pattern Cls(...)"""
class_ref: RefExpr
positionals: list[Pattern]
keyword_keys: list[str]
keyword_values: list[Pattern]
def __init__(
self,
class_ref: RefExpr,
positionals: list[Pattern],
keyword_keys: list[str],
keyword_values: list[Pattern],
) -> None:
super().__init__()
assert len(keyword_keys) == len(keyword_values)
self.class_ref = class_ref
self.positionals = positionals
self.keyword_keys = keyword_keys
self.keyword_values = keyword_values
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_class_pattern(self)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/patterns.py
|
Python
|
NOASSERTION
| 4,048 |
"""Plugin system for extending mypy.
At a high level, the plugin system works as follows:
* Plugins are collected from the corresponding mypy config file option
(either via paths to Python files, or installed Python modules)
and imported using importlib.
* Every module should get an entry point function (called 'plugin' by default,
but may be overridden in the config file) that should accept a single string
argument that is a full mypy version (includes git commit hash for dev
  versions) and return a subclass of mypy.plugin.Plugin.
* All plugin class constructors should match the signature of mypy.plugin.Plugin
(i.e. should accept an mypy.options.Options object), and *must* call
super().__init__().
* At several steps during semantic analysis and type checking mypy calls
special `get_xxx` methods on user plugins with a single string argument that
is a fully qualified name (full name) of a relevant definition
(see mypy.plugin.Plugin method docstrings for details).
* The plugins are called in the order they are passed in the config option.
Every plugin must decide whether to act on a given full name. The first
  plugin that returns a non-None object will be used.
* The above decision should be made using the limited common API specified by
mypy.plugin.CommonPluginApi.
* The callback returned by the plugin will be called with a larger context that
includes relevant current state (e.g. a default return type, or a default
attribute type) and a wider relevant API provider (e.g.
SemanticAnalyzerPluginInterface or CheckerPluginInterface).
* The result of this is used for further processing. See various `XxxContext`
named tuples for details about which information is given to each hook.
Plugin developers should ensure that their plugins work well in incremental and
daemon modes. In particular, plugins should not hold global state, and should
always call add_plugin_dependency() in plugin hooks called during semantic
analysis. See the method docstring for more details.
There is no dedicated cache storage for plugins, but plugins can store
per-TypeInfo data in a special .metadata attribute that is serialized to the
mypy caches between incremental runs. To avoid collisions between plugins, they
are encouraged to store their state under a dedicated key coinciding with
plugin name in the metadata dictionary. Every value stored there must be
JSON-serializable.
## Notes about the semantic analyzer
Mypy 0.710 introduced a new semantic analyzer that changed how plugins are
expected to work in several notable ways (from mypy 0.730 the old semantic
analyzer is no longer available):
1. The order of processing AST nodes in modules is different. The old semantic
analyzer processed modules in textual order, one module at a time. The new
semantic analyzer first processes the module top levels, including bodies of
any top-level classes and classes nested within classes. ("Top-level" here
means "not nested within a function/method".) Functions and methods are
processed only after module top levels have been finished. If there is an
import cycle, all module top levels in the cycle are processed before
processing any functions or methods. Each unit of processing (a module top
level or a function/method) is called a *target*.
This also means that function signatures in the same module have not been
analyzed yet when analyzing the module top level. If you need access to
a function signature, you'll need to explicitly analyze the signature first
using `anal_type()`.
2. Each target can be processed multiple times. This may happen if some forward
references are not ready yet, for example. This means that semantic analyzer
related plugin hooks can be called multiple times for the same full name.
These plugin methods must thus be idempotent.
3. The `anal_type` API function returns None if some part of the type is not
available yet. If this happens, the current target being analyzed will be
*deferred*, which means that it will be processed again soon, in the hope
that additional dependencies will be available. This may happen if there are
forward references to types or inter-module references to types within an
import cycle.
Note that if there is a circular definition, mypy may decide to stop
processing to avoid an infinite number of iterations. When this happens,
`anal_type` will generate an error and return an `AnyType` type object
during the final iteration (instead of None).
4. There is a new API method `defer()`. This can be used to explicitly request
   the current target to be reprocessed one more time. You don't need to
   call this if `anal_type` returns None, however.
5. There is a new API property `final_iteration`, which is true once mypy
   has detected no progress during the previous iteration or if the maximum
semantic analysis iteration count has been reached. You must never
defer during the final iteration, as it will cause a crash.
6. The `node` attribute of SymbolTableNode objects may contain a reference to
a PlaceholderNode object. This object means that this definition has not
been fully processed yet. If you encounter a PlaceholderNode, you should
defer unless it's the final iteration. If it's the final iteration, you
should generate an error message. It usually means that there's a cyclic
definition that cannot be resolved by mypy. PlaceholderNodes can only refer
to references inside an import cycle. If you are looking up things from
another module, such as the builtins, that is outside the current module or
import cycle, you can safely assume that you won't receive a placeholder.
When testing your plugin, you should have a test case that forces a module top
level to be processed multiple times. The easiest way to do this is to include
a forward reference to a class in a top-level annotation. Example:
c: C # Forward reference causes second analysis pass
class C: pass
Note that a forward reference in a function signature won't trigger another
pass, since all functions are processed only after the top level has been fully
analyzed.
You can use `api.options.new_semantic_analyzer` to check whether the new
semantic analyzer is enabled (it's always true in mypy 0.730 and later).
"""
from __future__ import annotations
from abc import abstractmethod
from typing import Any, Callable, NamedTuple, TypeVar
from mypy_extensions import mypyc_attr, trait
from mypy.errorcodes import ErrorCode
from mypy.lookup import lookup_fully_qualified
from mypy.message_registry import ErrorMessage
from mypy.messages import MessageBuilder
from mypy.nodes import (
ArgKind,
CallExpr,
ClassDef,
Context,
Expression,
MypyFile,
SymbolTableNode,
TypeInfo,
)
from mypy.options import Options
from mypy.tvar_scope import TypeVarLikeScope
from mypy.types import (
CallableType,
FunctionLike,
Instance,
ProperType,
Type,
TypeList,
UnboundType,
)
@trait
class TypeAnalyzerPluginInterface:
"""Interface for accessing semantic analyzer functionality in plugins.
Methods docstrings contain only basic info. Look for corresponding implementation
docstrings in typeanal.py for more details.
"""
# An options object. Note: these are the cloned options for the current file.
# This might be different from Plugin.options (that contains default/global options)
# if there are per-file options in the config. This applies to all other interfaces
# in this file.
options: Options
@abstractmethod
def fail(self, msg: str, ctx: Context, *, code: ErrorCode | None = None) -> None:
"""Emit an error message at given location."""
raise NotImplementedError
@abstractmethod
def named_type(self, name: str, args: list[Type]) -> Instance:
"""Construct an instance of a builtin type with given name."""
raise NotImplementedError
@abstractmethod
def analyze_type(self, typ: Type) -> Type:
"""Analyze an unbound type using the default mypy logic."""
raise NotImplementedError
@abstractmethod
def analyze_callable_args(
self, arglist: TypeList
) -> tuple[list[Type], list[ArgKind], list[str | None]] | None:
"""Find types, kinds, and names of arguments from extended callable syntax."""
raise NotImplementedError
# A context for a hook that semantically analyzes an unbound type.
class AnalyzeTypeContext(NamedTuple):
type: UnboundType # Type to analyze
context: Context # Relevant location context (e.g. for error messages)
api: TypeAnalyzerPluginInterface
@mypyc_attr(allow_interpreted_subclasses=True)
class CommonPluginApi:
"""
A common plugin API (shared between semantic analysis and type checking phases)
that all plugin hooks get independently of the context.
"""
# Global mypy options.
# Per-file options can be only accessed on various
# XxxPluginInterface classes.
options: Options
@abstractmethod
def lookup_fully_qualified(self, fullname: str) -> SymbolTableNode | None:
"""Lookup a symbol by its full name (including module).
        This lookup function is available for all plugins. Return None if a name
is not found. This function doesn't support lookup from current scope.
Use SemanticAnalyzerPluginInterface.lookup_qualified() for this."""
raise NotImplementedError
@trait
class CheckerPluginInterface:
"""Interface for accessing type checker functionality in plugins.
Methods docstrings contain only basic info. Look for corresponding implementation
docstrings in checker.py for more details.
"""
msg: MessageBuilder
options: Options
path: str
# Type context for type inference
@property
@abstractmethod
def type_context(self) -> list[Type | None]:
"""Return the type context of the plugin"""
raise NotImplementedError
@abstractmethod
def fail(
self, msg: str | ErrorMessage, ctx: Context, /, *, code: ErrorCode | None = None
) -> None:
"""Emit an error message at given location."""
raise NotImplementedError
@abstractmethod
def named_generic_type(self, name: str, args: list[Type]) -> Instance:
"""Construct an instance of a generic type with given type arguments."""
raise NotImplementedError
@abstractmethod
def get_expression_type(self, node: Expression, type_context: Type | None = None) -> Type:
"""Checks the type of the given expression."""
raise NotImplementedError
@trait
class SemanticAnalyzerPluginInterface:
"""Interface for accessing semantic analyzer functionality in plugins.
Methods docstrings contain only basic info. Look for corresponding implementation
docstrings in semanal.py for more details.
# TODO: clean-up lookup functions.
"""
modules: dict[str, MypyFile]
# Options for current file.
options: Options
cur_mod_id: str
msg: MessageBuilder
@abstractmethod
def named_type(self, fullname: str, args: list[Type] | None = None) -> Instance:
"""Construct an instance of a builtin type with given type arguments."""
raise NotImplementedError
@abstractmethod
def builtin_type(self, fully_qualified_name: str) -> Instance:
"""Legacy function -- use named_type() instead."""
# NOTE: Do not delete this since many plugins may still use it.
raise NotImplementedError
@abstractmethod
def named_type_or_none(self, fullname: str, args: list[Type] | None = None) -> Instance | None:
"""Construct an instance of a type with given type arguments.
Return None if a type could not be constructed for the qualified
type name. This is possible when the qualified name includes a
module name and the module has not been imported.
"""
raise NotImplementedError
@abstractmethod
def basic_new_typeinfo(self, name: str, basetype_or_fallback: Instance, line: int) -> TypeInfo:
raise NotImplementedError
@abstractmethod
def parse_bool(self, expr: Expression) -> bool | None:
"""Parse True/False literals."""
raise NotImplementedError
@abstractmethod
def parse_str_literal(self, expr: Expression) -> str | None:
"""Parse string literals."""
@abstractmethod
def fail(
self,
msg: str,
ctx: Context,
serious: bool = False,
*,
blocker: bool = False,
code: ErrorCode | None = None,
) -> None:
"""Emit an error message at given location."""
raise NotImplementedError
@abstractmethod
def anal_type(
self,
t: Type,
*,
tvar_scope: TypeVarLikeScope | None = None,
allow_tuple_literal: bool = False,
allow_unbound_tvars: bool = False,
report_invalid_types: bool = True,
) -> Type | None:
"""Analyze an unbound type.
Return None if some part of the type is not ready yet. In this
case the current target being analyzed will be deferred and
analyzed again.
"""
raise NotImplementedError
@abstractmethod
def class_type(self, self_type: Type) -> Type:
"""Generate type of first argument of class methods from type of self."""
raise NotImplementedError
@abstractmethod
def lookup_fully_qualified(self, name: str) -> SymbolTableNode:
"""Lookup a symbol by its fully qualified name.
Raise an error if not found.
"""
raise NotImplementedError
@abstractmethod
def lookup_fully_qualified_or_none(self, name: str) -> SymbolTableNode | None:
"""Lookup a symbol by its fully qualified name.
Return None if not found.
"""
raise NotImplementedError
@abstractmethod
def lookup_qualified(
self, name: str, ctx: Context, suppress_errors: bool = False
) -> SymbolTableNode | None:
"""Lookup symbol using a name in current scope.
This follows Python local->non-local->global->builtins rules.
"""
raise NotImplementedError
@abstractmethod
def add_plugin_dependency(self, trigger: str, target: str | None = None) -> None:
"""Specify semantic dependencies for generated methods/variables.
If the symbol with full name given by trigger is found to be stale by mypy,
then the body of node with full name given by target will be re-checked.
By default, this is the node that is currently analyzed.
For example, the dataclass plugin adds a generated __init__ method with
a signature that depends on types of attributes in ancestor classes. If any
attribute in an ancestor class gets stale (modified), we need to reprocess
the subclasses (and thus regenerate __init__ methods).
This is used by fine-grained incremental mode (mypy daemon). See mypy/server/deps.py
for more details.
"""
raise NotImplementedError
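    # Illustration (an assumption, not part of the original source): a plugin that generates
    # methods whose signatures depend on attributes of a class "mod.Base" could register the
    # dependency from inside its hook with:
    #     from mypy.server.trigger import make_wildcard_trigger
    #     api.add_plugin_dependency(make_wildcard_trigger("mod.Base"))
    # ("mod.Base" is a hypothetical fully qualified class name.)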
@abstractmethod
def add_symbol_table_node(self, name: str, stnode: SymbolTableNode) -> Any:
"""Add node to global symbol table (or to nearest class if there is one)."""
raise NotImplementedError
@abstractmethod
def qualified_name(self, n: str) -> str:
"""Make qualified name using current module and enclosing class (if any)."""
raise NotImplementedError
@abstractmethod
def defer(self) -> None:
"""Call this to defer the processing of the current node.
This will request an additional iteration of semantic analysis.
"""
raise NotImplementedError
@property
@abstractmethod
def final_iteration(self) -> bool:
"""Is this the final iteration of semantic analysis?"""
raise NotImplementedError
@property
@abstractmethod
def is_stub_file(self) -> bool:
raise NotImplementedError
@abstractmethod
def analyze_simple_literal_type(self, rvalue: Expression, is_final: bool) -> Type | None:
raise NotImplementedError
# A context for querying for configuration data about a module for
# cache invalidation purposes.
class ReportConfigContext(NamedTuple):
id: str # Module name
path: str # Module file path
is_check: bool # Is this invocation for checking whether the config matches
# A context for a function signature hook that infers a better signature for a
# function. Note that argument types aren't available yet. If you need them,
# you have to use a function hook instead.
class FunctionSigContext(NamedTuple):
args: list[list[Expression]] # Actual expressions for each formal argument
    default_signature: CallableType  # Original signature of the function
context: Context # Relevant location context (e.g. for error messages)
api: CheckerPluginInterface
# A context for a function hook that infers the return type of a function with
# a special signature.
#
# A no-op callback would just return the inferred return type, but a useful
# callback at least sometimes can infer a more precise type.
class FunctionContext(NamedTuple):
arg_types: list[list[Type]] # List of actual caller types for each formal argument
arg_kinds: list[list[ArgKind]] # Ditto for argument kinds, see nodes.ARG_* constants
# Names of formal parameters from the callee definition,
# these will be sufficient in most cases.
callee_arg_names: list[str | None]
# Names of actual arguments in the call expression. For example,
# in a situation like this:
# def func(**kwargs) -> None:
# pass
# func(kw1=1, kw2=2)
# callee_arg_names will be ['kwargs'] and arg_names will be [['kw1', 'kw2']].
arg_names: list[list[str | None]]
default_return_type: Type # Return type inferred from signature
args: list[list[Expression]] # Actual expressions for each formal argument
context: Context # Relevant location context (e.g. for error messages)
api: CheckerPluginInterface
# A context for a method signature hook that infers a better signature for a
# method. Note that argument types aren't available yet. If you need them,
# you have to use a method hook instead.
# TODO: document ProperType in the plugin changelog/update issue.
class MethodSigContext(NamedTuple):
type: ProperType # Base object type for method call
args: list[list[Expression]] # Actual expressions for each formal argument
default_signature: CallableType # Original signature of the method
context: Context # Relevant location context (e.g. for error messages)
api: CheckerPluginInterface
# A context for a method hook that infers the return type of a method with a
# special signature.
#
# This is very similar to FunctionContext (only differences are documented).
class MethodContext(NamedTuple):
type: ProperType # Base object type for method call
arg_types: list[list[Type]] # List of actual caller types for each formal argument
# see FunctionContext for details about names and kinds
arg_kinds: list[list[ArgKind]]
callee_arg_names: list[str | None]
arg_names: list[list[str | None]]
default_return_type: Type # Return type inferred by mypy
args: list[list[Expression]] # Lists of actual expressions for every formal argument
context: Context
api: CheckerPluginInterface
# A context for an attribute type hook that infers the type of an attribute.
class AttributeContext(NamedTuple):
type: ProperType # Type of object with attribute
default_attr_type: Type # Original attribute type
context: Context # Relevant location context (e.g. for error messages)
api: CheckerPluginInterface
# A context for a class hook that modifies the class definition.
class ClassDefContext(NamedTuple):
cls: ClassDef # The class definition
reason: Expression # The expression being applied (decorator, metaclass, base class)
api: SemanticAnalyzerPluginInterface
# A context for dynamic class definitions like
# Base = declarative_base()
class DynamicClassDefContext(NamedTuple):
call: CallExpr # The r.h.s. of dynamic class definition
name: str # The name this class is being assigned to
api: SemanticAnalyzerPluginInterface
@mypyc_attr(allow_interpreted_subclasses=True)
class Plugin(CommonPluginApi):
"""Base class of all type checker plugins.
This defines a no-op plugin. Subclasses can override some methods to
provide some actual functionality.
All get_ methods are treated as pure functions (you should assume that
results might be cached). A plugin should return None from a get_ method
to give way to other plugins.
Look at the comments of various *Context objects for additional information on
various hooks.
"""
def __init__(self, options: Options) -> None:
self.options = options
self.python_version = options.python_version
# This can't be set in __init__ because it is executed too soon in build.py.
# Therefore, build.py *must* set it later before graph processing starts
# by calling set_modules().
self._modules: dict[str, MypyFile] | None = None
def set_modules(self, modules: dict[str, MypyFile]) -> None:
self._modules = modules
def lookup_fully_qualified(self, fullname: str) -> SymbolTableNode | None:
assert self._modules is not None
return lookup_fully_qualified(fullname, self._modules)
def report_config_data(self, ctx: ReportConfigContext) -> Any:
"""Get representation of configuration data for a module.
The data must be encodable as JSON and will be stored in the
cache metadata for the module. A mismatch between the cached
        values and the returned value will result in that module's cache
being invalidated and the module being rechecked.
This can be called twice for each module, once after loading
the cache to check if it is valid and once while writing new
cache information.
If is_check in the context is true, then the return of this
call will be checked against the cached version. Otherwise the
call is being made to determine what to put in the cache. This
can be used to allow consulting extra cache files in certain
complex situations.
This can be used to incorporate external configuration information
that might require changes to typechecking.
"""
return None
def get_additional_deps(self, file: MypyFile) -> list[tuple[int, str, int]]:
"""Customize dependencies for a module.
This hook allows adding in new dependencies for a module. It
is called after parsing a file but before analysis. This can
be useful if a library has dependencies that are dynamic based
on configuration information, for example.
Returns a list of (priority, module name, line number) tuples.
The line number can be -1 when there is not a known real line number.
Priorities are defined in mypy.build (but maybe shouldn't be).
10 is a good choice for priority.
"""
return []
def get_type_analyze_hook(self, fullname: str) -> Callable[[AnalyzeTypeContext], Type] | None:
"""Customize behaviour of the type analyzer for given full names.
This method is called during the semantic analysis pass whenever mypy sees an
unbound type. For example, while analysing this code:
from lib import Special, Other
var: Special
def func(x: Other[int]) -> None:
...
this method will be called with 'lib.Special', and then with 'lib.Other'.
        The callback returned by the plugin must return an analyzed type,
i.e. an instance of `mypy.types.Type`.
"""
return None
def get_function_signature_hook(
self, fullname: str
) -> Callable[[FunctionSigContext], FunctionLike] | None:
"""Adjust the signature of a function.
This method is called before type checking a function call. Plugin
may infer a better type for the function.
from lib import Class, do_stuff
do_stuff(42)
Class()
This method will be called with 'lib.do_stuff' and then with 'lib.Class'.
"""
return None
def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:
"""Adjust the return type of a function call.
This method is called after type checking a call. Plugin may adjust the return
type inferred by mypy, and/or emit some error messages. Note, this hook is also
called for class instantiation calls, so that in this example:
from lib import Class, do_stuff
do_stuff(42)
Class()
This method will be called with 'lib.do_stuff' and then with 'lib.Class'.
"""
return None
def get_method_signature_hook(
self, fullname: str
) -> Callable[[MethodSigContext], FunctionLike] | None:
"""Adjust the signature of a method.
This method is called before type checking a method call. Plugin
may infer a better type for the method. The hook is also called for special
Python dunder methods except __init__ and __new__ (use get_function_hook to customize
class instantiation). This function is called with the method full name using
the class where it was _defined_. For example, in this code:
from lib import Special
class Base:
def method(self, arg: Any) -> Any:
...
class Derived(Base):
...
var: Derived
var.method(42)
x: Special
y = x[0]
this method is called with '__main__.Base.method', and then with
'lib.Special.__getitem__'.
"""
return None
def get_method_hook(self, fullname: str) -> Callable[[MethodContext], Type] | None:
"""Adjust return type of a method call.
This is the same as get_function_hook(), but is called with the
method full name (again, using the class where the method is defined).
"""
return None
def get_attribute_hook(self, fullname: str) -> Callable[[AttributeContext], Type] | None:
"""Adjust type of an instance attribute.
This method is called with attribute full name using the class of the instance where
the attribute was defined (or Var.info.fullname for generated attributes).
For classes without __getattr__ or __getattribute__, this hook is only called for
names of fields/properties (but not methods) that exist in the instance MRO.
For classes that implement __getattr__ or __getattribute__, this hook is called
for all fields/properties, including nonexistent ones (but still not methods).
For example:
class Base:
x: Any
def __getattr__(self, attr: str) -> Any: ...
class Derived(Base):
...
var: Derived
var.x
var.y
get_attribute_hook is called with '__main__.Base.x' and '__main__.Base.y'.
However, if we had not implemented __getattr__ on Base, you would only get
the callback for 'var.x'; 'var.y' would produce an error without calling the hook.
"""
return None
def get_class_attribute_hook(self, fullname: str) -> Callable[[AttributeContext], Type] | None:
"""
Adjust type of a class attribute.
This method is called with attribute full name using the class where the attribute was
defined (or Var.info.fullname for generated attributes).
For example:
class Cls:
x: Any
Cls.x
get_class_attribute_hook is called with '__main__.Cls.x' as fullname.
"""
return None
def get_class_decorator_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
"""Update class definition for given class decorators.
The plugin can modify a TypeInfo _in place_ (for example add some generated
methods to the symbol table). This hook is called after the class body was
semantically analyzed, but *there may still be placeholders* (typically
caused by forward references).
NOTE: Usually get_class_decorator_hook_2 is the better option, since it
guarantees that there are no placeholders.
The hook is called with full names of all class decorators.
The hook can be called multiple times per class, so it must be
idempotent.
"""
return None
def get_class_decorator_hook_2(
self, fullname: str
) -> Callable[[ClassDefContext], bool] | None:
"""Update class definition for given class decorators.
Similar to get_class_decorator_hook, but this runs in a later pass when
placeholders have been resolved.
The hook can return False if some base class hasn't been
processed yet using class hooks. It causes all class hooks
(that are run in this same pass) to be invoked another time for
the file(s) currently being processed.
The hook can be called multiple times per class, so it must be
idempotent.
"""
return None
def get_metaclass_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
"""Update class definition for given declared metaclasses.
Same as get_class_decorator_hook() but for metaclasses. Note:
this hook will be only called for explicit metaclasses, not for
inherited ones.
TODO: probably it should also be called on inherited metaclasses.
"""
return None
def get_base_class_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
"""Update class definition for given base classes.
Same as get_class_decorator_hook() but for base classes. Base classes
        don't need to refer to TypeInfos; if a base class refers to a variable with
Any type, this hook will still be called.
"""
return None
def get_customize_class_mro_hook(
self, fullname: str
) -> Callable[[ClassDefContext], None] | None:
"""Customize MRO for given classes.
The plugin can modify the class MRO _in place_. This method is called
with the class full name before its body was semantically analyzed.
"""
return None
def get_dynamic_class_hook(
self, fullname: str
) -> Callable[[DynamicClassDefContext], None] | None:
"""Semantically analyze a dynamic class definition.
This plugin hook allows one to semantically analyze dynamic class definitions like:
from lib import dynamic_class
X = dynamic_class('X', [])
For such definition, this hook will be called with 'lib.dynamic_class'.
The plugin should create the corresponding TypeInfo, and place it into a relevant
symbol table, e.g. using ctx.api.add_symbol_table_node().
"""
return None
T = TypeVar("T")
class ChainedPlugin(Plugin):
"""A plugin that represents a sequence of chained plugins.
Each lookup method returns the hook for the first plugin that
reports a match.
This class should not be subclassed -- use Plugin as the base class
for all plugins.
"""
# TODO: Support caching of lookup results (through a LRU cache, for example).
def __init__(self, options: Options, plugins: list[Plugin]) -> None:
"""Initialize chained plugin.
Assume that the child plugins aren't mutated (results may be cached).
"""
super().__init__(options)
self._plugins = plugins
def set_modules(self, modules: dict[str, MypyFile]) -> None:
for plugin in self._plugins:
plugin.set_modules(modules)
def report_config_data(self, ctx: ReportConfigContext) -> Any:
config_data = [plugin.report_config_data(ctx) for plugin in self._plugins]
return config_data if any(x is not None for x in config_data) else None
def get_additional_deps(self, file: MypyFile) -> list[tuple[int, str, int]]:
deps = []
for plugin in self._plugins:
deps.extend(plugin.get_additional_deps(file))
return deps
def get_type_analyze_hook(self, fullname: str) -> Callable[[AnalyzeTypeContext], Type] | None:
return self._find_hook(lambda plugin: plugin.get_type_analyze_hook(fullname))
def get_function_signature_hook(
self, fullname: str
) -> Callable[[FunctionSigContext], FunctionLike] | None:
return self._find_hook(lambda plugin: plugin.get_function_signature_hook(fullname))
def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:
return self._find_hook(lambda plugin: plugin.get_function_hook(fullname))
def get_method_signature_hook(
self, fullname: str
) -> Callable[[MethodSigContext], FunctionLike] | None:
return self._find_hook(lambda plugin: plugin.get_method_signature_hook(fullname))
def get_method_hook(self, fullname: str) -> Callable[[MethodContext], Type] | None:
return self._find_hook(lambda plugin: plugin.get_method_hook(fullname))
def get_attribute_hook(self, fullname: str) -> Callable[[AttributeContext], Type] | None:
return self._find_hook(lambda plugin: plugin.get_attribute_hook(fullname))
def get_class_attribute_hook(self, fullname: str) -> Callable[[AttributeContext], Type] | None:
return self._find_hook(lambda plugin: plugin.get_class_attribute_hook(fullname))
def get_class_decorator_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
return self._find_hook(lambda plugin: plugin.get_class_decorator_hook(fullname))
def get_class_decorator_hook_2(
self, fullname: str
) -> Callable[[ClassDefContext], bool] | None:
return self._find_hook(lambda plugin: plugin.get_class_decorator_hook_2(fullname))
def get_metaclass_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
return self._find_hook(lambda plugin: plugin.get_metaclass_hook(fullname))
def get_base_class_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
return self._find_hook(lambda plugin: plugin.get_base_class_hook(fullname))
def get_customize_class_mro_hook(
self, fullname: str
) -> Callable[[ClassDefContext], None] | None:
return self._find_hook(lambda plugin: plugin.get_customize_class_mro_hook(fullname))
def get_dynamic_class_hook(
self, fullname: str
) -> Callable[[DynamicClassDefContext], None] | None:
return self._find_hook(lambda plugin: plugin.get_dynamic_class_hook(fullname))
def _find_hook(self, lookup: Callable[[Plugin], T]) -> T | None:
for plugin in self._plugins:
hook = lookup(plugin)
if hook:
return hook
return None
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/plugin.py
|
Python
|
NOASSERTION
| 35,404 |
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/plugins/__init__.py
|
Python
|
NOASSERTION
| 0 |
|
"""Plugin for supporting the attrs library (http://www.attrs.org)"""
from __future__ import annotations
from collections import defaultdict
from functools import reduce
from typing import Final, Iterable, List, Mapping, cast
from typing_extensions import Literal
import mypy.plugin # To avoid circular imports.
from mypy.applytype import apply_generic_arguments
from mypy.errorcodes import LITERAL_REQ
from mypy.expandtype import expand_type, expand_type_by_instance
from mypy.exprtotype import TypeTranslationError, expr_to_unanalyzed_type
from mypy.meet import meet_types
from mypy.messages import format_type_bare
from mypy.nodes import (
ARG_NAMED,
ARG_NAMED_OPT,
ARG_OPT,
ARG_POS,
MDEF,
Argument,
AssignmentStmt,
CallExpr,
Context,
Decorator,
Expression,
FuncDef,
IndexExpr,
JsonDict,
LambdaExpr,
ListExpr,
MemberExpr,
NameExpr,
OverloadedFuncDef,
PlaceholderNode,
RefExpr,
SymbolTableNode,
TempNode,
TupleExpr,
TypeApplication,
TypeInfo,
TypeVarExpr,
Var,
is_class_var,
)
from mypy.plugin import SemanticAnalyzerPluginInterface
from mypy.plugins.common import (
_get_argument,
_get_bool_argument,
_get_decorator_bool_argument,
add_attribute_to_class,
add_method_to_class,
deserialize_and_fixup_type,
)
from mypy.server.trigger import make_wildcard_trigger
from mypy.state import state
from mypy.typeops import get_type_vars, make_simplified_union, map_type_from_supertype
from mypy.types import (
AnyType,
CallableType,
FunctionLike,
Instance,
LiteralType,
NoneType,
Overloaded,
ProperType,
TupleType,
Type,
TypeOfAny,
TypeType,
TypeVarId,
TypeVarType,
UninhabitedType,
UnionType,
get_proper_type,
)
from mypy.typevars import fill_typevars
from mypy.util import unmangle
# The names of the different functions that create classes or arguments.
attr_class_makers: Final = {"attr.s", "attr.attrs", "attr.attributes"}
attr_dataclass_makers: Final = {"attr.dataclass"}
attr_frozen_makers: Final = {"attr.frozen", "attrs.frozen"}
attr_define_makers: Final = {"attr.define", "attr.mutable", "attrs.define", "attrs.mutable"}
attr_attrib_makers: Final = {"attr.ib", "attr.attrib", "attr.attr", "attr.field", "attrs.field"}
attr_optional_converters: Final = {"attr.converters.optional", "attrs.converters.optional"}
SELF_TVAR_NAME: Final = "_AT"
MAGIC_ATTR_NAME: Final = "__attrs_attrs__"
MAGIC_ATTR_CLS_NAME_TEMPLATE: Final = "__{}_AttrsAttributes__" # The tuple subclass pattern.
ATTRS_INIT_NAME: Final = "__attrs_init__"
class Converter:
"""Holds information about a `converter=` argument"""
def __init__(self, init_type: Type | None = None, ret_type: Type | None = None) -> None:
self.init_type = init_type
self.ret_type = ret_type
class Attribute:
"""The value of an attr.ib() call."""
def __init__(
self,
name: str,
alias: str | None,
info: TypeInfo,
has_default: bool,
init: bool,
kw_only: bool,
converter: Converter | None,
context: Context,
init_type: Type | None,
) -> None:
self.name = name
self.alias = alias
self.info = info
self.has_default = has_default
self.init = init
self.kw_only = kw_only
self.converter = converter
self.context = context
self.init_type = init_type
def argument(self, ctx: mypy.plugin.ClassDefContext) -> Argument:
"""Return this attribute as an argument to __init__."""
assert self.init
init_type: Type | None = None
if self.converter:
if self.converter.init_type:
init_type = self.converter.init_type
if init_type and self.init_type and self.converter.ret_type:
# The converter return type should be the same type as the attribute type.
# Copy type vars from attr type to converter.
converter_vars = get_type_vars(self.converter.ret_type)
init_vars = get_type_vars(self.init_type)
if converter_vars and len(converter_vars) == len(init_vars):
variables = {
binder.id: arg for binder, arg in zip(converter_vars, init_vars)
}
init_type = expand_type(init_type, variables)
else:
ctx.api.fail("Cannot determine __init__ type from converter", self.context)
init_type = AnyType(TypeOfAny.from_error)
else: # There is no converter, the init type is the normal type.
init_type = self.init_type or self.info[self.name].type
unannotated = False
if init_type is None:
unannotated = True
# Convert type not set to Any.
init_type = AnyType(TypeOfAny.unannotated)
else:
proper_type = get_proper_type(init_type)
if isinstance(proper_type, AnyType):
if proper_type.type_of_any == TypeOfAny.unannotated:
unannotated = True
if unannotated and ctx.api.options.disallow_untyped_defs:
# This is a compromise. If you don't have a type here then the
# __init__ will be untyped. But since the __init__ is added it's
# pointing at the decorator. So instead we also show the error in the
# assignment, which is where you would fix the issue.
node = self.info[self.name].node
assert node is not None
ctx.api.msg.need_annotation_for_var(node, self.context)
if self.kw_only:
arg_kind = ARG_NAMED_OPT if self.has_default else ARG_NAMED
else:
arg_kind = ARG_OPT if self.has_default else ARG_POS
# Attrs removes leading underscores when creating the __init__ arguments.
name = self.alias or self.name.lstrip("_")
return Argument(Var(name, init_type), init_type, None, arg_kind)
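    # Illustration (an assumption, not from the original source): with `_x = attr.ib()`
    # the generated __init__ parameter is named `x`; passing `alias="other"` to attr.ib()
    # would name it `other` instead.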
def serialize(self) -> JsonDict:
"""Serialize this object so it can be saved and restored."""
return {
"name": self.name,
"alias": self.alias,
"has_default": self.has_default,
"init": self.init,
"kw_only": self.kw_only,
"has_converter": self.converter is not None,
"converter_init_type": (
self.converter.init_type.serialize()
if self.converter and self.converter.init_type
else None
),
"context_line": self.context.line,
"context_column": self.context.column,
"init_type": self.init_type.serialize() if self.init_type else None,
}
@classmethod
def deserialize(
cls, info: TypeInfo, data: JsonDict, api: SemanticAnalyzerPluginInterface
) -> Attribute:
"""Return the Attribute that was serialized."""
raw_init_type = data["init_type"]
init_type = deserialize_and_fixup_type(raw_init_type, api) if raw_init_type else None
raw_converter_init_type = data["converter_init_type"]
converter_init_type = (
deserialize_and_fixup_type(raw_converter_init_type, api)
if raw_converter_init_type
else None
)
return Attribute(
data["name"],
data["alias"],
info,
data["has_default"],
data["init"],
data["kw_only"],
Converter(converter_init_type) if data["has_converter"] else None,
Context(line=data["context_line"], column=data["context_column"]),
init_type,
)
def expand_typevar_from_subtype(self, sub_type: TypeInfo) -> None:
"""Expands type vars in the context of a subtype when an attribute is inherited
from a generic super type."""
if self.init_type:
self.init_type = map_type_from_supertype(self.init_type, sub_type, self.info)
else:
self.init_type = None
def _determine_eq_order(ctx: mypy.plugin.ClassDefContext) -> bool:
"""
Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
value of order.
"""
cmp = _get_decorator_optional_bool_argument(ctx, "cmp")
eq = _get_decorator_optional_bool_argument(ctx, "eq")
order = _get_decorator_optional_bool_argument(ctx, "order")
if cmp is not None and any((eq is not None, order is not None)):
ctx.api.fail('Don\'t mix "cmp" with "eq" and "order"', ctx.reason)
# cmp takes precedence due to bw-compatibility.
if cmp is not None:
return cmp
# If left None, equality is on and ordering mirrors equality.
if eq is None:
eq = True
if order is None:
order = eq
if eq is False and order is True:
ctx.api.fail("eq must be True if order is True", ctx.reason)
return order
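# Illustrative outcomes of the rules above (an informal summary, not from the original source):
#     @attr.s(cmp=True)               ->  order is True (cmp takes precedence)
#     @attr.s()                       ->  eq defaults to True, order mirrors eq -> True
#     @attr.s(eq=True, order=False)   ->  order is False
#     @attr.s(eq=False, order=True)   ->  error: "eq must be True if order is True"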
def _get_decorator_optional_bool_argument(
ctx: mypy.plugin.ClassDefContext, name: str, default: bool | None = None
) -> bool | None:
"""Return the Optional[bool] argument for the decorator.
This handles both @decorator(...) and @decorator.
"""
if isinstance(ctx.reason, CallExpr):
attr_value = _get_argument(ctx.reason, name)
if attr_value:
if isinstance(attr_value, NameExpr):
if attr_value.fullname == "builtins.True":
return True
if attr_value.fullname == "builtins.False":
return False
if attr_value.fullname == "builtins.None":
return None
ctx.api.fail(
f'"{name}" argument must be a True, False, or None literal',
ctx.reason,
code=LITERAL_REQ,
)
return default
return default
else:
return default
def attr_tag_callback(ctx: mypy.plugin.ClassDefContext) -> None:
"""Record that we have an attrs class in the main semantic analysis pass.
The later pass implemented by attr_class_maker_callback will use this
to detect attrs classes in base classes.
"""
# The value is ignored, only the existence matters.
ctx.cls.info.metadata["attrs_tag"] = {}
def attr_class_maker_callback(
ctx: mypy.plugin.ClassDefContext,
auto_attribs_default: bool | None = False,
frozen_default: bool = False,
slots_default: bool = False,
) -> bool:
"""Add necessary dunder methods to classes decorated with attr.s.
attrs is a package that lets you define classes without writing dull boilerplate code.
At a quick glance, the decorator searches the class body for assignments of `attr.ib`s (or
annotated variables if auto_attribs=True), then depending on how the decorator is called,
it will add an __init__ or all the compare methods.
For frozen=True it will turn the attrs into properties.
Hashability will be set according to https://www.attrs.org/en/stable/hashing.html.
See https://www.attrs.org/en/stable/how-does-it-work.html for information on how attrs works.
If this returns False, some required metadata was not ready yet, and we need another
pass.
"""
with state.strict_optional_set(ctx.api.options.strict_optional):
# This hook is called during semantic analysis, but it uses a bunch of
# type-checking ops, so it needs the strict optional set properly.
return attr_class_maker_callback_impl(
ctx, auto_attribs_default, frozen_default, slots_default
)
def attr_class_maker_callback_impl(
ctx: mypy.plugin.ClassDefContext,
auto_attribs_default: bool | None,
frozen_default: bool,
slots_default: bool,
) -> bool:
info = ctx.cls.info
init = _get_decorator_bool_argument(ctx, "init", True)
frozen = _get_frozen(ctx, frozen_default)
order = _determine_eq_order(ctx)
slots = _get_decorator_bool_argument(ctx, "slots", slots_default)
auto_attribs = _get_decorator_optional_bool_argument(ctx, "auto_attribs", auto_attribs_default)
kw_only = _get_decorator_bool_argument(ctx, "kw_only", False)
match_args = _get_decorator_bool_argument(ctx, "match_args", True)
for super_info in ctx.cls.info.mro[1:-1]:
if "attrs_tag" in super_info.metadata and "attrs" not in super_info.metadata:
# Super class is not ready yet. Request another pass.
return False
attributes = _analyze_class(ctx, auto_attribs, kw_only)
# Check if attribute types are ready.
for attr in attributes:
node = info.get(attr.name)
if node is None:
# This name is likely blocked by some semantic analysis error that
# should have been reported already.
_add_empty_metadata(info)
return True
_add_attrs_magic_attribute(ctx, [(attr.name, info[attr.name].type) for attr in attributes])
if slots:
_add_slots(ctx, attributes)
if match_args and ctx.api.options.python_version[:2] >= (3, 10):
# `.__match_args__` is only added for python3.10+, but the argument
# exists for earlier versions as well.
_add_match_args(ctx, attributes)
# Save the attributes so that subclasses can reuse them.
ctx.cls.info.metadata["attrs"] = {
"attributes": [attr.serialize() for attr in attributes],
"frozen": frozen,
}
adder = MethodAdder(ctx)
# If __init__ is not being generated, attrs still generates it as __attrs_init__ instead.
_add_init(ctx, attributes, adder, "__init__" if init else ATTRS_INIT_NAME)
if order:
_add_order(ctx, adder)
if frozen:
_make_frozen(ctx, attributes)
# Frozen classes are hashable by default, even if inheriting from non-frozen ones.
hashable: bool | None = _get_decorator_bool_argument(
ctx, "hash", True
) and _get_decorator_bool_argument(ctx, "unsafe_hash", True)
else:
hashable = _get_decorator_optional_bool_argument(ctx, "unsafe_hash")
if hashable is None: # unspecified
hashable = _get_decorator_optional_bool_argument(ctx, "hash")
eq = _get_decorator_optional_bool_argument(ctx, "eq")
has_own_hash = "__hash__" in ctx.cls.info.names
if has_own_hash or (hashable is None and eq is False):
pass # Do nothing.
elif hashable:
# We copy the `__hash__` signature from `object` to make them hashable.
ctx.cls.info.names["__hash__"] = ctx.cls.info.mro[-1].names["__hash__"]
else:
_remove_hashability(ctx)
return True
def _get_frozen(ctx: mypy.plugin.ClassDefContext, frozen_default: bool) -> bool:
"""Return whether this class is frozen."""
if _get_decorator_bool_argument(ctx, "frozen", frozen_default):
return True
# Subclasses of frozen classes are frozen so check that.
for super_info in ctx.cls.info.mro[1:-1]:
if "attrs" in super_info.metadata and super_info.metadata["attrs"]["frozen"]:
return True
return False
def _analyze_class(
ctx: mypy.plugin.ClassDefContext, auto_attribs: bool | None, kw_only: bool
) -> list[Attribute]:
"""Analyze the class body of an attr maker, its parents, and return the Attributes found.
auto_attribs=True means we'll generate attributes from type annotations also.
auto_attribs=None means we'll detect which mode to use.
kw_only=True means that all attributes created here will be keyword only args in __init__.
"""
own_attrs: dict[str, Attribute] = {}
if auto_attribs is None:
auto_attribs = _detect_auto_attribs(ctx)
# Walk the body looking for assignments and decorators.
for stmt in ctx.cls.defs.body:
if isinstance(stmt, AssignmentStmt):
for attr in _attributes_from_assignment(ctx, stmt, auto_attribs, kw_only):
# When attrs are defined twice in the same body we want to use the 2nd definition
# in the 2nd location. So remove it from the OrderedDict.
# Unless it's auto_attribs in which case we want the 2nd definition in the
# 1st location.
if not auto_attribs and attr.name in own_attrs:
del own_attrs[attr.name]
own_attrs[attr.name] = attr
elif isinstance(stmt, Decorator):
_cleanup_decorator(stmt, own_attrs)
for attribute in own_attrs.values():
# Even though these look like class level assignments we want them to look like
# instance level assignments.
if attribute.name in ctx.cls.info.names:
node = ctx.cls.info.names[attribute.name].node
if isinstance(node, PlaceholderNode):
# This node is not ready yet.
continue
assert isinstance(node, Var)
node.is_initialized_in_class = False
# Traverse the MRO and collect attributes from the parents.
taken_attr_names = set(own_attrs)
super_attrs = []
for super_info in ctx.cls.info.mro[1:-1]:
if "attrs" in super_info.metadata:
# Each class depends on the set of attributes in its attrs ancestors.
ctx.api.add_plugin_dependency(make_wildcard_trigger(super_info.fullname))
for data in super_info.metadata["attrs"]["attributes"]:
# Only add an attribute if it hasn't been defined before. This
# allows for overwriting attribute definitions by subclassing.
if data["name"] not in taken_attr_names:
a = Attribute.deserialize(super_info, data, ctx.api)
a.expand_typevar_from_subtype(ctx.cls.info)
super_attrs.append(a)
taken_attr_names.add(a.name)
attributes = super_attrs + list(own_attrs.values())
# Check the init args for correct default-ness. Note: This has to be done after all the
# attributes for all classes have been read, because subclasses can override parents.
last_default = False
for i, attribute in enumerate(attributes):
if not attribute.init:
continue
if attribute.kw_only:
# Keyword-only attributes don't care whether they are default or not.
continue
# If the issue comes from merging different classes, report it
# at the class definition point.
context = attribute.context if i >= len(super_attrs) else ctx.cls
if not attribute.has_default and last_default:
ctx.api.fail("Non-default attributes not allowed after default attributes.", context)
last_default |= attribute.has_default
return attributes
def _add_empty_metadata(info: TypeInfo) -> None:
"""Add empty metadata to mark that we've finished processing this class."""
info.metadata["attrs"] = {"attributes": [], "frozen": False}
def _detect_auto_attribs(ctx: mypy.plugin.ClassDefContext) -> bool:
"""Return whether auto_attribs should be enabled or disabled.
It's disabled if there are any unannotated attribs()
"""
for stmt in ctx.cls.defs.body:
if isinstance(stmt, AssignmentStmt):
for lvalue in stmt.lvalues:
lvalues, rvalues = _parse_assignments(lvalue, stmt)
if len(lvalues) != len(rvalues):
# This means we have some assignment that isn't 1 to 1.
# It can't be an attrib.
continue
for lhs, rvalue in zip(lvalues, rvalues):
# Check if the right hand side is a call to an attribute maker.
if (
isinstance(rvalue, CallExpr)
and isinstance(rvalue.callee, RefExpr)
and rvalue.callee.fullname in attr_attrib_makers
and not stmt.new_syntax
):
# This means we have an attrib without an annotation and so
# we can't do auto_attribs=True
return False
return True
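# Editorial note (illustrative sketch, not part of mypy): detection keys off
# annotations. A body containing an unannotated maker call such as
#
#     x = attr.ib()
#
# forces auto_attribs off, whereas a body where every attribute is annotated
# (e.g. "x: int = attr.ib()" or plain "y: str") lets it default to on.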
def _attributes_from_assignment(
ctx: mypy.plugin.ClassDefContext, stmt: AssignmentStmt, auto_attribs: bool, kw_only: bool
) -> Iterable[Attribute]:
"""Return Attribute objects that are created by this assignment.
The assignments can look like this:
x = attr.ib()
x = y = attr.ib()
x, y = attr.ib(), attr.ib()
or if auto_attribs is enabled also like this:
x: type
x: type = default_value
x: type = attr.ib(...)
"""
for lvalue in stmt.lvalues:
lvalues, rvalues = _parse_assignments(lvalue, stmt)
if len(lvalues) != len(rvalues):
# This means we have some assignment that isn't 1 to 1.
# It can't be an attrib.
continue
for lhs, rvalue in zip(lvalues, rvalues):
# Check if the right hand side is a call to an attribute maker.
if (
isinstance(rvalue, CallExpr)
and isinstance(rvalue.callee, RefExpr)
and rvalue.callee.fullname in attr_attrib_makers
):
attr = _attribute_from_attrib_maker(ctx, auto_attribs, kw_only, lhs, rvalue, stmt)
if attr:
yield attr
elif auto_attribs and stmt.type and stmt.new_syntax and not is_class_var(lhs):
yield _attribute_from_auto_attrib(ctx, kw_only, lhs, rvalue, stmt)
def _cleanup_decorator(stmt: Decorator, attr_map: dict[str, Attribute]) -> None:
"""Handle decorators in class bodies.
`x.default` will set a default value on x
`x.validator` and `x.default` will get removed to avoid throwing a type error.
"""
remove_me = []
for func_decorator in stmt.decorators:
if (
isinstance(func_decorator, MemberExpr)
and isinstance(func_decorator.expr, NameExpr)
and func_decorator.expr.name in attr_map
):
if func_decorator.name == "default":
attr_map[func_decorator.expr.name].has_default = True
if func_decorator.name in ("default", "validator"):
# These are decorators on the attrib object that only exist during
# class creation time. In order to not trigger a type error later we
# just remove them. This might leave us with a Decorator with no
# decorators (Emperor's new clothes?)
# TODO: It would be nice to type-check these rather than remove them.
# default should be Callable[[], T]
# validator should be Callable[[Any, 'Attribute', T], Any]
# where T is the type of the attribute.
remove_me.append(func_decorator)
for dec in remove_me:
stmt.decorators.remove(dec)
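# Editorial note (illustrative sketch, not part of mypy): the decorators handled
# above appear in user code roughly as
#
#     @attr.s
#     class C:
#         x: int = attr.ib()
#
#         @x.default
#         def _x_default(self):  # marks x as having a default
#             return 0
#
#         @x.validator
#         def _x_check(self, attribute, value): ...
#
# Both decorators are stripped so later type checking does not trip over them.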
def _attribute_from_auto_attrib(
ctx: mypy.plugin.ClassDefContext,
kw_only: bool,
lhs: NameExpr,
rvalue: Expression,
stmt: AssignmentStmt,
) -> Attribute:
"""Return an Attribute for a new type assignment."""
name = unmangle(lhs.name)
# `x: int` (without equal sign) assigns rvalue to TempNode(AnyType())
has_rhs = not isinstance(rvalue, TempNode)
sym = ctx.cls.info.names.get(name)
init_type = sym.type if sym else None
return Attribute(name, None, ctx.cls.info, has_rhs, True, kw_only, None, stmt, init_type)
def _attribute_from_attrib_maker(
ctx: mypy.plugin.ClassDefContext,
auto_attribs: bool,
kw_only: bool,
lhs: NameExpr,
rvalue: CallExpr,
stmt: AssignmentStmt,
) -> Attribute | None:
"""Return an Attribute from the assignment or None if you can't make one."""
if auto_attribs and not stmt.new_syntax:
# auto_attribs requires an annotation on *every* attr.ib.
assert lhs.node is not None
ctx.api.msg.need_annotation_for_var(lhs.node, stmt)
return None
if len(stmt.lvalues) > 1:
ctx.api.fail("Too many names for one attribute", stmt)
return None
# This is the type that belongs in the __init__ method for this attrib.
init_type = stmt.type
# Read all the arguments from the call.
init = _get_bool_argument(ctx, rvalue, "init", True)
# Note: If the class decorator says kw_only=True the attribute is ignored.
# See https://github.com/python-attrs/attrs/issues/481 for explanation.
kw_only |= _get_bool_argument(ctx, rvalue, "kw_only", False)
# TODO: Check for attr.NOTHING
attr_has_default = bool(_get_argument(rvalue, "default"))
attr_has_factory = bool(_get_argument(rvalue, "factory"))
if attr_has_default and attr_has_factory:
ctx.api.fail('Can\'t pass both "default" and "factory".', rvalue)
elif attr_has_factory:
attr_has_default = True
# If the type isn't set through annotation but is passed through `type=` use that.
type_arg = _get_argument(rvalue, "type")
if type_arg and not init_type:
try:
un_type = expr_to_unanalyzed_type(type_arg, ctx.api.options, ctx.api.is_stub_file)
except TypeTranslationError:
ctx.api.fail("Invalid argument to type", type_arg)
else:
init_type = ctx.api.anal_type(un_type)
if init_type and isinstance(lhs.node, Var) and not lhs.node.type:
# If there is no annotation, add one.
lhs.node.type = init_type
lhs.is_inferred_def = False
# Note: convert is deprecated but works the same as converter.
converter = _get_argument(rvalue, "converter")
convert = _get_argument(rvalue, "convert")
if convert and converter:
ctx.api.fail('Can\'t pass both "convert" and "converter".', rvalue)
elif convert:
ctx.api.fail("convert is deprecated, use converter", rvalue)
converter = convert
converter_info = _parse_converter(ctx, converter)
# Custom alias might be defined:
alias = None
alias_expr = _get_argument(rvalue, "alias")
if alias_expr:
alias = ctx.api.parse_str_literal(alias_expr)
if alias is None:
ctx.api.fail(
'"alias" argument to attrs field must be a string literal',
rvalue,
code=LITERAL_REQ,
)
name = unmangle(lhs.name)
return Attribute(
name, alias, ctx.cls.info, attr_has_default, init, kw_only, converter_info, stmt, init_type
)
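# Editorial note (illustrative sketch, not part of mypy): the arguments read above
# correspond to attr.ib() call sites such as
#
#     x: int = attr.ib(default=0, kw_only=True)
#     y: list[int] = attr.ib(factory=list)         # counts as having a default
#     z: int = attr.ib(converter=int, alias="_z")  # alias must be a str literal
#
# Passing both "default" and "factory", or both "convert" and "converter",
# is rejected with the errors emitted above.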
def _parse_converter(
ctx: mypy.plugin.ClassDefContext, converter_expr: Expression | None
) -> Converter | None:
"""Return the Converter object from an Expression."""
# TODO: Support complex converters, e.g. lambdas, calls, etc.
if not converter_expr:
return None
converter_info = Converter()
if (
isinstance(converter_expr, CallExpr)
and isinstance(converter_expr.callee, RefExpr)
and converter_expr.callee.fullname in attr_optional_converters
and converter_expr.args
and converter_expr.args[0]
):
# Special handling for attr.converters.optional(type)
        # We extract the type and make the init_args Optional in Attribute.argument.
converter_expr = converter_expr.args[0]
is_attr_converters_optional = True
else:
is_attr_converters_optional = False
converter_type: Type | None = None
if isinstance(converter_expr, RefExpr) and converter_expr.node:
if isinstance(converter_expr.node, FuncDef):
if converter_expr.node.type and isinstance(converter_expr.node.type, FunctionLike):
converter_type = converter_expr.node.type
else: # The converter is an unannotated function.
converter_info.init_type = AnyType(TypeOfAny.unannotated)
return converter_info
elif isinstance(converter_expr.node, OverloadedFuncDef) and is_valid_overloaded_converter(
converter_expr.node
):
converter_type = converter_expr.node.type
elif isinstance(converter_expr.node, TypeInfo):
from mypy.checkmember import type_object_type # To avoid import cycle.
converter_type = type_object_type(converter_expr.node, ctx.api.named_type)
elif (
isinstance(converter_expr, IndexExpr)
and isinstance(converter_expr.analyzed, TypeApplication)
and isinstance(converter_expr.base, RefExpr)
and isinstance(converter_expr.base.node, TypeInfo)
):
# The converter is a generic type.
from mypy.checkmember import type_object_type # To avoid import cycle.
converter_type = type_object_type(converter_expr.base.node, ctx.api.named_type)
if isinstance(converter_type, CallableType):
converter_type = apply_generic_arguments(
converter_type,
converter_expr.analyzed.types,
ctx.api.msg.incompatible_typevar_value,
converter_type,
)
else:
converter_type = None
if isinstance(converter_expr, LambdaExpr):
# TODO: should we send a fail if converter_expr.min_args > 1?
converter_info.init_type = AnyType(TypeOfAny.unannotated)
return converter_info
if not converter_type:
# Signal that we have an unsupported converter.
ctx.api.fail(
"Unsupported converter, only named functions, types and lambdas are currently "
"supported",
converter_expr,
)
converter_info.init_type = AnyType(TypeOfAny.from_error)
return converter_info
converter_type = get_proper_type(converter_type)
if isinstance(converter_type, CallableType) and converter_type.arg_types:
converter_info.init_type = converter_type.arg_types[0]
if not is_attr_converters_optional:
converter_info.ret_type = converter_type.ret_type
elif isinstance(converter_type, Overloaded):
types: list[Type] = []
for item in converter_type.items:
# Walk the overloads looking for methods that can accept one argument.
num_arg_types = len(item.arg_types)
if not num_arg_types:
continue
if num_arg_types > 1 and any(kind == ARG_POS for kind in item.arg_kinds[1:]):
continue
types.append(item.arg_types[0])
# Make a union of all the valid types.
if types:
converter_info.init_type = make_simplified_union(types)
if is_attr_converters_optional and converter_info.init_type:
# If the converter was attr.converter.optional(type) then add None to
# the allowed init_type.
converter_info.init_type = UnionType.make_union([converter_info.init_type, NoneType()])
return converter_info
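# Editorial note (illustrative sketch, not part of mypy): for a converter like
#
#     def to_int(value: str | int) -> int: ...
#
#     @attr.s
#     class C:
#         x: int = attr.ib(converter=to_int)
#
# init_type becomes "str | int" (the converter's first argument type), and with
# attr.converters.optional(to_int) the type additionally gets None added to it.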
def is_valid_overloaded_converter(defn: OverloadedFuncDef) -> bool:
return all(
(not isinstance(item, Decorator) or isinstance(item.func.type, FunctionLike))
for item in defn.items
)
def _parse_assignments(
lvalue: Expression, stmt: AssignmentStmt
) -> tuple[list[NameExpr], list[Expression]]:
"""Convert a possibly complex assignment expression into lists of lvalues and rvalues."""
lvalues: list[NameExpr] = []
rvalues: list[Expression] = []
if isinstance(lvalue, (TupleExpr, ListExpr)):
if all(isinstance(item, NameExpr) for item in lvalue.items):
lvalues = cast(List[NameExpr], lvalue.items)
if isinstance(stmt.rvalue, (TupleExpr, ListExpr)):
rvalues = stmt.rvalue.items
elif isinstance(lvalue, NameExpr):
lvalues = [lvalue]
rvalues = [stmt.rvalue]
return lvalues, rvalues
def _add_order(ctx: mypy.plugin.ClassDefContext, adder: MethodAdder) -> None:
"""Generate all the ordering methods for this class."""
bool_type = ctx.api.named_type("builtins.bool")
object_type = ctx.api.named_type("builtins.object")
# Make the types be:
# AT = TypeVar('AT')
# def __lt__(self: AT, other: AT) -> bool
# This way comparisons with subclasses will work correctly.
fullname = f"{ctx.cls.info.fullname}.{SELF_TVAR_NAME}"
tvd = TypeVarType(
SELF_TVAR_NAME,
fullname,
# Namespace is patched per-method below.
id=TypeVarId(-1, namespace=""),
values=[],
upper_bound=object_type,
default=AnyType(TypeOfAny.from_omitted_generics),
)
self_tvar_expr = TypeVarExpr(
SELF_TVAR_NAME, fullname, [], object_type, AnyType(TypeOfAny.from_omitted_generics)
)
ctx.cls.info.names[SELF_TVAR_NAME] = SymbolTableNode(MDEF, self_tvar_expr)
for method in ["__lt__", "__le__", "__gt__", "__ge__"]:
namespace = f"{ctx.cls.info.fullname}.{method}"
tvd = tvd.copy_modified(id=TypeVarId(tvd.id.raw_id, namespace=namespace))
args = [Argument(Var("other", tvd), tvd, None, ARG_POS)]
adder.add_method(method, args, bool_type, self_type=tvd, tvd=tvd)
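# Editorial note (illustrative sketch, not part of mypy): the generated ordering
# methods are roughly equivalent to
#
#     AT = TypeVar("AT")
#     def __lt__(self: AT, other: AT) -> bool: ...
#
# so ordering comparisons against unrelated types are rejected, while comparisons
# between an attrs class and its subclasses still type check.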
def _make_frozen(ctx: mypy.plugin.ClassDefContext, attributes: list[Attribute]) -> None:
"""Turn all the attributes into properties to simulate frozen classes."""
for attribute in attributes:
if attribute.name in ctx.cls.info.names:
# This variable belongs to this class so we can modify it.
node = ctx.cls.info.names[attribute.name].node
if not isinstance(node, Var):
# The superclass attribute was overridden with a non-variable.
# No need to do anything here, override will be verified during
# type checking.
continue
node.is_property = True
else:
# This variable belongs to a super class so create new Var so we
# can modify it.
var = Var(attribute.name, attribute.init_type)
var.info = ctx.cls.info
var._fullname = f"{ctx.cls.info.fullname}.{var.name}"
ctx.cls.info.names[var.name] = SymbolTableNode(MDEF, var)
var.is_property = True
def _add_init(
ctx: mypy.plugin.ClassDefContext,
attributes: list[Attribute],
adder: MethodAdder,
method_name: Literal["__init__", "__attrs_init__"],
) -> None:
"""Generate an __init__ method for the attributes and add it to the class."""
# Convert attributes to arguments with kw_only arguments at the end of
# the argument list
pos_args = []
kw_only_args = []
sym_table = ctx.cls.info.names
for attribute in attributes:
if not attribute.init:
continue
if attribute.kw_only:
kw_only_args.append(attribute.argument(ctx))
else:
pos_args.append(attribute.argument(ctx))
# If the attribute is Final, present in `__init__` and has
# no default, make sure it doesn't error later.
if not attribute.has_default and attribute.name in sym_table:
sym_node = sym_table[attribute.name].node
if isinstance(sym_node, Var) and sym_node.is_final:
sym_node.final_set_in_init = True
args = pos_args + kw_only_args
if all(
# We use getattr rather than instance checks because the variable.type
# might be wrapped into a Union or some other type, but even non-Any
# types reliably track the fact that the argument was not annotated.
getattr(arg.variable.type, "type_of_any", None) == TypeOfAny.unannotated
for arg in args
):
# This workaround makes --disallow-incomplete-defs usable with attrs,
# but is definitely suboptimal as a long-term solution.
# See https://github.com/python/mypy/issues/5954 for discussion.
for a in args:
a.variable.type = AnyType(TypeOfAny.implementation_artifact)
a.type_annotation = AnyType(TypeOfAny.implementation_artifact)
adder.add_method(method_name, args, NoneType())
def _add_attrs_magic_attribute(
ctx: mypy.plugin.ClassDefContext, attrs: list[tuple[str, Type | None]]
) -> None:
any_type = AnyType(TypeOfAny.explicit)
attributes_types: list[Type] = [
ctx.api.named_type_or_none("attr.Attribute", [attr_type or any_type]) or any_type
for _, attr_type in attrs
]
fallback_type = ctx.api.named_type(
"builtins.tuple", [ctx.api.named_type_or_none("attr.Attribute", [any_type]) or any_type]
)
attr_name = MAGIC_ATTR_CLS_NAME_TEMPLATE.format(ctx.cls.fullname.replace(".", "_"))
ti = ctx.api.basic_new_typeinfo(attr_name, fallback_type, 0)
for (name, _), attr_type in zip(attrs, attributes_types):
var = Var(name, attr_type)
var._fullname = name
var.is_property = True
proper_type = get_proper_type(attr_type)
if isinstance(proper_type, Instance):
var.info = proper_type.type
ti.names[name] = SymbolTableNode(MDEF, var, plugin_generated=True)
attributes_type = Instance(ti, [])
# We need to stash the type of the magic attribute so it can be
# loaded on cached runs.
ctx.cls.info.names[attr_name] = SymbolTableNode(MDEF, ti, plugin_generated=True)
add_attribute_to_class(
ctx.api,
ctx.cls,
MAGIC_ATTR_NAME,
TupleType(attributes_types, fallback=attributes_type),
fullname=f"{ctx.cls.fullname}.{MAGIC_ATTR_NAME}",
override_allow_incompatible=True,
is_classvar=True,
)
def _add_slots(ctx: mypy.plugin.ClassDefContext, attributes: list[Attribute]) -> None:
if any(p.slots is None for p in ctx.cls.info.mro[1:-1]):
# At least one type in mro (excluding `self` and `object`)
# does not have concrete `__slots__` defined. Ignoring.
return
# Unlike `@dataclasses.dataclass`, `__slots__` is rewritten here.
ctx.cls.info.slots = {attr.name for attr in attributes}
# Also, inject `__slots__` attribute to class namespace:
slots_type = TupleType(
[ctx.api.named_type("builtins.str") for _ in attributes],
fallback=ctx.api.named_type("builtins.tuple"),
)
add_attribute_to_class(api=ctx.api, cls=ctx.cls, name="__slots__", typ=slots_type)
def _add_match_args(ctx: mypy.plugin.ClassDefContext, attributes: list[Attribute]) -> None:
if (
"__match_args__" not in ctx.cls.info.names
or ctx.cls.info.names["__match_args__"].plugin_generated
):
str_type = ctx.api.named_type("builtins.str")
match_args = TupleType(
[
str_type.copy_modified(last_known_value=LiteralType(attr.name, fallback=str_type))
for attr in attributes
if not attr.kw_only and attr.init
],
fallback=ctx.api.named_type("builtins.tuple"),
)
add_attribute_to_class(api=ctx.api, cls=ctx.cls, name="__match_args__", typ=match_args)
def _remove_hashability(ctx: mypy.plugin.ClassDefContext) -> None:
"""Remove hashability from a class."""
add_attribute_to_class(
ctx.api, ctx.cls, "__hash__", NoneType(), is_classvar=True, overwrite_existing=True
)
class MethodAdder:
"""Helper to add methods to a TypeInfo.
ctx: The ClassDefCtx we are using on which we will add methods.
"""
# TODO: Combine this with the code build_namedtuple_typeinfo to support both.
def __init__(self, ctx: mypy.plugin.ClassDefContext) -> None:
self.ctx = ctx
self.self_type = fill_typevars(ctx.cls.info)
def add_method(
self,
method_name: str,
args: list[Argument],
ret_type: Type,
self_type: Type | None = None,
tvd: TypeVarType | None = None,
) -> None:
"""Add a method: def <method_name>(self, <args>) -> <ret_type>): ... to info.
self_type: The type to use for the self argument or None to use the inferred self type.
tvd: If the method is generic these should be the type variables.
"""
self_type = self_type if self_type is not None else self.self_type
add_method_to_class(
self.ctx.api, self.ctx.cls, method_name, args, ret_type, self_type, tvd
)
def _get_attrs_init_type(typ: Instance) -> CallableType | None:
"""
If `typ` refers to an attrs class, get the type of its initializer method.
"""
magic_attr = typ.type.get(MAGIC_ATTR_NAME)
if magic_attr is None or not magic_attr.plugin_generated:
return None
init_method = typ.type.get_method("__init__") or typ.type.get_method(ATTRS_INIT_NAME)
if not isinstance(init_method, FuncDef) or not isinstance(init_method.type, CallableType):
return None
return init_method.type
def _fail_not_attrs_class(ctx: mypy.plugin.FunctionSigContext, t: Type, parent_t: Type) -> None:
t_name = format_type_bare(t, ctx.api.options)
if parent_t is t:
msg = (
f'Argument 1 to "evolve" has a variable type "{t_name}" not bound to an attrs class'
if isinstance(t, TypeVarType)
else f'Argument 1 to "evolve" has incompatible type "{t_name}"; expected an attrs class'
)
else:
pt_name = format_type_bare(parent_t, ctx.api.options)
msg = (
f'Argument 1 to "evolve" has type "{pt_name}" whose item "{t_name}" is not bound to an attrs class'
if isinstance(t, TypeVarType)
else f'Argument 1 to "evolve" has incompatible type "{pt_name}" whose item "{t_name}" is not an attrs class'
)
ctx.api.fail(msg, ctx.context)
def _get_expanded_attr_types(
ctx: mypy.plugin.FunctionSigContext,
typ: ProperType,
display_typ: ProperType,
parent_typ: ProperType,
) -> list[Mapping[str, Type]] | None:
"""
For a given type, determine what attrs classes it can be: for each class, return the field types.
For generic classes, the field types are expanded.
If the type contains Any or a non-attrs type, returns None; in the latter case, also reports an error.
"""
if isinstance(typ, AnyType):
return None
elif isinstance(typ, UnionType):
ret: list[Mapping[str, Type]] | None = []
for item in typ.relevant_items():
item = get_proper_type(item)
item_types = _get_expanded_attr_types(ctx, item, item, parent_typ)
if ret is not None and item_types is not None:
ret += item_types
else:
ret = None # but keep iterating to emit all errors
return ret
elif isinstance(typ, TypeVarType):
return _get_expanded_attr_types(
ctx, get_proper_type(typ.upper_bound), display_typ, parent_typ
)
elif isinstance(typ, Instance):
init_func = _get_attrs_init_type(typ)
if init_func is None:
_fail_not_attrs_class(ctx, display_typ, parent_typ)
return None
init_func = expand_type_by_instance(init_func, typ)
# [1:] to skip the self argument of AttrClass.__init__
field_names = cast(List[str], init_func.arg_names[1:])
field_types = init_func.arg_types[1:]
return [dict(zip(field_names, field_types))]
else:
_fail_not_attrs_class(ctx, display_typ, parent_typ)
return None
def _meet_fields(types: list[Mapping[str, Type]]) -> Mapping[str, Type]:
"""
"Meet" the fields of a list of attrs classes, i.e. for each field, its new type will be the lower bound.
"""
field_to_types = defaultdict(list)
for fields in types:
for name, typ in fields.items():
field_to_types[name].append(typ)
return {
name: (
get_proper_type(reduce(meet_types, f_types))
if len(f_types) == len(types)
else UninhabitedType()
)
for name, f_types in field_to_types.items()
}
def evolve_function_sig_callback(ctx: mypy.plugin.FunctionSigContext) -> CallableType:
"""
Generate a signature for the 'attr.evolve' function that's specific to the call site
and dependent on the type of the first argument.
"""
if len(ctx.args) != 2:
# Ideally the name and context should be callee's, but we don't have it in FunctionSigContext.
ctx.api.fail(f'"{ctx.default_signature.name}" has unexpected type annotation', ctx.context)
return ctx.default_signature
if len(ctx.args[0]) != 1:
return ctx.default_signature # leave it to the type checker to complain
inst_arg = ctx.args[0][0]
inst_type = get_proper_type(ctx.api.get_expression_type(inst_arg))
inst_type_str = format_type_bare(inst_type, ctx.api.options)
attr_types = _get_expanded_attr_types(ctx, inst_type, inst_type, inst_type)
if attr_types is None:
return ctx.default_signature
fields = _meet_fields(attr_types)
return CallableType(
arg_names=["inst", *fields.keys()],
arg_kinds=[ARG_POS] + [ARG_NAMED_OPT] * len(fields),
arg_types=[inst_type, *fields.values()],
ret_type=inst_type,
fallback=ctx.default_signature.fallback,
name=f"{ctx.default_signature.name} of {inst_type_str}",
)
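# Editorial note (illustrative sketch, not part of mypy): for
#
#     @attrs.define
#     class C:
#         x: int
#         y: str
#
#     attrs.evolve(c, x=1, y="a")
#
# the callback above synthesizes a call-site signature roughly like
# "def evolve(inst: C, *, x: int = ..., y: str = ...) -> C", so misspelled or
# wrongly typed field arguments are reported at the call site.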
def fields_function_sig_callback(ctx: mypy.plugin.FunctionSigContext) -> CallableType:
"""Provide the signature for `attrs.fields`."""
if len(ctx.args) != 1 or len(ctx.args[0]) != 1:
return ctx.default_signature
proper_type = get_proper_type(ctx.api.get_expression_type(ctx.args[0][0]))
# fields(Any) -> Any, fields(type[Any]) -> Any
if (
isinstance(proper_type, AnyType)
or isinstance(proper_type, TypeType)
and isinstance(proper_type.item, AnyType)
):
return ctx.default_signature
cls = None
arg_types = ctx.default_signature.arg_types
if isinstance(proper_type, TypeVarType):
inner = get_proper_type(proper_type.upper_bound)
if isinstance(inner, Instance):
            # We need to rework arg_types to compensate for the attrs stubs.
arg_types = [proper_type]
cls = inner.type
elif isinstance(proper_type, CallableType):
cls = proper_type.type_object()
if cls is not None and MAGIC_ATTR_NAME in cls.names:
# This is a proper attrs class.
ret_type = cls.names[MAGIC_ATTR_NAME].type
assert ret_type is not None
return ctx.default_signature.copy_modified(arg_types=arg_types, ret_type=ret_type)
return ctx.default_signature
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/plugins/attrs.py
|
Python
|
NOASSERTION
| 46,535 |
from __future__ import annotations
from typing import NamedTuple
from mypy.argmap import map_actuals_to_formals
from mypy.fixup import TypeFixer
from mypy.nodes import (
ARG_POS,
MDEF,
SYMBOL_FUNCBASE_TYPES,
Argument,
Block,
CallExpr,
ClassDef,
Decorator,
Expression,
FuncDef,
JsonDict,
NameExpr,
Node,
OverloadedFuncDef,
PassStmt,
RefExpr,
SymbolTableNode,
TypeInfo,
Var,
)
from mypy.plugin import CheckerPluginInterface, ClassDefContext, SemanticAnalyzerPluginInterface
from mypy.semanal_shared import (
ALLOW_INCOMPATIBLE_OVERRIDE,
parse_bool,
require_bool_literal_argument,
set_callable_name,
)
from mypy.typeops import try_getting_str_literals as try_getting_str_literals
from mypy.types import (
AnyType,
CallableType,
Instance,
LiteralType,
NoneType,
Overloaded,
Type,
TypeOfAny,
TypeType,
TypeVarType,
deserialize_type,
get_proper_type,
)
from mypy.types_utils import is_overlapping_none
from mypy.typevars import fill_typevars
from mypy.util import get_unique_redefinition_name
def _get_decorator_bool_argument(ctx: ClassDefContext, name: str, default: bool) -> bool:
"""Return the bool argument for the decorator.
This handles both @decorator(...) and @decorator.
"""
if isinstance(ctx.reason, CallExpr):
return _get_bool_argument(ctx, ctx.reason, name, default)
else:
return default
def _get_bool_argument(ctx: ClassDefContext, expr: CallExpr, name: str, default: bool) -> bool:
"""Return the boolean value for an argument to a call or the
default if it's not found.
"""
attr_value = _get_argument(expr, name)
if attr_value:
return require_bool_literal_argument(ctx.api, attr_value, name, default)
return default
def _get_argument(call: CallExpr, name: str) -> Expression | None:
"""Return the expression for the specific argument."""
# To do this we use the CallableType of the callee to find the FormalArgument,
# then walk the actual CallExpr looking for the appropriate argument.
#
# Note: I'm not hard-coding the index so that in the future we can support other
# attrib and class makers.
callee_type = _get_callee_type(call)
if not callee_type:
return None
argument = callee_type.argument_by_name(name)
if not argument:
return None
assert argument.name
for i, (attr_name, attr_value) in enumerate(zip(call.arg_names, call.args)):
if argument.pos is not None and not attr_name and i == argument.pos:
return attr_value
if attr_name == argument.name:
return attr_value
return None
def find_shallow_matching_overload_item(overload: Overloaded, call: CallExpr) -> CallableType:
"""Perform limited lookup of a matching overload item.
Full overload resolution is only supported during type checking, but plugins
sometimes need to resolve overloads. This can be used in some such use cases.
Resolve overloads based on these things only:
* Match using argument kinds and names
* If formal argument has type None, only accept the "None" expression in the callee
* If formal argument has type Literal[True] or Literal[False], only accept the
relevant bool literal
Return the first matching overload item, or the last one if nothing matches.
"""
for item in overload.items[:-1]:
ok = True
mapped = map_actuals_to_formals(
call.arg_kinds,
call.arg_names,
item.arg_kinds,
item.arg_names,
lambda i: AnyType(TypeOfAny.special_form),
)
# Look for extra actuals
matched_actuals = set()
for actuals in mapped:
matched_actuals.update(actuals)
if any(i not in matched_actuals for i in range(len(call.args))):
ok = False
for arg_type, kind, actuals in zip(item.arg_types, item.arg_kinds, mapped):
if kind.is_required() and not actuals:
# Missing required argument
ok = False
break
elif actuals:
args = [call.args[i] for i in actuals]
arg_type = get_proper_type(arg_type)
arg_none = any(isinstance(arg, NameExpr) and arg.name == "None" for arg in args)
if isinstance(arg_type, NoneType):
if not arg_none:
ok = False
break
elif (
arg_none
and not is_overlapping_none(arg_type)
and not (
isinstance(arg_type, Instance)
and arg_type.type.fullname == "builtins.object"
)
and not isinstance(arg_type, AnyType)
):
ok = False
break
elif isinstance(arg_type, LiteralType) and isinstance(arg_type.value, bool):
if not any(parse_bool(arg) == arg_type.value for arg in args):
ok = False
break
if ok:
return item
return overload.items[-1]
def _get_callee_type(call: CallExpr) -> CallableType | None:
"""Return the type of the callee, regardless of its syntatic form."""
callee_node: Node | None = call.callee
if isinstance(callee_node, RefExpr):
callee_node = callee_node.node
# Some decorators may be using typing.dataclass_transform, which is itself a decorator, so we
# need to unwrap them to get at the true callee
if isinstance(callee_node, Decorator):
callee_node = callee_node.func
if isinstance(callee_node, (Var, SYMBOL_FUNCBASE_TYPES)) and callee_node.type:
callee_node_type = get_proper_type(callee_node.type)
if isinstance(callee_node_type, Overloaded):
return find_shallow_matching_overload_item(callee_node_type, call)
elif isinstance(callee_node_type, CallableType):
return callee_node_type
return None
def add_method(
ctx: ClassDefContext,
name: str,
args: list[Argument],
return_type: Type,
self_type: Type | None = None,
tvar_def: TypeVarType | None = None,
is_classmethod: bool = False,
is_staticmethod: bool = False,
) -> None:
"""
Adds a new method to a class.
Deprecated, use add_method_to_class() instead.
"""
add_method_to_class(
ctx.api,
ctx.cls,
name=name,
args=args,
return_type=return_type,
self_type=self_type,
tvar_def=tvar_def,
is_classmethod=is_classmethod,
is_staticmethod=is_staticmethod,
)
class MethodSpec(NamedTuple):
"""Represents a method signature to be added, except for `name`."""
args: list[Argument]
return_type: Type
self_type: Type | None = None
tvar_defs: list[TypeVarType] | None = None
def add_method_to_class(
api: SemanticAnalyzerPluginInterface | CheckerPluginInterface,
cls: ClassDef,
name: str,
# MethodSpec items kept for backward compatibility:
args: list[Argument],
return_type: Type,
self_type: Type | None = None,
tvar_def: list[TypeVarType] | TypeVarType | None = None,
is_classmethod: bool = False,
is_staticmethod: bool = False,
) -> FuncDef | Decorator:
"""Adds a new method to a class definition."""
_prepare_class_namespace(cls, name)
if tvar_def is not None and not isinstance(tvar_def, list):
tvar_def = [tvar_def]
func, sym = _add_method_by_spec(
api,
cls.info,
name,
MethodSpec(args=args, return_type=return_type, self_type=self_type, tvar_defs=tvar_def),
is_classmethod=is_classmethod,
is_staticmethod=is_staticmethod,
)
cls.info.names[name] = sym
cls.info.defn.defs.body.append(func)
return func
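# Editorial note (illustrative sketch, not part of mypy; the method and argument
# names below are hypothetical): a class-def plugin hook would typically call this
# roughly as
#
#     arg = Argument(Var("value"), ctx.api.named_type("builtins.int"), None, ARG_POS)
#     add_method_to_class(ctx.api, ctx.cls, "set_value", [arg], NoneType())
#
# which injects "def set_value(self, value: int) -> None" into the class body and
# its symbol table.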
def add_overloaded_method_to_class(
api: SemanticAnalyzerPluginInterface | CheckerPluginInterface,
cls: ClassDef,
name: str,
items: list[MethodSpec],
is_classmethod: bool = False,
is_staticmethod: bool = False,
) -> OverloadedFuncDef:
"""Adds a new overloaded method to a class definition."""
assert len(items) >= 2, "Overloads must contain at least two cases"
# Save old definition, if it exists.
_prepare_class_namespace(cls, name)
# Create function bodies for each passed method spec.
funcs: list[Decorator | FuncDef] = []
for item in items:
func, _sym = _add_method_by_spec(
api,
cls.info,
name=name,
spec=item,
is_classmethod=is_classmethod,
is_staticmethod=is_staticmethod,
)
if isinstance(func, FuncDef):
var = Var(func.name, func.type)
var.set_line(func.line)
func.is_decorated = True
func.deco_line = func.line
deco = Decorator(func, [], var)
else:
deco = func
deco.is_overload = True
funcs.append(deco)
# Create the final OverloadedFuncDef node:
overload_def = OverloadedFuncDef(funcs)
overload_def.info = cls.info
overload_def.is_class = is_classmethod
overload_def.is_static = is_staticmethod
sym = SymbolTableNode(MDEF, overload_def)
sym.plugin_generated = True
cls.info.names[name] = sym
cls.info.defn.defs.body.append(overload_def)
return overload_def
def _prepare_class_namespace(cls: ClassDef, name: str) -> None:
info = cls.info
assert info
# First remove any previously generated methods with the same name
# to avoid clashes and problems in the semantic analyzer.
if name in info.names:
sym = info.names[name]
if sym.plugin_generated and isinstance(sym.node, FuncDef):
cls.defs.body.remove(sym.node)
# NOTE: we would like the plugin generated node to dominate, but we still
# need to keep any existing definitions so they get semantically analyzed.
if name in info.names:
# Get a nice unique name instead.
r_name = get_unique_redefinition_name(name, info.names)
info.names[r_name] = info.names[name]
def _add_method_by_spec(
api: SemanticAnalyzerPluginInterface | CheckerPluginInterface,
info: TypeInfo,
name: str,
spec: MethodSpec,
*,
is_classmethod: bool,
is_staticmethod: bool,
) -> tuple[FuncDef | Decorator, SymbolTableNode]:
args, return_type, self_type, tvar_defs = spec
assert not (
is_classmethod is True and is_staticmethod is True
), "Can't add a new method that's both staticmethod and classmethod."
if isinstance(api, SemanticAnalyzerPluginInterface):
function_type = api.named_type("builtins.function")
else:
function_type = api.named_generic_type("builtins.function", [])
if is_classmethod:
self_type = self_type or TypeType(fill_typevars(info))
first = [Argument(Var("_cls"), self_type, None, ARG_POS, True)]
elif is_staticmethod:
first = []
else:
self_type = self_type or fill_typevars(info)
first = [Argument(Var("self"), self_type, None, ARG_POS)]
args = first + args
arg_types, arg_names, arg_kinds = [], [], []
for arg in args:
assert arg.type_annotation, "All arguments must be fully typed."
arg_types.append(arg.type_annotation)
arg_names.append(arg.variable.name)
arg_kinds.append(arg.kind)
signature = CallableType(arg_types, arg_kinds, arg_names, return_type, function_type)
if tvar_defs:
signature.variables = tvar_defs
func = FuncDef(name, args, Block([PassStmt()]))
func.info = info
func.type = set_callable_name(signature, func)
func.is_class = is_classmethod
func.is_static = is_staticmethod
func._fullname = info.fullname + "." + name
func.line = info.line
# Add decorator for is_staticmethod. It's unnecessary for is_classmethod.
if is_staticmethod:
func.is_decorated = True
v = Var(name, func.type)
v.info = info
v._fullname = func._fullname
v.is_staticmethod = True
dec = Decorator(func, [], v)
dec.line = info.line
sym = SymbolTableNode(MDEF, dec)
sym.plugin_generated = True
return dec, sym
sym = SymbolTableNode(MDEF, func)
sym.plugin_generated = True
return func, sym
def add_attribute_to_class(
api: SemanticAnalyzerPluginInterface,
cls: ClassDef,
name: str,
typ: Type,
final: bool = False,
no_serialize: bool = False,
override_allow_incompatible: bool = False,
fullname: str | None = None,
is_classvar: bool = False,
overwrite_existing: bool = False,
) -> Var:
"""
Adds a new attribute to a class definition.
    This currently only generates the symbol table entry, with no corresponding AssignmentStmt.
"""
info = cls.info
# NOTE: we would like the plugin generated node to dominate, but we still
# need to keep any existing definitions so they get semantically analyzed.
if name in info.names and not overwrite_existing:
# Get a nice unique name instead.
r_name = get_unique_redefinition_name(name, info.names)
info.names[r_name] = info.names[name]
node = Var(name, typ)
node.info = info
node.is_final = final
node.is_classvar = is_classvar
if name in ALLOW_INCOMPATIBLE_OVERRIDE:
node.allow_incompatible_override = True
else:
node.allow_incompatible_override = override_allow_incompatible
if fullname:
node._fullname = fullname
else:
node._fullname = info.fullname + "." + name
info.names[name] = SymbolTableNode(
MDEF, node, plugin_generated=True, no_serialize=no_serialize
)
return node
def deserialize_and_fixup_type(data: str | JsonDict, api: SemanticAnalyzerPluginInterface) -> Type:
typ = deserialize_type(data)
typ.accept(TypeFixer(api.modules, allow_missing=False))
return typ
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/plugins/common.py
|
Python
|
NOASSERTION
| 14,148 |
"""Plugin to provide accurate types for some parts of the ctypes module."""
from __future__ import annotations
# Fully qualified instead of "from mypy.plugin import ..." to avoid circular import problems.
import mypy.plugin
from mypy import nodes
from mypy.maptype import map_instance_to_supertype
from mypy.messages import format_type
from mypy.subtypes import is_subtype
from mypy.typeops import make_simplified_union
from mypy.types import (
AnyType,
CallableType,
Instance,
NoneType,
ProperType,
Type,
TypeOfAny,
UnionType,
flatten_nested_unions,
get_proper_type,
)
def _find_simplecdata_base_arg(
tp: Instance, api: mypy.plugin.CheckerPluginInterface
) -> ProperType | None:
"""Try to find a parametrized _SimpleCData in tp's bases and return its single type argument.
None is returned if _SimpleCData appears nowhere in tp's (direct or indirect) bases.
"""
if tp.type.has_base("_ctypes._SimpleCData"):
simplecdata_base = map_instance_to_supertype(
tp,
api.named_generic_type("_ctypes._SimpleCData", [AnyType(TypeOfAny.special_form)]).type,
)
assert len(simplecdata_base.args) == 1, "_SimpleCData takes exactly one type argument"
return get_proper_type(simplecdata_base.args[0])
return None
def _autoconvertible_to_cdata(tp: Type, api: mypy.plugin.CheckerPluginInterface) -> Type:
"""Get a type that is compatible with all types that can be implicitly converted to the given
CData type.
Examples:
* c_int -> Union[c_int, int]
* c_char_p -> Union[c_char_p, bytes, int, NoneType]
* MyStructure -> MyStructure
"""
allowed_types = []
# If tp is a union, we allow all types that are convertible to at least one of the union
# items. This is not quite correct - strictly speaking, only types convertible to *all* of the
# union items should be allowed. This may be worth changing in the future, but the more
# correct algorithm could be too strict to be useful.
for t in flatten_nested_unions([tp]):
t = get_proper_type(t)
# Every type can be converted from itself (obviously).
allowed_types.append(t)
if isinstance(t, Instance):
unboxed = _find_simplecdata_base_arg(t, api)
if unboxed is not None:
# If _SimpleCData appears in tp's (direct or indirect) bases, its type argument
# specifies the type's "unboxed" version, which can always be converted back to
# the original "boxed" type.
allowed_types.append(unboxed)
if t.type.has_base("ctypes._PointerLike"):
# Pointer-like _SimpleCData subclasses can also be converted from
# an int or None.
allowed_types.append(api.named_generic_type("builtins.int", []))
allowed_types.append(NoneType())
return make_simplified_union(allowed_types)
def _autounboxed_cdata(tp: Type) -> ProperType:
"""Get the auto-unboxed version of a CData type, if applicable.
For *direct* _SimpleCData subclasses, the only type argument of _SimpleCData in the bases list
is returned.
For all other CData types, including indirect _SimpleCData subclasses, tp is returned as-is.
"""
tp = get_proper_type(tp)
if isinstance(tp, UnionType):
return make_simplified_union([_autounboxed_cdata(t) for t in tp.items])
elif isinstance(tp, Instance):
for base in tp.type.bases:
if base.type.fullname == "_ctypes._SimpleCData":
# If tp has _SimpleCData as a direct base class,
# the auto-unboxed type is the single type argument of the _SimpleCData type.
assert len(base.args) == 1
return get_proper_type(base.args[0])
# If tp is not a concrete type, or if there is no _SimpleCData in the bases,
# the type is not auto-unboxed.
return tp
def _get_array_element_type(tp: Type) -> ProperType | None:
"""Get the element type of the Array type tp, or None if not specified."""
tp = get_proper_type(tp)
if isinstance(tp, Instance):
assert tp.type.fullname == "_ctypes.Array"
if len(tp.args) == 1:
return get_proper_type(tp.args[0])
return None
def array_constructor_callback(ctx: mypy.plugin.FunctionContext) -> Type:
"""Callback to provide an accurate signature for the ctypes.Array constructor."""
    # Extract the element type from the constructor's return type, i.e. the type of the array
# being constructed.
et = _get_array_element_type(ctx.default_return_type)
if et is not None:
allowed = _autoconvertible_to_cdata(et, ctx.api)
assert (
len(ctx.arg_types) == 1
), "The stub of the ctypes.Array constructor should have a single vararg parameter"
for arg_num, (arg_kind, arg_type) in enumerate(zip(ctx.arg_kinds[0], ctx.arg_types[0]), 1):
if arg_kind == nodes.ARG_POS and not is_subtype(arg_type, allowed):
ctx.api.msg.fail(
"Array constructor argument {} of type {}"
" is not convertible to the array element type {}".format(
arg_num,
format_type(arg_type, ctx.api.options),
format_type(et, ctx.api.options),
),
ctx.context,
)
elif arg_kind == nodes.ARG_STAR:
ty = ctx.api.named_generic_type("typing.Iterable", [allowed])
if not is_subtype(arg_type, ty):
it = ctx.api.named_generic_type("typing.Iterable", [et])
ctx.api.msg.fail(
"Array constructor argument {} of type {}"
" is not convertible to the array element type {}".format(
arg_num,
format_type(arg_type, ctx.api.options),
format_type(it, ctx.api.options),
),
ctx.context,
)
return ctx.default_return_type
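# Editorial note (illustrative sketch, not part of mypy): this callback is what
# checks constructor calls such as
#
#     ok = (ctypes.c_int * 3)(1, 2, 3)     # int is auto-convertible to c_int
#     bad = (ctypes.c_int * 3)("x", 2, 3)  # rejected with the error above
#
# since each positional argument is compared against the auto-convertible union
# for the array's element type.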
def array_getitem_callback(ctx: mypy.plugin.MethodContext) -> Type:
"""Callback to provide an accurate return type for ctypes.Array.__getitem__."""
et = _get_array_element_type(ctx.type)
if et is not None:
unboxed = _autounboxed_cdata(et)
assert (
len(ctx.arg_types) == 1
), "The stub of ctypes.Array.__getitem__ should have exactly one parameter"
assert (
len(ctx.arg_types[0]) == 1
), "ctypes.Array.__getitem__'s parameter should not be variadic"
index_type = get_proper_type(ctx.arg_types[0][0])
if isinstance(index_type, Instance):
if index_type.type.has_base("builtins.int"):
return unboxed
elif index_type.type.has_base("builtins.slice"):
return ctx.api.named_generic_type("builtins.list", [unboxed])
return ctx.default_return_type
def array_setitem_callback(ctx: mypy.plugin.MethodSigContext) -> CallableType:
"""Callback to provide an accurate signature for ctypes.Array.__setitem__."""
et = _get_array_element_type(ctx.type)
if et is not None:
allowed = _autoconvertible_to_cdata(et, ctx.api)
assert len(ctx.default_signature.arg_types) == 2
index_type = get_proper_type(ctx.default_signature.arg_types[0])
if isinstance(index_type, Instance):
arg_type = None
if index_type.type.has_base("builtins.int"):
arg_type = allowed
elif index_type.type.has_base("builtins.slice"):
arg_type = ctx.api.named_generic_type("builtins.list", [allowed])
if arg_type is not None:
# Note: arg_type can only be None if index_type is invalid, in which case we use
# the default signature and let mypy report an error about it.
return ctx.default_signature.copy_modified(
arg_types=ctx.default_signature.arg_types[:1] + [arg_type]
)
return ctx.default_signature
def array_iter_callback(ctx: mypy.plugin.MethodContext) -> Type:
"""Callback to provide an accurate return type for ctypes.Array.__iter__."""
et = _get_array_element_type(ctx.type)
if et is not None:
unboxed = _autounboxed_cdata(et)
return ctx.api.named_generic_type("typing.Iterator", [unboxed])
return ctx.default_return_type
def array_value_callback(ctx: mypy.plugin.AttributeContext) -> Type:
"""Callback to provide an accurate type for ctypes.Array.value."""
et = _get_array_element_type(ctx.type)
if et is not None:
types: list[Type] = []
for tp in flatten_nested_unions([et]):
tp = get_proper_type(tp)
if isinstance(tp, AnyType):
types.append(AnyType(TypeOfAny.from_another_any, source_any=tp))
elif isinstance(tp, Instance) and tp.type.fullname == "ctypes.c_char":
types.append(ctx.api.named_generic_type("builtins.bytes", []))
elif isinstance(tp, Instance) and tp.type.fullname == "ctypes.c_wchar":
types.append(ctx.api.named_generic_type("builtins.str", []))
else:
ctx.api.msg.fail(
'Array attribute "value" is only available'
' with element type "c_char" or "c_wchar", not {}'.format(
format_type(et, ctx.api.options)
),
ctx.context,
)
return make_simplified_union(types)
return ctx.default_attr_type
def array_raw_callback(ctx: mypy.plugin.AttributeContext) -> Type:
"""Callback to provide an accurate type for ctypes.Array.raw."""
et = _get_array_element_type(ctx.type)
if et is not None:
types: list[Type] = []
for tp in flatten_nested_unions([et]):
tp = get_proper_type(tp)
if (
isinstance(tp, AnyType)
or isinstance(tp, Instance)
and tp.type.fullname == "ctypes.c_char"
):
types.append(ctx.api.named_generic_type("builtins.bytes", []))
else:
ctx.api.msg.fail(
'Array attribute "raw" is only available'
' with element type "c_char", not {}'.format(format_type(et, ctx.api.options)),
ctx.context,
)
return make_simplified_union(types)
return ctx.default_attr_type
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/plugins/ctypes.py
|
Python
|
NOASSERTION
| 10,675 |
"""Plugin that provides support for dataclasses."""
from __future__ import annotations
from typing import TYPE_CHECKING, Final, Iterator, Literal
from mypy import errorcodes, message_registry
from mypy.expandtype import expand_type, expand_type_by_instance
from mypy.meet import meet_types
from mypy.messages import format_type_bare
from mypy.nodes import (
ARG_NAMED,
ARG_NAMED_OPT,
ARG_OPT,
ARG_POS,
ARG_STAR,
ARG_STAR2,
MDEF,
Argument,
AssignmentStmt,
Block,
CallExpr,
ClassDef,
Context,
DataclassTransformSpec,
Decorator,
EllipsisExpr,
Expression,
FuncDef,
FuncItem,
IfStmt,
JsonDict,
NameExpr,
Node,
PlaceholderNode,
RefExpr,
Statement,
SymbolTableNode,
TempNode,
TypeAlias,
TypeInfo,
TypeVarExpr,
Var,
)
from mypy.plugin import ClassDefContext, FunctionSigContext, SemanticAnalyzerPluginInterface
from mypy.plugins.common import (
_get_callee_type,
_get_decorator_bool_argument,
add_attribute_to_class,
add_method_to_class,
deserialize_and_fixup_type,
)
from mypy.semanal_shared import find_dataclass_transform_spec, require_bool_literal_argument
from mypy.server.trigger import make_wildcard_trigger
from mypy.state import state
from mypy.typeops import map_type_from_supertype, try_getting_literals_from_type
from mypy.types import (
AnyType,
CallableType,
FunctionLike,
Instance,
LiteralType,
NoneType,
ProperType,
TupleType,
Type,
TypeOfAny,
TypeVarId,
TypeVarType,
UninhabitedType,
UnionType,
get_proper_type,
)
from mypy.typevars import fill_typevars
if TYPE_CHECKING:
from mypy.checker import TypeChecker
# The set of decorators that generate dataclasses.
dataclass_makers: Final = {"dataclass", "dataclasses.dataclass"}
SELF_TVAR_NAME: Final = "_DT"
_TRANSFORM_SPEC_FOR_DATACLASSES: Final = DataclassTransformSpec(
eq_default=True,
order_default=False,
kw_only_default=False,
frozen_default=False,
field_specifiers=("dataclasses.Field", "dataclasses.field"),
)
_INTERNAL_REPLACE_SYM_NAME: Final = "__mypy-replace"
_INTERNAL_POST_INIT_SYM_NAME: Final = "__mypy-post_init"
class DataclassAttribute:
def __init__(
self,
name: str,
alias: str | None,
is_in_init: bool,
is_init_var: bool,
has_default: bool,
line: int,
column: int,
type: Type | None,
info: TypeInfo,
kw_only: bool,
is_neither_frozen_nor_nonfrozen: bool,
api: SemanticAnalyzerPluginInterface,
) -> None:
self.name = name
self.alias = alias
self.is_in_init = is_in_init
self.is_init_var = is_init_var
self.has_default = has_default
self.line = line
self.column = column
self.type = type # Type as __init__ argument
self.info = info
self.kw_only = kw_only
self.is_neither_frozen_nor_nonfrozen = is_neither_frozen_nor_nonfrozen
self._api = api
def to_argument(
self, current_info: TypeInfo, *, of: Literal["__init__", "replace", "__post_init__"]
) -> Argument:
if of == "__init__":
arg_kind = ARG_POS
if self.kw_only and self.has_default:
arg_kind = ARG_NAMED_OPT
elif self.kw_only and not self.has_default:
arg_kind = ARG_NAMED
elif not self.kw_only and self.has_default:
arg_kind = ARG_OPT
elif of == "replace":
arg_kind = ARG_NAMED if self.is_init_var and not self.has_default else ARG_NAMED_OPT
elif of == "__post_init__":
# We always use `ARG_POS` without a default value, because it is practical.
# Consider this case:
#
# @dataclass
# class My:
# y: dataclasses.InitVar[str] = 'a'
# def __post_init__(self, y: str) -> None: ...
#
# We would be *required* to specify `y: str = ...` if default is added here.
# But, most people won't care about adding default values to `__post_init__`,
# because it is not designed to be called directly, and duplicating default values
# for the sake of type-checking is unpleasant.
arg_kind = ARG_POS
return Argument(
variable=self.to_var(current_info),
type_annotation=self.expand_type(current_info),
initializer=EllipsisExpr() if self.has_default else None, # Only used by stubgen
kind=arg_kind,
)
def expand_type(self, current_info: TypeInfo) -> Type | None:
if self.type is not None and self.info.self_type is not None:
# In general, it is not safe to call `expand_type()` during semantic analysis,
# however this plugin is called very late, so all types should be fully ready.
# Also, it is tricky to avoid eager expansion of Self types here (e.g. because
# we serialize attributes).
with state.strict_optional_set(self._api.options.strict_optional):
return expand_type(
self.type, {self.info.self_type.id: fill_typevars(current_info)}
)
return self.type
def to_var(self, current_info: TypeInfo) -> Var:
return Var(self.alias or self.name, self.expand_type(current_info))
def serialize(self) -> JsonDict:
assert self.type
return {
"name": self.name,
"alias": self.alias,
"is_in_init": self.is_in_init,
"is_init_var": self.is_init_var,
"has_default": self.has_default,
"line": self.line,
"column": self.column,
"type": self.type.serialize(),
"kw_only": self.kw_only,
"is_neither_frozen_nor_nonfrozen": self.is_neither_frozen_nor_nonfrozen,
}
@classmethod
def deserialize(
cls, info: TypeInfo, data: JsonDict, api: SemanticAnalyzerPluginInterface
) -> DataclassAttribute:
data = data.copy()
typ = deserialize_and_fixup_type(data.pop("type"), api)
return cls(type=typ, info=info, **data, api=api)
def expand_typevar_from_subtype(self, sub_type: TypeInfo) -> None:
"""Expands type vars in the context of a subtype when an attribute is inherited
from a generic super type."""
if self.type is not None:
with state.strict_optional_set(self._api.options.strict_optional):
self.type = map_type_from_supertype(self.type, sub_type, self.info)
class DataclassTransformer:
"""Implement the behavior of @dataclass.
Note that this may be executed multiple times on the same class, so
everything here must be idempotent.
This runs after the main semantic analysis pass, so you can assume that
there are no placeholders.
"""
def __init__(
self,
cls: ClassDef,
# Statement must also be accepted since class definition itself may be passed as the reason
# for subclass/metaclass-based uses of `typing.dataclass_transform`
reason: Expression | Statement,
spec: DataclassTransformSpec,
api: SemanticAnalyzerPluginInterface,
) -> None:
self._cls = cls
self._reason = reason
self._spec = spec
self._api = api
def transform(self) -> bool:
"""Apply all the necessary transformations to the underlying
dataclass so as to ensure it is fully type checked according
to the rules in PEP 557.
"""
info = self._cls.info
attributes = self.collect_attributes()
if attributes is None:
# Some definitions are not ready. We need another pass.
return False
for attr in attributes:
if attr.type is None:
return False
decorator_arguments = {
"init": self._get_bool_arg("init", True),
"eq": self._get_bool_arg("eq", self._spec.eq_default),
"order": self._get_bool_arg("order", self._spec.order_default),
"frozen": self._get_bool_arg("frozen", self._spec.frozen_default),
"slots": self._get_bool_arg("slots", False),
"match_args": self._get_bool_arg("match_args", True),
}
py_version = self._api.options.python_version
# If there are no attributes, it may be that the semantic analyzer has not
# processed them yet. In order to work around this, we can simply skip generating
# __init__ if there are no attributes, because if the user truly did not define any,
# then the object default __init__ with an empty signature will be present anyway.
if (
decorator_arguments["init"]
and ("__init__" not in info.names or info.names["__init__"].plugin_generated)
and attributes
):
args = [
attr.to_argument(info, of="__init__")
for attr in attributes
if attr.is_in_init and not self._is_kw_only_type(attr.type)
]
if info.fallback_to_any:
# Make positional args optional since we don't know their order.
# This will at least allow us to typecheck them if they are called
# as kwargs
for arg in args:
if arg.kind == ARG_POS:
arg.kind = ARG_OPT
existing_args_names = {arg.variable.name for arg in args}
gen_args_name = "generated_args"
while gen_args_name in existing_args_names:
gen_args_name += "_"
gen_kwargs_name = "generated_kwargs"
while gen_kwargs_name in existing_args_names:
gen_kwargs_name += "_"
args = [
Argument(Var(gen_args_name), AnyType(TypeOfAny.explicit), None, ARG_STAR),
*args,
Argument(Var(gen_kwargs_name), AnyType(TypeOfAny.explicit), None, ARG_STAR2),
]
add_method_to_class(
self._api, self._cls, "__init__", args=args, return_type=NoneType()
)
if (
decorator_arguments["eq"]
and info.get("__eq__") is None
or decorator_arguments["order"]
):
# Type variable for self types in generated methods.
obj_type = self._api.named_type("builtins.object")
self_tvar_expr = TypeVarExpr(
SELF_TVAR_NAME,
info.fullname + "." + SELF_TVAR_NAME,
[],
obj_type,
AnyType(TypeOfAny.from_omitted_generics),
)
info.names[SELF_TVAR_NAME] = SymbolTableNode(MDEF, self_tvar_expr)
# Add <, >, <=, >=, but only if the class has an eq method.
if decorator_arguments["order"]:
if not decorator_arguments["eq"]:
self._api.fail('"eq" must be True if "order" is True', self._reason)
for method_name in ["__lt__", "__gt__", "__le__", "__ge__"]:
# Like for __eq__ and __ne__, we want "other" to match
# the self type.
obj_type = self._api.named_type("builtins.object")
order_tvar_def = TypeVarType(
SELF_TVAR_NAME,
f"{info.fullname}.{SELF_TVAR_NAME}",
id=TypeVarId(-1, namespace=f"{info.fullname}.{method_name}"),
values=[],
upper_bound=obj_type,
default=AnyType(TypeOfAny.from_omitted_generics),
)
order_return_type = self._api.named_type("builtins.bool")
order_args = [
Argument(Var("other", order_tvar_def), order_tvar_def, None, ARG_POS)
]
existing_method = info.get(method_name)
if existing_method is not None and not existing_method.plugin_generated:
assert existing_method.node
self._api.fail(
f'You may not have a custom "{method_name}" method when "order" is True',
existing_method.node,
)
add_method_to_class(
self._api,
self._cls,
method_name,
args=order_args,
return_type=order_return_type,
self_type=order_tvar_def,
tvar_def=order_tvar_def,
)
parent_decorator_arguments = []
for parent in info.mro[1:-1]:
parent_args = parent.metadata.get("dataclass")
# Ignore parent classes that directly specify a dataclass transform-decorated metaclass
# when searching for usage of the frozen parameter. PEP 681 states that a class that
# directly specifies such a metaclass must be treated as neither frozen nor non-frozen.
if parent_args and not _has_direct_dataclass_transform_metaclass(parent):
parent_decorator_arguments.append(parent_args)
if decorator_arguments["frozen"]:
if any(not parent["frozen"] for parent in parent_decorator_arguments):
self._api.fail("Cannot inherit frozen dataclass from a non-frozen one", info)
self._propertize_callables(attributes, settable=False)
self._freeze(attributes)
else:
if any(parent["frozen"] for parent in parent_decorator_arguments):
self._api.fail("Cannot inherit non-frozen dataclass from a frozen one", info)
self._propertize_callables(attributes)
if decorator_arguments["slots"]:
self.add_slots(info, attributes, correct_version=py_version >= (3, 10))
self.reset_init_only_vars(info, attributes)
if (
decorator_arguments["match_args"]
and (
"__match_args__" not in info.names or info.names["__match_args__"].plugin_generated
)
and py_version >= (3, 10)
):
str_type = self._api.named_type("builtins.str")
literals: list[Type] = [
LiteralType(attr.name, str_type) for attr in attributes if attr.is_in_init
]
match_args_type = TupleType(literals, self._api.named_type("builtins.tuple"))
add_attribute_to_class(self._api, self._cls, "__match_args__", match_args_type)
self._add_dataclass_fields_magic_attribute()
self._add_internal_replace_method(attributes)
if self._api.options.python_version >= (3, 13):
self._add_dunder_replace(attributes)
if "__post_init__" in info.names:
self._add_internal_post_init_method(attributes)
info.metadata["dataclass"] = {
"attributes": [attr.serialize() for attr in attributes],
"frozen": decorator_arguments["frozen"],
}
return True
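    # Illustrative sketch (not part of mypy itself) of the eq/order handling above,
    # using a hypothetical user class: defining your own comparison method while
    # asking for "order=True" is rejected.
    #
    #     from dataclasses import dataclass
    #
    #     @dataclass(order=True)
    #     class Version:
    #         major: int
    #         def __lt__(self, other: "Version") -> bool:  # error: You may not have
    #             ...                                      # a custom "__lt__" method
    #                                                      # when "order" is True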
def _add_dunder_replace(self, attributes: list[DataclassAttribute]) -> None:
"""Add a `__replace__` method to the class, which is used to replace attributes in the `copy` module."""
args = [attr.to_argument(self._cls.info, of="replace") for attr in attributes]
type_vars = [tv for tv in self._cls.type_vars]
add_method_to_class(
self._api,
self._cls,
"__replace__",
args=args,
return_type=Instance(self._cls.info, type_vars),
)
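    # Illustrative sketch (not part of mypy itself): with the synthesized __replace__,
    # replacing fields on an instance is checked against the field types on
    # Python 3.13+. The class below is hypothetical user code.
    #
    #     from dataclasses import dataclass
    #
    #     @dataclass
    #     class Point:
    #         x: int
    #         y: int
    #
    #     p = Point(1, 2)
    #     p.__replace__(x=3)      # ok, result type is "Point"
    #     p.__replace__(x="no")   # error: "x" expects "int"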
def _add_internal_replace_method(self, attributes: list[DataclassAttribute]) -> None:
"""
Stashes the signature of 'dataclasses.replace(...)' for this specific dataclass
to be used later whenever 'dataclasses.replace' is called for this dataclass.
"""
add_method_to_class(
self._api,
self._cls,
_INTERNAL_REPLACE_SYM_NAME,
args=[attr.to_argument(self._cls.info, of="replace") for attr in attributes],
return_type=NoneType(),
is_staticmethod=True,
)
def _add_internal_post_init_method(self, attributes: list[DataclassAttribute]) -> None:
add_method_to_class(
self._api,
self._cls,
_INTERNAL_POST_INIT_SYM_NAME,
args=[
attr.to_argument(self._cls.info, of="__post_init__")
for attr in attributes
if attr.is_init_var
],
return_type=NoneType(),
)
def add_slots(
self, info: TypeInfo, attributes: list[DataclassAttribute], *, correct_version: bool
) -> None:
if not correct_version:
            # This means that the Python version is lower than `3.10`,
            # where `slots` is simply not an argument of the `dataclass` function.
self._api.fail(
'Keyword argument "slots" for "dataclass" '
"is only valid in Python 3.10 and higher",
self._reason,
)
return
generated_slots = {attr.name for attr in attributes}
if (info.slots is not None and info.slots != generated_slots) or info.names.get(
"__slots__"
):
# This means we have a slots conflict.
# Class explicitly specifies a different `__slots__` field.
# And `@dataclass(slots=True)` is used.
# In runtime this raises a type error.
self._api.fail(
'"{}" both defines "__slots__" and is used with "slots=True"'.format(
self._cls.name
),
self._cls,
)
return
if any(p.slots is None for p in info.mro[1:-1]):
# At least one type in mro (excluding `self` and `object`)
# does not have concrete `__slots__` defined. Ignoring.
return
info.slots = generated_slots
# Now, insert `.__slots__` attribute to class namespace:
slots_type = TupleType(
[self._api.named_type("builtins.str") for _ in generated_slots],
self._api.named_type("builtins.tuple"),
)
add_attribute_to_class(self._api, self._cls, "__slots__", slots_type)
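    # Illustrative sketch (not part of mypy itself): hypothetical user code that trips
    # the conflict check above.
    #
    #     from dataclasses import dataclass
    #
    #     @dataclass(slots=True)
    #     class Point:                  # error: "Point" both defines "__slots__"
    #         __slots__ = ("x", "y")    #        and is used with "slots=True"
    #         x: int
    #         y: int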
def reset_init_only_vars(self, info: TypeInfo, attributes: list[DataclassAttribute]) -> None:
"""Remove init-only vars from the class and reset init var declarations."""
for attr in attributes:
if attr.is_init_var:
if attr.name in info.names:
del info.names[attr.name]
else:
# Nodes of superclass InitVars not used in __init__ cannot be reached.
assert attr.is_init_var
for stmt in info.defn.defs.body:
if isinstance(stmt, AssignmentStmt) and stmt.unanalyzed_type:
lvalue = stmt.lvalues[0]
if isinstance(lvalue, NameExpr) and lvalue.name == attr.name:
# Reset node so that another semantic analysis pass will
# recreate a symbol node for this attribute.
lvalue.node = None
def _get_assignment_statements_from_if_statement(
self, stmt: IfStmt
) -> Iterator[AssignmentStmt]:
for body in stmt.body:
if not body.is_unreachable:
yield from self._get_assignment_statements_from_block(body)
if stmt.else_body is not None and not stmt.else_body.is_unreachable:
yield from self._get_assignment_statements_from_block(stmt.else_body)
def _get_assignment_statements_from_block(self, block: Block) -> Iterator[AssignmentStmt]:
for stmt in block.body:
if isinstance(stmt, AssignmentStmt):
yield stmt
elif isinstance(stmt, IfStmt):
yield from self._get_assignment_statements_from_if_statement(stmt)
def collect_attributes(self) -> list[DataclassAttribute] | None:
"""Collect all attributes declared in the dataclass and its parents.
All assignments of the form
a: SomeType
b: SomeOtherType = ...
are collected.
Return None if some dataclass base class hasn't been processed
yet and thus we'll need to ask for another pass.
"""
cls = self._cls
# First, collect attributes belonging to any class in the MRO, ignoring duplicates.
#
# We iterate through the MRO in reverse because attrs defined in the parent must appear
# earlier in the attributes list than attrs defined in the child. See:
# https://docs.python.org/3/library/dataclasses.html#inheritance
#
# However, we also want attributes defined in the subtype to override ones defined
# in the parent. We can implement this via a dict without disrupting the attr order
# because dicts preserve insertion order in Python 3.7+.
found_attrs: dict[str, DataclassAttribute] = {}
found_dataclass_supertype = False
for info in reversed(cls.info.mro[1:-1]):
if "dataclass_tag" in info.metadata and "dataclass" not in info.metadata:
# We haven't processed the base class yet. Need another pass.
return None
if "dataclass" not in info.metadata:
continue
# Each class depends on the set of attributes in its dataclass ancestors.
self._api.add_plugin_dependency(make_wildcard_trigger(info.fullname))
found_dataclass_supertype = True
for data in info.metadata["dataclass"]["attributes"]:
name: str = data["name"]
attr = DataclassAttribute.deserialize(info, data, self._api)
# TODO: We shouldn't be performing type operations during the main
# semantic analysis pass, since some TypeInfo attributes might
# still be in flux. This should be performed in a later phase.
attr.expand_typevar_from_subtype(cls.info)
found_attrs[name] = attr
sym_node = cls.info.names.get(name)
if sym_node and sym_node.node and not isinstance(sym_node.node, Var):
self._api.fail(
"Dataclass attribute may only be overridden by another attribute",
sym_node.node,
)
# Second, collect attributes belonging to the current class.
current_attr_names: set[str] = set()
kw_only = self._get_bool_arg("kw_only", self._spec.kw_only_default)
for stmt in self._get_assignment_statements_from_block(cls.defs):
# Any assignment that doesn't use the new type declaration
# syntax can be ignored out of hand.
if not stmt.new_syntax:
continue
# a: int, b: str = 1, 'foo' is not supported syntax so we
# don't have to worry about it.
lhs = stmt.lvalues[0]
if not isinstance(lhs, NameExpr):
continue
sym = cls.info.names.get(lhs.name)
if sym is None:
# There was probably a semantic analysis error.
continue
node = sym.node
assert not isinstance(node, PlaceholderNode)
if isinstance(node, TypeAlias):
self._api.fail(
("Type aliases inside dataclass definitions are not supported at runtime"),
node,
)
# Skip processing this node. This doesn't match the runtime behaviour,
# but the only alternative would be to modify the SymbolTable,
# and it's a little hairy to do that in a plugin.
continue
if isinstance(node, Decorator):
# This might be a property / field name clash.
# We will issue an error later.
continue
assert isinstance(node, Var)
# x: ClassVar[int] is ignored by dataclasses.
if node.is_classvar:
continue
# x: InitVar[int] is turned into x: int and is removed from the class.
is_init_var = False
node_type = get_proper_type(node.type)
if (
isinstance(node_type, Instance)
and node_type.type.fullname == "dataclasses.InitVar"
):
is_init_var = True
node.type = node_type.args[0]
if self._is_kw_only_type(node_type):
kw_only = True
has_field_call, field_args = self._collect_field_args(stmt.rvalue)
is_in_init_param = field_args.get("init")
if is_in_init_param is None:
is_in_init = self._get_default_init_value_for_field_specifier(stmt.rvalue)
else:
is_in_init = bool(self._api.parse_bool(is_in_init_param))
has_default = False
# Ensure that something like x: int = field() is rejected
# after an attribute with a default.
if has_field_call:
has_default = (
"default" in field_args
or "default_factory" in field_args
# alias for default_factory defined in PEP 681
or "factory" in field_args
)
# All other assignments are already type checked.
elif not isinstance(stmt.rvalue, TempNode):
has_default = True
if not has_default and self._spec is _TRANSFORM_SPEC_FOR_DATACLASSES:
# Make all non-default dataclass attributes implicit because they are de-facto
# set on self in the generated __init__(), not in the class body. On the other
# hand, we don't know how custom dataclass transforms initialize attributes,
# so we don't treat them as implicit. This is required to support descriptors
# (https://github.com/python/mypy/issues/14868).
sym.implicit = True
is_kw_only = kw_only
# Use the kw_only field arg if it is provided. Otherwise use the
# kw_only value from the decorator parameter.
field_kw_only_param = field_args.get("kw_only")
if field_kw_only_param is not None:
value = self._api.parse_bool(field_kw_only_param)
if value is not None:
is_kw_only = value
else:
self._api.fail('"kw_only" argument must be a boolean literal', stmt.rvalue)
if sym.type is None and node.is_final and node.is_inferred:
                # This is a special case: an assignment like x: Final = 42 is classified
                # as annotated above, but mypy strips the `Final`, turning it into x = 42.
# We do not support inferred types in dataclasses, so we can try inferring
# type for simple literals, and otherwise require an explicit type
# argument for Final[...].
typ = self._api.analyze_simple_literal_type(stmt.rvalue, is_final=True)
if typ:
node.type = typ
else:
self._api.fail(
"Need type argument for Final[...] with non-literal default in dataclass",
stmt,
)
node.type = AnyType(TypeOfAny.from_error)
alias = None
if "alias" in field_args:
alias = self._api.parse_str_literal(field_args["alias"])
if alias is None:
self._api.fail(
message_registry.DATACLASS_FIELD_ALIAS_MUST_BE_LITERAL,
stmt.rvalue,
code=errorcodes.LITERAL_REQ,
)
current_attr_names.add(lhs.name)
with state.strict_optional_set(self._api.options.strict_optional):
init_type = self._infer_dataclass_attr_init_type(sym, lhs.name, stmt)
found_attrs[lhs.name] = DataclassAttribute(
name=lhs.name,
alias=alias,
is_in_init=is_in_init,
is_init_var=is_init_var,
has_default=has_default,
line=stmt.line,
column=stmt.column,
type=init_type,
info=cls.info,
kw_only=is_kw_only,
is_neither_frozen_nor_nonfrozen=_has_direct_dataclass_transform_metaclass(
cls.info
),
api=self._api,
)
all_attrs = list(found_attrs.values())
if found_dataclass_supertype:
all_attrs.sort(key=lambda a: a.kw_only)
# Third, ensure that arguments without a default don't follow
# arguments that have a default and that the KW_ONLY sentinel
# is only provided once.
found_default = False
found_kw_sentinel = False
for attr in all_attrs:
# If we find any attribute that is_in_init, not kw_only, and that
# doesn't have a default after one that does have one,
# then that's an error.
if found_default and attr.is_in_init and not attr.has_default and not attr.kw_only:
# If the issue comes from merging different classes, report it
# at the class definition point.
context: Context = cls
if attr.name in current_attr_names:
context = Context(line=attr.line, column=attr.column)
self._api.fail(
"Attributes without a default cannot follow attributes with one", context
)
found_default = found_default or (attr.has_default and attr.is_in_init)
if found_kw_sentinel and self._is_kw_only_type(attr.type):
context = cls
if attr.name in current_attr_names:
context = Context(line=attr.line, column=attr.column)
self._api.fail(
"There may not be more than one field with the KW_ONLY type", context
)
found_kw_sentinel = found_kw_sentinel or self._is_kw_only_type(attr.type)
return all_attrs
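    # Illustrative sketch (not part of mypy itself): the ordering rule enforced above
    # mirrors the runtime behaviour of dataclasses. The classes below are hypothetical
    # user code.
    #
    #     from dataclasses import KW_ONLY, dataclass
    #
    #     @dataclass
    #     class Bad:
    #         a: int = 0
    #         b: str            # error: Attributes without a default cannot
    #                           #        follow attributes with one
    #
    #     @dataclass
    #     class Ok:
    #         a: int = 0
    #         _: KW_ONLY
    #         b: str            # fine: "b" is keyword-only, so the rule does not apply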
def _freeze(self, attributes: list[DataclassAttribute]) -> None:
"""Converts all attributes to @property methods in order to
emulate frozen classes.
"""
info = self._cls.info
for attr in attributes:
# Classes that directly specify a dataclass_transform metaclass must be neither frozen
            # nor non-frozen per PEP 681. Though it is surprising, this means that attributes from
# such a class must be writable even if the rest of the class hierarchy is frozen. This
# matches the behavior of Pyright (the reference implementation).
if attr.is_neither_frozen_nor_nonfrozen:
continue
sym_node = info.names.get(attr.name)
if sym_node is not None:
var = sym_node.node
if isinstance(var, Var):
var.is_property = True
else:
var = attr.to_var(info)
var.info = info
var.is_property = True
var._fullname = info.fullname + "." + var.name
info.names[var.name] = SymbolTableNode(MDEF, var)
def _propertize_callables(
self, attributes: list[DataclassAttribute], settable: bool = True
) -> None:
"""Converts all attributes with callable types to @property methods.
This avoids the typechecker getting confused and thinking that
`my_dataclass_instance.callable_attr(foo)` is going to receive a
`self` argument (it is not).
"""
info = self._cls.info
for attr in attributes:
if isinstance(get_proper_type(attr.type), CallableType):
var = attr.to_var(info)
var.info = info
var.is_property = True
var.is_settable_property = settable
var._fullname = info.fullname + "." + var.name
info.names[var.name] = SymbolTableNode(MDEF, var)
def _is_kw_only_type(self, node: Type | None) -> bool:
"""Checks if the type of the node is the KW_ONLY sentinel value."""
if node is None:
return False
node_type = get_proper_type(node)
if not isinstance(node_type, Instance):
return False
return node_type.type.fullname == "dataclasses.KW_ONLY"
def _add_dataclass_fields_magic_attribute(self) -> None:
attr_name = "__dataclass_fields__"
any_type = AnyType(TypeOfAny.explicit)
# For `dataclasses`, use the type `dict[str, Field[Any]]` for accuracy. For dataclass
# transforms, it's inaccurate to use `Field` since a given transform may use a completely
# different type (or none); fall back to `Any` there.
#
# In either case, we're aiming to match the Typeshed stub for `is_dataclass`, which expects
# the instance to have a `__dataclass_fields__` attribute of type `dict[str, Field[Any]]`.
if self._spec is _TRANSFORM_SPEC_FOR_DATACLASSES:
field_type = self._api.named_type_or_none("dataclasses.Field", [any_type]) or any_type
else:
field_type = any_type
attr_type = self._api.named_type(
"builtins.dict", [self._api.named_type("builtins.str"), field_type]
)
var = Var(name=attr_name, type=attr_type)
var.info = self._cls.info
var._fullname = self._cls.info.fullname + "." + attr_name
var.is_classvar = True
self._cls.info.names[attr_name] = SymbolTableNode(
kind=MDEF, node=var, plugin_generated=True
)
def _collect_field_args(self, expr: Expression) -> tuple[bool, dict[str, Expression]]:
"""Returns a tuple where the first value represents whether or not
the expression is a call to dataclass.field and the second is a
dictionary of the keyword arguments that field() was called with.
"""
if (
isinstance(expr, CallExpr)
and isinstance(expr.callee, RefExpr)
and expr.callee.fullname in self._spec.field_specifiers
):
# field() only takes keyword arguments.
args = {}
for name, arg, kind in zip(expr.arg_names, expr.args, expr.arg_kinds):
if not kind.is_named():
if kind.is_named(star=True):
# This means that `field` is used with `**` unpacking,
# the best we can do for now is not to fail.
# TODO: we can infer what's inside `**` and try to collect it.
message = 'Unpacking **kwargs in "field()" is not supported'
elif self._spec is not _TRANSFORM_SPEC_FOR_DATACLASSES:
# dataclasses.field can only be used with keyword args, but this
# restriction is only enforced for the *standardized* arguments to
# dataclass_transform field specifiers. If this is not a
# dataclasses.dataclass class, we can just skip positional args safely.
continue
else:
message = '"field()" does not accept positional arguments'
self._api.fail(message, expr)
return True, {}
assert name is not None
args[name] = arg
return True, args
return False, {}
def _get_bool_arg(self, name: str, default: bool) -> bool:
# Expressions are always CallExprs (either directly or via a wrapper like Decorator), so
# we can use the helpers from common
if isinstance(self._reason, Expression):
return _get_decorator_bool_argument(
ClassDefContext(self._cls, self._reason, self._api), name, default
)
# Subclass/metaclass use of `typing.dataclass_transform` reads the parameters from the
# class's keyword arguments (ie `class Subclass(Parent, kwarg1=..., kwarg2=...)`)
expression = self._cls.keywords.get(name)
if expression is not None:
return require_bool_literal_argument(self._api, expression, name, default)
return default
def _get_default_init_value_for_field_specifier(self, call: Expression) -> bool:
"""
Find a default value for the `init` parameter of the specifier being called. If the
specifier's type signature includes an `init` parameter with a type of `Literal[True]` or
`Literal[False]`, return the appropriate boolean value from the literal. Otherwise,
fall back to the standard default of `True`.
"""
if not isinstance(call, CallExpr):
return True
specifier_type = _get_callee_type(call)
if specifier_type is None:
return True
parameter = specifier_type.argument_by_name("init")
if parameter is None:
return True
literals = try_getting_literals_from_type(parameter.typ, bool, "builtins.bool")
if literals is None or len(literals) != 1:
return True
return literals[0]
def _infer_dataclass_attr_init_type(
self, sym: SymbolTableNode, name: str, context: Context
) -> Type | None:
"""Infer __init__ argument type for an attribute.
In particular, possibly use the signature of __set__.
"""
default = sym.type
if sym.implicit:
return default
t = get_proper_type(sym.type)
# Perform a simple-minded inference from the signature of __set__, if present.
# We can't use mypy.checkmember here, since this plugin runs before type checking.
        # We only support some basic scenarios here, which is hopefully sufficient for
# the vast majority of use cases.
if not isinstance(t, Instance):
return default
setter = t.type.get("__set__")
if setter:
if isinstance(setter.node, FuncDef):
super_info = t.type.get_containing_type_info("__set__")
assert super_info
if setter.type:
setter_type = get_proper_type(
map_type_from_supertype(setter.type, t.type, super_info)
)
else:
return AnyType(TypeOfAny.unannotated)
if isinstance(setter_type, CallableType) and setter_type.arg_kinds == [
ARG_POS,
ARG_POS,
ARG_POS,
]:
return expand_type_by_instance(setter_type.arg_types[2], t)
else:
self._api.fail(
f'Unsupported signature for "__set__" in "{t.type.name}"', context
)
else:
self._api.fail(f'Unsupported "__set__" in "{t.type.name}"', context)
return default
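    # Illustrative sketch (not part of mypy itself): because of the __set__ handling
    # above, a descriptor-typed field takes its __init__ argument type from the
    # descriptor's setter. All names below are hypothetical user code.
    #
    #     from dataclasses import dataclass
    #
    #     class Converter:
    #         def __set__(self, instance: object, value: str) -> None: ...
    #         def __get__(self, instance: object, owner: object) -> int: ...
    #
    #     @dataclass
    #     class Config:
    #         port: Converter = Converter()
    #
    #     Config(port="8080")   # ok: __init__ accepts "str", taken from __set__
    #     Config(port=8080)     # error: expected "str"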
def add_dataclass_tag(info: TypeInfo) -> None:
# The value is ignored, only the existence matters.
info.metadata["dataclass_tag"] = {}
def dataclass_tag_callback(ctx: ClassDefContext) -> None:
"""Record that we have a dataclass in the main semantic analysis pass.
The later pass implemented by DataclassTransformer will use this
to detect dataclasses in base classes.
"""
add_dataclass_tag(ctx.cls.info)
def dataclass_class_maker_callback(ctx: ClassDefContext) -> bool:
"""Hooks into the class typechecking process to add support for dataclasses."""
transformer = DataclassTransformer(
ctx.cls, ctx.reason, _get_transform_spec(ctx.reason), ctx.api
)
return transformer.transform()
def _get_transform_spec(reason: Expression) -> DataclassTransformSpec:
"""Find the relevant transform parameters from the decorator/parent class/metaclass that
triggered the dataclasses plugin.
Although the resulting DataclassTransformSpec is based on the typing.dataclass_transform
function, we also use it for traditional dataclasses.dataclass classes as well for simplicity.
In those cases, we return a default spec rather than one based on a call to
`typing.dataclass_transform`.
"""
if _is_dataclasses_decorator(reason):
return _TRANSFORM_SPEC_FOR_DATACLASSES
spec = find_dataclass_transform_spec(reason)
assert spec is not None, (
"trying to find dataclass transform spec, but reason is neither dataclasses.dataclass nor "
"decorated with typing.dataclass_transform"
)
return spec
def _is_dataclasses_decorator(node: Node) -> bool:
if isinstance(node, CallExpr):
node = node.callee
if isinstance(node, RefExpr):
return node.fullname in dataclass_makers
return False
def _has_direct_dataclass_transform_metaclass(info: TypeInfo) -> bool:
return (
info.declared_metaclass is not None
and info.declared_metaclass.type.dataclass_transform_spec is not None
)
def _get_expanded_dataclasses_fields(
ctx: FunctionSigContext, typ: ProperType, display_typ: ProperType, parent_typ: ProperType
) -> list[CallableType] | None:
"""
For a given type, determine what dataclasses it can be: for each class, return the field types.
For generic classes, the field types are expanded.
If the type contains Any or a non-dataclass, returns None; in the latter case, also reports an error.
"""
if isinstance(typ, UnionType):
ret: list[CallableType] | None = []
for item in typ.relevant_items():
item = get_proper_type(item)
item_types = _get_expanded_dataclasses_fields(ctx, item, item, parent_typ)
if ret is not None and item_types is not None:
ret += item_types
else:
ret = None # but keep iterating to emit all errors
return ret
elif isinstance(typ, TypeVarType):
return _get_expanded_dataclasses_fields(
ctx, get_proper_type(typ.upper_bound), display_typ, parent_typ
)
elif isinstance(typ, Instance):
replace_sym = typ.type.get_method(_INTERNAL_REPLACE_SYM_NAME)
if replace_sym is None:
return None
replace_sig = replace_sym.type
assert isinstance(replace_sig, ProperType)
assert isinstance(replace_sig, CallableType)
return [expand_type_by_instance(replace_sig, typ)]
else:
return None
# TODO: we can potentially get the function signature hook to allow returning a union
# and leave this to the regular machinery of resolving a union of callables
# (https://github.com/python/mypy/issues/15457)
def _meet_replace_sigs(sigs: list[CallableType]) -> CallableType:
"""
Produces the lowest bound of the 'replace' signatures of multiple dataclasses.
"""
args = {
name: (typ, kind)
for name, typ, kind in zip(sigs[0].arg_names, sigs[0].arg_types, sigs[0].arg_kinds)
}
for sig in sigs[1:]:
sig_args = {
name: (typ, kind)
for name, typ, kind in zip(sig.arg_names, sig.arg_types, sig.arg_kinds)
}
for name in (*args.keys(), *sig_args.keys()):
sig_typ, sig_kind = args.get(name, (UninhabitedType(), ARG_NAMED_OPT))
sig2_typ, sig2_kind = sig_args.get(name, (UninhabitedType(), ARG_NAMED_OPT))
args[name] = (
meet_types(sig_typ, sig2_typ),
ARG_NAMED_OPT if sig_kind == sig2_kind == ARG_NAMED_OPT else ARG_NAMED,
)
return sigs[0].copy_modified(
arg_names=list(args.keys()),
arg_types=[typ for typ, _ in args.values()],
arg_kinds=[kind for _, kind in args.values()],
)
def replace_function_sig_callback(ctx: FunctionSigContext) -> CallableType:
"""
Returns a signature for the 'dataclasses.replace' function that's dependent on the type
of the first positional argument.
"""
if len(ctx.args) != 2:
# Ideally the name and context should be callee's, but we don't have it in FunctionSigContext.
ctx.api.fail(f'"{ctx.default_signature.name}" has unexpected type annotation', ctx.context)
return ctx.default_signature
if len(ctx.args[0]) != 1:
return ctx.default_signature # leave it to the type checker to complain
obj_arg = ctx.args[0][0]
obj_type = get_proper_type(ctx.api.get_expression_type(obj_arg))
inst_type_str = format_type_bare(obj_type, ctx.api.options)
replace_sigs = _get_expanded_dataclasses_fields(ctx, obj_type, obj_type, obj_type)
if replace_sigs is None:
return ctx.default_signature
replace_sig = _meet_replace_sigs(replace_sigs)
return replace_sig.copy_modified(
arg_names=[None, *replace_sig.arg_names],
arg_kinds=[ARG_POS, *replace_sig.arg_kinds],
arg_types=[obj_type, *replace_sig.arg_types],
ret_type=obj_type,
fallback=ctx.default_signature.fallback,
name=f"{ctx.default_signature.name} of {inst_type_str}",
)
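# Illustrative sketch (not part of mypy itself): the adjusted signature restricts
# dataclasses.replace() to the fields of the given instance, with their declared
# types. The class below is hypothetical user code.
#
#     import dataclasses
#
#     @dataclasses.dataclass
#     class Point:
#         x: int
#         y: int
#
#     p = dataclasses.replace(Point(1, 2), y=5)   # ok, inferred as "Point"
#     dataclasses.replace(Point(1, 2), z=5)       # error: unexpected keyword argument "z"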
def is_processed_dataclass(info: TypeInfo) -> bool:
return bool(info) and "dataclass" in info.metadata
def check_post_init(api: TypeChecker, defn: FuncItem, info: TypeInfo) -> None:
if defn.type is None:
return
assert isinstance(defn.type, FunctionLike)
ideal_sig_method = info.get_method(_INTERNAL_POST_INIT_SYM_NAME)
assert ideal_sig_method is not None and ideal_sig_method.type is not None
ideal_sig = ideal_sig_method.type
assert isinstance(ideal_sig, ProperType) # we set it ourselves
assert isinstance(ideal_sig, CallableType)
ideal_sig = ideal_sig.copy_modified(name="__post_init__")
api.check_override(
override=defn.type,
original=ideal_sig,
name="__post_init__",
name_in_super="__post_init__",
supertype="dataclass",
original_class_or_static=False,
override_class_or_static=False,
node=defn,
)
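# Illustrative sketch (not part of mypy itself): the override check above compares a
# user-written __post_init__ against a signature built from the InitVar fields.
# The class below is hypothetical user code.
#
#     from dataclasses import InitVar, dataclass
#
#     @dataclass
#     class Account:
#         balance: float
#         currency: InitVar[str]
#         def __post_init__(self, currency: str) -> None:  # ok: matches the InitVar
#             ...
#
#     # Writing `def __post_init__(self) -> None` instead would be flagged, because
#     # the parameter for the InitVar "currency" would be missing.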
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/plugins/dataclasses.py
|
Python
|
NOASSERTION
| 46,716 |
from __future__ import annotations
from functools import partial
from typing import Callable, Final
import mypy.errorcodes as codes
from mypy import message_registry
from mypy.nodes import DictExpr, IntExpr, StrExpr, UnaryExpr
from mypy.plugin import (
AttributeContext,
ClassDefContext,
FunctionContext,
FunctionSigContext,
MethodContext,
MethodSigContext,
Plugin,
)
from mypy.plugins.common import try_getting_str_literals
from mypy.subtypes import is_subtype
from mypy.typeops import is_literal_type_like, make_simplified_union
from mypy.types import (
TPDICT_FB_NAMES,
AnyType,
CallableType,
FunctionLike,
Instance,
LiteralType,
NoneType,
TupleType,
Type,
TypedDictType,
TypeOfAny,
TypeVarType,
UnionType,
get_proper_type,
get_proper_types,
)
class DefaultPlugin(Plugin):
"""Type checker plugin that is enabled by default."""
def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:
from mypy.plugins import ctypes, enums, singledispatch
if fullname == "_ctypes.Array":
return ctypes.array_constructor_callback
elif fullname == "functools.singledispatch":
return singledispatch.create_singledispatch_function_callback
elif fullname == "functools.partial":
import mypy.plugins.functools
return mypy.plugins.functools.partial_new_callback
elif fullname == "enum.member":
return enums.enum_member_callback
return None
def get_function_signature_hook(
self, fullname: str
) -> Callable[[FunctionSigContext], FunctionLike] | None:
from mypy.plugins import attrs, dataclasses
if fullname in ("attr.evolve", "attrs.evolve", "attr.assoc", "attrs.assoc"):
return attrs.evolve_function_sig_callback
elif fullname in ("attr.fields", "attrs.fields"):
return attrs.fields_function_sig_callback
elif fullname == "dataclasses.replace":
return dataclasses.replace_function_sig_callback
return None
def get_method_signature_hook(
self, fullname: str
) -> Callable[[MethodSigContext], FunctionLike] | None:
from mypy.plugins import ctypes, singledispatch
if fullname == "typing.Mapping.get":
return typed_dict_get_signature_callback
elif fullname in {n + ".setdefault" for n in TPDICT_FB_NAMES}:
return typed_dict_setdefault_signature_callback
elif fullname in {n + ".pop" for n in TPDICT_FB_NAMES}:
return typed_dict_pop_signature_callback
elif fullname == "_ctypes.Array.__setitem__":
return ctypes.array_setitem_callback
elif fullname == singledispatch.SINGLEDISPATCH_CALLABLE_CALL_METHOD:
return singledispatch.call_singledispatch_function_callback
typed_dict_updates = set()
for n in TPDICT_FB_NAMES:
typed_dict_updates.add(n + ".update")
typed_dict_updates.add(n + ".__or__")
typed_dict_updates.add(n + ".__ror__")
typed_dict_updates.add(n + ".__ior__")
if fullname in typed_dict_updates:
return typed_dict_update_signature_callback
return None
def get_method_hook(self, fullname: str) -> Callable[[MethodContext], Type] | None:
from mypy.plugins import ctypes, singledispatch
if fullname == "typing.Mapping.get":
return typed_dict_get_callback
elif fullname == "builtins.int.__pow__":
return int_pow_callback
elif fullname == "builtins.int.__neg__":
return int_neg_callback
elif fullname == "builtins.int.__pos__":
return int_pos_callback
elif fullname in ("builtins.tuple.__mul__", "builtins.tuple.__rmul__"):
return tuple_mul_callback
elif fullname in {n + ".setdefault" for n in TPDICT_FB_NAMES}:
return typed_dict_setdefault_callback
elif fullname in {n + ".pop" for n in TPDICT_FB_NAMES}:
return typed_dict_pop_callback
elif fullname in {n + ".__delitem__" for n in TPDICT_FB_NAMES}:
return typed_dict_delitem_callback
elif fullname == "_ctypes.Array.__getitem__":
return ctypes.array_getitem_callback
elif fullname == "_ctypes.Array.__iter__":
return ctypes.array_iter_callback
elif fullname == singledispatch.SINGLEDISPATCH_REGISTER_METHOD:
return singledispatch.singledispatch_register_callback
elif fullname == singledispatch.REGISTER_CALLABLE_CALL_METHOD:
return singledispatch.call_singledispatch_function_after_register_argument
elif fullname == "functools.partial.__call__":
import mypy.plugins.functools
return mypy.plugins.functools.partial_call_callback
return None
def get_attribute_hook(self, fullname: str) -> Callable[[AttributeContext], Type] | None:
from mypy.plugins import ctypes, enums
if fullname == "_ctypes.Array.value":
return ctypes.array_value_callback
elif fullname == "_ctypes.Array.raw":
return ctypes.array_raw_callback
elif fullname in enums.ENUM_NAME_ACCESS:
return enums.enum_name_callback
elif fullname in enums.ENUM_VALUE_ACCESS:
return enums.enum_value_callback
return None
def get_class_decorator_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
from mypy.plugins import attrs, dataclasses
# These dataclass and attrs hooks run in the main semantic analysis pass
# and only tag known dataclasses/attrs classes, so that the second
# hooks (in get_class_decorator_hook_2) can detect dataclasses/attrs classes
# in the MRO.
if fullname in dataclasses.dataclass_makers:
return dataclasses.dataclass_tag_callback
if (
fullname in attrs.attr_class_makers
or fullname in attrs.attr_dataclass_makers
or fullname in attrs.attr_frozen_makers
or fullname in attrs.attr_define_makers
):
return attrs.attr_tag_callback
return None
def get_class_decorator_hook_2(
self, fullname: str
) -> Callable[[ClassDefContext], bool] | None:
import mypy.plugins.functools
from mypy.plugins import attrs, dataclasses
if fullname in dataclasses.dataclass_makers:
return dataclasses.dataclass_class_maker_callback
elif fullname in mypy.plugins.functools.functools_total_ordering_makers:
return mypy.plugins.functools.functools_total_ordering_maker_callback
elif fullname in attrs.attr_class_makers:
return attrs.attr_class_maker_callback
elif fullname in attrs.attr_dataclass_makers:
return partial(attrs.attr_class_maker_callback, auto_attribs_default=True)
elif fullname in attrs.attr_frozen_makers:
return partial(
attrs.attr_class_maker_callback, auto_attribs_default=None, frozen_default=True
)
elif fullname in attrs.attr_define_makers:
return partial(
attrs.attr_class_maker_callback, auto_attribs_default=None, slots_default=True
)
return None
def typed_dict_get_signature_callback(ctx: MethodSigContext) -> CallableType:
"""Try to infer a better signature type for TypedDict.get.
This is used to get better type context for the second argument that
depends on a TypedDict value type.
"""
signature = ctx.default_signature
if (
isinstance(ctx.type, TypedDictType)
and len(ctx.args) == 2
and len(ctx.args[0]) == 1
and isinstance(ctx.args[0][0], StrExpr)
and len(signature.arg_types) == 2
and len(signature.variables) == 1
and len(ctx.args[1]) == 1
):
key = ctx.args[0][0].value
value_type = get_proper_type(ctx.type.items.get(key))
ret_type = signature.ret_type
if value_type:
default_arg = ctx.args[1][0]
if (
isinstance(value_type, TypedDictType)
and isinstance(default_arg, DictExpr)
and len(default_arg.items) == 0
):
# Caller has empty dict {} as default for typed dict.
value_type = value_type.copy_modified(required_keys=set())
# Tweak the signature to include the value type as context. It's
# only needed for type inference since there's a union with a type
# variable that accepts everything.
tv = signature.variables[0]
assert isinstance(tv, TypeVarType)
return signature.copy_modified(
arg_types=[signature.arg_types[0], make_simplified_union([value_type, tv])],
ret_type=ret_type,
)
return signature
def typed_dict_get_callback(ctx: MethodContext) -> Type:
"""Infer a precise return type for TypedDict.get with literal first argument."""
if (
isinstance(ctx.type, TypedDictType)
and len(ctx.arg_types) >= 1
and len(ctx.arg_types[0]) == 1
):
keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
if keys is None:
return ctx.default_return_type
output_types: list[Type] = []
for key in keys:
value_type = get_proper_type(ctx.type.items.get(key))
if value_type is None:
return ctx.default_return_type
if len(ctx.arg_types) == 1:
output_types.append(value_type)
elif len(ctx.arg_types) == 2 and len(ctx.arg_types[1]) == 1 and len(ctx.args[1]) == 1:
default_arg = ctx.args[1][0]
if (
isinstance(default_arg, DictExpr)
and len(default_arg.items) == 0
and isinstance(value_type, TypedDictType)
):
# Special case '{}' as the default for a typed dict type.
output_types.append(value_type.copy_modified(required_keys=set()))
else:
output_types.append(value_type)
output_types.append(ctx.arg_types[1][0])
if len(ctx.arg_types) == 1:
output_types.append(NoneType())
return make_simplified_union(output_types)
return ctx.default_return_type
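# Illustrative sketch (not part of mypy itself): with a literal key, the two hooks
# above narrow the result of .get() instead of falling back to a very general type.
# The TypedDict below is hypothetical user code; the revealed types are approximate.
#
#     from typing import TypedDict
#
#     class Movie(TypedDict):
#         name: str
#         year: int
#
#     m: Movie = {"name": "Alien", "year": 1979}
#     reveal_type(m.get("year"))       # int | None
#     reveal_type(m.get("year", 0))    # int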
def typed_dict_pop_signature_callback(ctx: MethodSigContext) -> CallableType:
"""Try to infer a better signature type for TypedDict.pop.
This is used to get better type context for the second argument that
depends on a TypedDict value type.
"""
signature = ctx.default_signature
str_type = ctx.api.named_generic_type("builtins.str", [])
if (
isinstance(ctx.type, TypedDictType)
and len(ctx.args) == 2
and len(ctx.args[0]) == 1
and isinstance(ctx.args[0][0], StrExpr)
and len(signature.arg_types) == 2
and len(signature.variables) == 1
and len(ctx.args[1]) == 1
):
key = ctx.args[0][0].value
value_type = ctx.type.items.get(key)
if value_type:
# Tweak the signature to include the value type as context. It's
# only needed for type inference since there's a union with a type
# variable that accepts everything.
tv = signature.variables[0]
assert isinstance(tv, TypeVarType)
typ = make_simplified_union([value_type, tv])
return signature.copy_modified(arg_types=[str_type, typ], ret_type=typ)
return signature.copy_modified(arg_types=[str_type, signature.arg_types[1]])
def typed_dict_pop_callback(ctx: MethodContext) -> Type:
"""Type check and infer a precise return type for TypedDict.pop."""
if (
isinstance(ctx.type, TypedDictType)
and len(ctx.arg_types) >= 1
and len(ctx.arg_types[0]) == 1
):
keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
if keys is None:
ctx.api.fail(
message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL,
ctx.context,
code=codes.LITERAL_REQ,
)
return AnyType(TypeOfAny.from_error)
value_types = []
for key in keys:
if key in ctx.type.required_keys:
ctx.api.msg.typeddict_key_cannot_be_deleted(ctx.type, key, ctx.context)
value_type = ctx.type.items.get(key)
if value_type:
value_types.append(value_type)
else:
ctx.api.msg.typeddict_key_not_found(ctx.type, key, ctx.context)
return AnyType(TypeOfAny.from_error)
if len(ctx.args[1]) == 0:
return make_simplified_union(value_types)
elif len(ctx.arg_types) == 2 and len(ctx.arg_types[1]) == 1 and len(ctx.args[1]) == 1:
return make_simplified_union([*value_types, ctx.arg_types[1][0]])
return ctx.default_return_type
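# Illustrative sketch (not part of mypy itself): .pop() is rejected for required keys
# and precisely typed for non-required ones. The TypedDict below is hypothetical
# user code; the revealed type is approximate.
#
#     from typing import TypedDict
#     from typing_extensions import NotRequired
#
#     class Movie(TypedDict):
#         name: str
#         year: NotRequired[int]
#
#     m: Movie = {"name": "Alien"}
#     reveal_type(m.pop("year", 0))   # int
#     m.pop("name")                   # error: required key "name" cannot be deleted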
def typed_dict_setdefault_signature_callback(ctx: MethodSigContext) -> CallableType:
"""Try to infer a better signature type for TypedDict.setdefault.
This is used to get better type context for the second argument that
depends on a TypedDict value type.
"""
signature = ctx.default_signature
str_type = ctx.api.named_generic_type("builtins.str", [])
if (
isinstance(ctx.type, TypedDictType)
and len(ctx.args) == 2
and len(ctx.args[0]) == 1
and isinstance(ctx.args[0][0], StrExpr)
and len(signature.arg_types) == 2
and len(ctx.args[1]) == 1
):
key = ctx.args[0][0].value
value_type = ctx.type.items.get(key)
if value_type:
return signature.copy_modified(arg_types=[str_type, value_type])
return signature.copy_modified(arg_types=[str_type, signature.arg_types[1]])
def typed_dict_setdefault_callback(ctx: MethodContext) -> Type:
"""Type check TypedDict.setdefault and infer a precise return type."""
if (
isinstance(ctx.type, TypedDictType)
and len(ctx.arg_types) == 2
and len(ctx.arg_types[0]) == 1
and len(ctx.arg_types[1]) == 1
):
keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
if keys is None:
ctx.api.fail(
message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL,
ctx.context,
code=codes.LITERAL_REQ,
)
return AnyType(TypeOfAny.from_error)
assigned_readonly_keys = ctx.type.readonly_keys & set(keys)
if assigned_readonly_keys:
ctx.api.msg.readonly_keys_mutated(assigned_readonly_keys, context=ctx.context)
default_type = ctx.arg_types[1][0]
value_types = []
for key in keys:
value_type = ctx.type.items.get(key)
if value_type is None:
ctx.api.msg.typeddict_key_not_found(ctx.type, key, ctx.context)
return AnyType(TypeOfAny.from_error)
# The signature_callback above can't always infer the right signature
# (e.g. when the expression is a variable that happens to be a Literal str)
# so we need to handle the check ourselves here and make sure the provided
# default can be assigned to all key-value pairs we're updating.
if not is_subtype(default_type, value_type):
ctx.api.msg.typeddict_setdefault_arguments_inconsistent(
default_type, value_type, ctx.context
)
return AnyType(TypeOfAny.from_error)
value_types.append(value_type)
return make_simplified_union(value_types)
return ctx.default_return_type
def typed_dict_delitem_callback(ctx: MethodContext) -> Type:
"""Type check TypedDict.__delitem__."""
if (
isinstance(ctx.type, TypedDictType)
and len(ctx.arg_types) == 1
and len(ctx.arg_types[0]) == 1
):
keys = try_getting_str_literals(ctx.args[0][0], ctx.arg_types[0][0])
if keys is None:
ctx.api.fail(
message_registry.TYPEDDICT_KEY_MUST_BE_STRING_LITERAL,
ctx.context,
code=codes.LITERAL_REQ,
)
return AnyType(TypeOfAny.from_error)
for key in keys:
if key in ctx.type.required_keys or key in ctx.type.readonly_keys:
ctx.api.msg.typeddict_key_cannot_be_deleted(ctx.type, key, ctx.context)
elif key not in ctx.type.items:
ctx.api.msg.typeddict_key_not_found(ctx.type, key, ctx.context)
return ctx.default_return_type
_TP_DICT_MUTATING_METHODS: Final = frozenset({"update of TypedDict", "__ior__ of TypedDict"})
def typed_dict_update_signature_callback(ctx: MethodSigContext) -> CallableType:
"""Try to infer a better signature type for methods that update `TypedDict`.
This includes: `TypedDict.update`, `TypedDict.__or__`, `TypedDict.__ror__`,
and `TypedDict.__ior__`.
"""
signature = ctx.default_signature
if isinstance(ctx.type, TypedDictType) and len(signature.arg_types) == 1:
arg_type = get_proper_type(signature.arg_types[0])
if not isinstance(arg_type, TypedDictType):
return signature
arg_type = arg_type.as_anonymous()
arg_type = arg_type.copy_modified(required_keys=set())
if ctx.args and ctx.args[0]:
if signature.name in _TP_DICT_MUTATING_METHODS:
# If we want to mutate this object in place, we need to set this flag,
# it will trigger an extra check in TypedDict's checker.
arg_type.to_be_mutated = True
with ctx.api.msg.filter_errors(
filter_errors=lambda name, info: info.code != codes.TYPEDDICT_READONLY_MUTATED,
save_filtered_errors=True,
):
inferred = get_proper_type(
ctx.api.get_expression_type(ctx.args[0][0], type_context=arg_type)
)
if arg_type.to_be_mutated:
arg_type.to_be_mutated = False # Done!
possible_tds = []
if isinstance(inferred, TypedDictType):
possible_tds = [inferred]
elif isinstance(inferred, UnionType):
possible_tds = [
t
for t in get_proper_types(inferred.relevant_items())
if isinstance(t, TypedDictType)
]
items = []
for td in possible_tds:
item = arg_type.copy_modified(
required_keys=(arg_type.required_keys | td.required_keys)
& arg_type.items.keys()
)
if not ctx.api.options.extra_checks:
item = item.copy_modified(item_names=list(td.items))
items.append(item)
if items:
arg_type = make_simplified_union(items)
return signature.copy_modified(arg_types=[arg_type])
return signature
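# Illustrative sketch (not part of mypy itself): the signature tweak above lets
# .update() and the | operators accept a partial TypedDict, i.e. one where every key
# is optional, while still checking value types. The TypedDict below is hypothetical
# user code.
#
#     from typing import TypedDict
#
#     class Movie(TypedDict):
#         name: str
#         year: int
#
#     m: Movie = {"name": "Alien", "year": 1979}
#     m.update({"year": 1986})     # ok: omitting "name" is allowed here
#     m.update({"year": "1986"})   # error: "year" expects "int"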
def int_pow_callback(ctx: MethodContext) -> Type:
"""Infer a more precise return type for int.__pow__."""
# int.__pow__ has an optional modulo argument,
# so we expect 2 argument positions
if len(ctx.arg_types) == 2 and len(ctx.arg_types[0]) == 1 and len(ctx.arg_types[1]) == 0:
arg = ctx.args[0][0]
if isinstance(arg, IntExpr):
exponent = arg.value
elif isinstance(arg, UnaryExpr) and arg.op == "-" and isinstance(arg.expr, IntExpr):
exponent = -arg.expr.value
else:
# Right operand not an int literal or a negated literal -- give up.
return ctx.default_return_type
if exponent >= 0:
return ctx.api.named_generic_type("builtins.int", [])
else:
return ctx.api.named_generic_type("builtins.float", [])
return ctx.default_return_type
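# Illustrative sketch (not part of mypy itself): the sign of the literal exponent
# decides between "int" and "float" results; the revealed types are approximate.
#
#     reveal_type(2 ** 8)    # int
#     reveal_type(2 ** -1)   # float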
def int_neg_callback(ctx: MethodContext, multiplier: int = -1) -> Type:
"""Infer a more precise return type for int.__neg__ and int.__pos__.
This is mainly used to infer the return type as LiteralType
if the original underlying object is a LiteralType object.
"""
if isinstance(ctx.type, Instance) and ctx.type.last_known_value is not None:
value = ctx.type.last_known_value.value
fallback = ctx.type.last_known_value.fallback
if isinstance(value, int):
if is_literal_type_like(ctx.api.type_context[-1]):
return LiteralType(value=multiplier * value, fallback=fallback)
else:
return ctx.type.copy_modified(
last_known_value=LiteralType(
value=multiplier * value,
fallback=fallback,
line=ctx.type.line,
column=ctx.type.column,
)
)
elif isinstance(ctx.type, LiteralType):
value = ctx.type.value
fallback = ctx.type.fallback
if isinstance(value, int):
return LiteralType(value=multiplier * value, fallback=fallback)
return ctx.default_return_type
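# Illustrative sketch (not part of mypy itself): negating a literal int keeps the
# literal information, so code like the following type checks.
#
#     from typing import Literal
#
#     x: Literal[-1] = -1     # ok: -1 is treated as Literal[-1] in this context
#     y: Literal[5] = -(-5)   # ok as well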
def int_pos_callback(ctx: MethodContext) -> Type:
"""Infer a more precise return type for int.__pos__.
This is identical to __neg__, except the value is not inverted.
"""
return int_neg_callback(ctx, +1)
def tuple_mul_callback(ctx: MethodContext) -> Type:
"""Infer a more precise return type for tuple.__mul__ and tuple.__rmul__.
    This is used to return a specific-sized tuple when multiplied by a Literal int.
"""
if not isinstance(ctx.type, TupleType):
return ctx.default_return_type
arg_type = get_proper_type(ctx.arg_types[0][0])
if isinstance(arg_type, Instance) and arg_type.last_known_value is not None:
value = arg_type.last_known_value.value
if isinstance(value, int):
return ctx.type.copy_modified(items=ctx.type.items * value)
    elif isinstance(arg_type, LiteralType):
value = arg_type.value
if isinstance(value, int):
return ctx.type.copy_modified(items=ctx.type.items * value)
return ctx.default_return_type
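# Illustrative sketch (not part of mypy itself): multiplying a fixed-length tuple by
# an int literal yields a tuple of the expanded length; the revealed type is
# approximate.
#
#     pair = (1, "a")
#     reveal_type(pair * 2)   # tuple[int, str, int, str]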
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/plugins/default.py
|
Python
|
NOASSERTION
| 22,464 |
"""
This file contains a variety of plugins for refining how mypy infers types of
expressions involving Enums.
Currently, this file focuses on providing better inference for expressions like
'SomeEnum.FOO.name' and 'SomeEnum.FOO.value'. Note that the type of both expressions
will vary depending on exactly which instance of SomeEnum we're looking at.
Note that this file does *not* contain all special-cased logic related to enums:
we actually bake some of it directly in to the semantic analysis layer (see
semanal_enum.py).
"""
from __future__ import annotations
from typing import Final, Iterable, Sequence, TypeVar, cast
import mypy.plugin # To avoid circular imports.
from mypy.nodes import TypeInfo
from mypy.semanal_enum import ENUM_BASES
from mypy.subtypes import is_equivalent
from mypy.typeops import fixup_partial_type, make_simplified_union
from mypy.types import (
CallableType,
Instance,
LiteralType,
ProperType,
Type,
get_proper_type,
is_named_instance,
)
ENUM_NAME_ACCESS: Final = {f"{prefix}.name" for prefix in ENUM_BASES} | {
f"{prefix}._name_" for prefix in ENUM_BASES
}
ENUM_VALUE_ACCESS: Final = {f"{prefix}.value" for prefix in ENUM_BASES} | {
f"{prefix}._value_" for prefix in ENUM_BASES
}
def enum_name_callback(ctx: mypy.plugin.AttributeContext) -> Type:
"""This plugin refines the 'name' attribute in enums to act as if
they were declared to be final.
For example, the expression 'MyEnum.FOO.name' normally is inferred
to be of type 'str'.
This plugin will instead make the inferred type be a 'str' where the
last known value is 'Literal["FOO"]'. This means it would be legal to
use 'MyEnum.FOO.name' in contexts that expect a Literal type, just like
any other Final variable or attribute.
This plugin assumes that the provided context is an attribute access
matching one of the strings found in 'ENUM_NAME_ACCESS'.
"""
enum_field_name = _extract_underlying_field_name(ctx.type)
if enum_field_name is None:
return ctx.default_attr_type
else:
str_type = ctx.api.named_generic_type("builtins.str", [])
literal_type = LiteralType(enum_field_name, fallback=str_type)
return str_type.copy_modified(last_known_value=literal_type)
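# Illustrative sketch (not part of mypy itself): the refinement above makes ".name"
# usable where a Literal string is expected. The enum below is hypothetical user code.
#
#     from enum import Enum
#     from typing import Literal
#
#     class Color(Enum):
#         RED = 1
#         BLUE = 2
#
#     n: Literal["RED"] = Color.RED.name   # ok: last known value is Literal["RED"]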
_T = TypeVar("_T")
def _first(it: Iterable[_T]) -> _T | None:
"""Return the first value from any iterable.
Returns ``None`` if the iterable is empty.
"""
for val in it:
return val
return None
def _infer_value_type_with_auto_fallback(
ctx: mypy.plugin.AttributeContext, proper_type: ProperType | None
) -> Type | None:
"""Figure out the type of an enum value accounting for `auto()`.
This method is a no-op for a `None` proper_type and also in the case where
the type is not "enum.auto"
"""
if proper_type is None:
return None
proper_type = get_proper_type(fixup_partial_type(proper_type))
if not (isinstance(proper_type, Instance) and proper_type.type.fullname == "enum.auto"):
if is_named_instance(proper_type, "enum.member") and proper_type.args:
return proper_type.args[0]
return proper_type
assert isinstance(ctx.type, Instance), "An incorrect ctx.type was passed."
info = ctx.type.type
# Find the first _generate_next_value_ on the mro. We need to know
# if it is `Enum` because `Enum` types say that the return-value of
# `_generate_next_value_` is `Any`. In reality the default `auto()`
# returns an `int` (presumably the `Any` in typeshed is to make it
# easier to subclass and change the returned type).
type_with_gnv = _first(ti for ti in info.mro if ti.names.get("_generate_next_value_"))
if type_with_gnv is None:
return ctx.default_attr_type
stnode = type_with_gnv.names["_generate_next_value_"]
# This should be a `CallableType`
node_type = get_proper_type(stnode.type)
if isinstance(node_type, CallableType):
if type_with_gnv.fullname == "enum.Enum":
int_type = ctx.api.named_generic_type("builtins.int", [])
return int_type
return get_proper_type(node_type.ret_type)
return ctx.default_attr_type
def _implements_new(info: TypeInfo) -> bool:
"""Check whether __new__ comes from enum.Enum or was implemented in a
subclass. In the latter case, we must infer Any as long as mypy can't infer
the type of _value_ from assignments in __new__.
"""
type_with_new = _first(
ti
for ti in info.mro
if ti.names.get("__new__") and not ti.fullname.startswith("builtins.")
)
if type_with_new is None:
return False
return type_with_new.fullname not in ("enum.Enum", "enum.IntEnum", "enum.StrEnum")
def enum_member_callback(ctx: mypy.plugin.FunctionContext) -> Type:
"""By default `member(1)` will be infered as `member[int]`,
we want to improve the inference to be `Literal[1]` here."""
    if ctx.arg_types and ctx.arg_types[0]:
arg = get_proper_type(ctx.arg_types[0][0])
proper_return = get_proper_type(ctx.default_return_type)
if (
isinstance(arg, Instance)
and arg.last_known_value
and isinstance(proper_return, Instance)
and len(proper_return.args) == 1
):
return proper_return.copy_modified(args=[arg])
return ctx.default_return_type
def enum_value_callback(ctx: mypy.plugin.AttributeContext) -> Type:
"""This plugin refines the 'value' attribute in enums to refer to
the original underlying value. For example, suppose we have the
following:
class SomeEnum:
FOO = A()
BAR = B()
By default, mypy will infer that 'SomeEnum.FOO.value' and
'SomeEnum.BAR.value' both are of type 'Any'. This plugin refines
this inference so that mypy understands the expressions are
actually of types 'A' and 'B' respectively. This better reflects
the actual runtime behavior.
This plugin works simply by looking up the original value assigned
to the enum. For example, when this plugin sees 'SomeEnum.BAR.value',
it will look up whatever type 'BAR' had in the SomeEnum TypeInfo and
use that as the inferred type of the overall expression.
This plugin assumes that the provided context is an attribute access
matching one of the strings found in 'ENUM_VALUE_ACCESS'.
"""
enum_field_name = _extract_underlying_field_name(ctx.type)
if enum_field_name is None:
# We do not know the enum field name (perhaps it was passed to a
# function and we only know that it _is_ a member). All is not lost
        # however, if we can prove that all of the enum members have the
# same value-type, then it doesn't matter which member was passed in.
# The value-type is still known.
if isinstance(ctx.type, Instance):
info = ctx.type.type
# As long as mypy doesn't understand attribute creation in __new__,
# there is no way to predict the value type if the enum class has a
# custom implementation
if _implements_new(info):
return ctx.default_attr_type
stnodes = (info.get(name) for name in info.names)
# Enums _can_ have methods, instance attributes, and `nonmember`s.
# Omit methods and attributes created by assigning to self.*
# for our value inference.
node_types = (
get_proper_type(n.type) if n else None
for n in stnodes
if n is None or not n.implicit
)
proper_types = [
_infer_value_type_with_auto_fallback(ctx, t)
for t in node_types
if t is None
or (not isinstance(t, CallableType) and not is_named_instance(t, "enum.nonmember"))
]
underlying_type = _first(proper_types)
if underlying_type is None:
return ctx.default_attr_type
# At first we try to predict future `value` type if all other items
# have the same type. For example, `int`.
# If this is the case, we simply return this type.
# See https://github.com/python/mypy/pull/9443
all_same_value_type = all(
proper_type is not None and proper_type == underlying_type
for proper_type in proper_types
)
if all_same_value_type:
if underlying_type is not None:
return underlying_type
# But, after we started treating all `Enum` values as `Final`,
# we start to infer types in
# `item = 1` as `Literal[1]`, not just `int`.
# So, for example types in this `Enum` will all be different:
#
# class Ordering(IntEnum):
# one = 1
# two = 2
# three = 3
#
# We will infer three `Literal` types here.
# They are not the same, but they are equivalent.
# So, we unify them to make sure `.value` prediction still works.
# Result will be `Literal[1] | Literal[2] | Literal[3]` for this case.
all_equivalent_types = all(
proper_type is not None and is_equivalent(proper_type, underlying_type)
for proper_type in proper_types
)
if all_equivalent_types:
return make_simplified_union(cast(Sequence[Type], proper_types))
return ctx.default_attr_type
assert isinstance(ctx.type, Instance)
info = ctx.type.type
# As long as mypy doesn't understand attribute creation in __new__,
# there is no way to predict the value type if the enum class has a
# custom implementation
if _implements_new(info):
return ctx.default_attr_type
stnode = info.get(enum_field_name)
if stnode is None:
return ctx.default_attr_type
underlying_type = _infer_value_type_with_auto_fallback(ctx, get_proper_type(stnode.type))
if underlying_type is None:
return ctx.default_attr_type
return underlying_type
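# Illustrative sketch (not part of mypy itself): the callback above gives ".value"
# the type of the underlying assignment rather than a very general type. The enum
# below is hypothetical user code.
#
#     from enum import Enum
#
#     class Color(Enum):
#         RED = 1
#         BLUE = 2
#
#     x: int = Color.RED.value   # ok: the underlying value is an int
#     def f(c: Color) -> int:
#         return c.value         # ok even when the member is not known statically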
def _extract_underlying_field_name(typ: Type) -> str | None:
"""If the given type corresponds to some Enum instance, returns the
original name of that enum. For example, if we receive in the type
corresponding to 'SomeEnum.FOO', we return the string "SomeEnum.Foo".
This helper takes advantage of the fact that Enum instances are valid
to use inside Literal[...] types. An expression like 'SomeEnum.FOO' is
actually represented by an Instance type with a Literal enum fallback.
We can examine this Literal fallback to retrieve the string.
"""
typ = get_proper_type(typ)
if not isinstance(typ, Instance):
return None
if not typ.type.is_enum:
return None
underlying_literal = typ.last_known_value
if underlying_literal is None:
return None
# The checks above have verified this LiteralType is representing an enum value,
# which means the 'value' field is guaranteed to be the name of the enum field
# as a string.
assert isinstance(underlying_literal.value, str)
return underlying_literal.value
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/plugins/enums.py
|
Python
|
NOASSERTION
| 11,338 |
"""Plugin for supporting the functools standard library module."""
from __future__ import annotations
from typing import Final, NamedTuple
import mypy.checker
import mypy.plugin
import mypy.semanal
from mypy.argmap import map_actuals_to_formals
from mypy.nodes import ARG_POS, ARG_STAR2, ArgKind, Argument, CallExpr, FuncItem, Var
from mypy.plugins.common import add_method_to_class
from mypy.typeops import get_all_type_vars
from mypy.types import (
AnyType,
CallableType,
Instance,
Overloaded,
Type,
TypeOfAny,
TypeVarType,
UnboundType,
UnionType,
get_proper_type,
)
functools_total_ordering_makers: Final = {"functools.total_ordering"}
_ORDERING_METHODS: Final = {"__lt__", "__le__", "__gt__", "__ge__"}
PARTIAL: Final = "functools.partial"
class _MethodInfo(NamedTuple):
is_static: bool
type: CallableType
def functools_total_ordering_maker_callback(
ctx: mypy.plugin.ClassDefContext, auto_attribs_default: bool = False
) -> bool:
"""Add dunder methods to classes decorated with functools.total_ordering."""
comparison_methods = _analyze_class(ctx)
if not comparison_methods:
ctx.api.fail(
'No ordering operation defined when using "functools.total_ordering": < > <= >=',
ctx.reason,
)
return True
# prefer __lt__ to __le__ to __gt__ to __ge__
root = max(comparison_methods, key=lambda k: (comparison_methods[k] is None, k))
root_method = comparison_methods[root]
if not root_method:
# None of the defined comparison methods can be analysed
return True
other_type = _find_other_type(root_method)
bool_type = ctx.api.named_type("builtins.bool")
ret_type: Type = bool_type
if root_method.type.ret_type != ctx.api.named_type("builtins.bool"):
proper_ret_type = get_proper_type(root_method.type.ret_type)
if not (
isinstance(proper_ret_type, UnboundType)
and proper_ret_type.name.split(".")[-1] == "bool"
):
ret_type = AnyType(TypeOfAny.implementation_artifact)
for additional_op in _ORDERING_METHODS:
# Either the method is not implemented
# or has an unknown signature that we can now extrapolate.
if not comparison_methods.get(additional_op):
args = [Argument(Var("other", other_type), other_type, None, ARG_POS)]
add_method_to_class(ctx.api, ctx.cls, additional_op, args, ret_type)
return True
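# Illustrative sketch (not part of mypy itself): once one rich comparison is defined,
# the remaining operators are synthesized with a matching signature. The class below
# is hypothetical user code.
#
#     import functools
#
#     @functools.total_ordering
#     class Version:
#         def __init__(self, major: int) -> None:
#             self.major = major
#         def __eq__(self, other: object) -> bool: ...
#         def __lt__(self, other: "Version") -> bool: ...
#
#     Version(1) >= Version(2)   # ok: __ge__ is synthesized by the plugin
#     Version(1) >= "1.0"        # error: expected "Version"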
def _find_other_type(method: _MethodInfo) -> Type:
"""Find the type of the ``other`` argument in a comparison method."""
first_arg_pos = 0 if method.is_static else 1
cur_pos_arg = 0
other_arg = None
for arg_kind, arg_type in zip(method.type.arg_kinds, method.type.arg_types):
if arg_kind.is_positional():
if cur_pos_arg == first_arg_pos:
other_arg = arg_type
break
cur_pos_arg += 1
elif arg_kind != ARG_STAR2:
other_arg = arg_type
break
if other_arg is None:
return AnyType(TypeOfAny.implementation_artifact)
return other_arg
def _analyze_class(ctx: mypy.plugin.ClassDefContext) -> dict[str, _MethodInfo | None]:
"""Analyze the class body, its parents, and return the comparison methods found."""
# Traverse the MRO and collect ordering methods.
comparison_methods: dict[str, _MethodInfo | None] = {}
# Skip object because total_ordering does not use methods from object
for cls in ctx.cls.info.mro[:-1]:
for name in _ORDERING_METHODS:
if name in cls.names and name not in comparison_methods:
node = cls.names[name].node
if isinstance(node, FuncItem) and isinstance(node.type, CallableType):
comparison_methods[name] = _MethodInfo(node.is_static, node.type)
continue
if isinstance(node, Var):
proper_type = get_proper_type(node.type)
if isinstance(proper_type, CallableType):
comparison_methods[name] = _MethodInfo(node.is_staticmethod, proper_type)
continue
comparison_methods[name] = None
return comparison_methods
def partial_new_callback(ctx: mypy.plugin.FunctionContext) -> Type:
"""Infer a more precise return type for functools.partial"""
if not isinstance(ctx.api, mypy.checker.TypeChecker): # use internals
return ctx.default_return_type
if len(ctx.arg_types) != 3: # fn, *args, **kwargs
return ctx.default_return_type
if len(ctx.arg_types[0]) != 1:
return ctx.default_return_type
if isinstance(get_proper_type(ctx.arg_types[0][0]), Overloaded):
# TODO: handle overloads, just fall back to whatever the non-plugin code does
return ctx.default_return_type
return handle_partial_with_callee(ctx, callee=ctx.arg_types[0][0])
def handle_partial_with_callee(ctx: mypy.plugin.FunctionContext, callee: Type) -> Type:
if not isinstance(ctx.api, mypy.checker.TypeChecker): # use internals
return ctx.default_return_type
if isinstance(callee_proper := get_proper_type(callee), UnionType):
return UnionType.make_union(
[handle_partial_with_callee(ctx, item) for item in callee_proper.items]
)
fn_type = ctx.api.extract_callable_type(callee, ctx=ctx.default_return_type)
if fn_type is None:
return ctx.default_return_type
# We must normalize from the start to have coherent view together with TypeChecker.
fn_type = fn_type.with_unpacked_kwargs().with_normalized_var_args()
last_context = ctx.api.type_context[-1]
if not fn_type.is_type_obj():
# We wrap the return type to get use of a possible type context provided by caller.
# We cannot do this in case of class objects, since otherwise the plugin may get
# falsely triggered when evaluating the constructed call itself.
ret_type: Type = ctx.api.named_generic_type(PARTIAL, [fn_type.ret_type])
wrapped_return = True
else:
ret_type = fn_type.ret_type
# Instead, for class objects we ignore any type context to avoid spurious errors,
# since the type context will be partial[X] etc., not X.
ctx.api.type_context[-1] = None
wrapped_return = False
# Flatten actual to formal mapping, since this is what check_call() expects.
actual_args = []
actual_arg_kinds = []
actual_arg_names = []
actual_types = []
seen_args = set()
for i, param in enumerate(ctx.args[1:], start=1):
for j, a in enumerate(param):
if a in seen_args:
# Same actual arg can map to multiple formals, but we need to include
# each one only once.
continue
# Here we rely on the fact that expressions are essentially immutable, so
# they can be compared by identity.
seen_args.add(a)
actual_args.append(a)
actual_arg_kinds.append(ctx.arg_kinds[i][j])
actual_arg_names.append(ctx.arg_names[i][j])
actual_types.append(ctx.arg_types[i][j])
formal_to_actual = map_actuals_to_formals(
actual_kinds=actual_arg_kinds,
actual_names=actual_arg_names,
formal_kinds=fn_type.arg_kinds,
formal_names=fn_type.arg_names,
actual_arg_type=lambda i: actual_types[i],
)
# We need to remove any type variables that appear only in formals that have
# no actuals, to avoid eagerly binding them in check_call() below.
can_infer_ids = set()
for i, arg_type in enumerate(fn_type.arg_types):
if not formal_to_actual[i]:
continue
can_infer_ids.update({tv.id for tv in get_all_type_vars(arg_type)})
defaulted = fn_type.copy_modified(
arg_kinds=[
(
ArgKind.ARG_OPT
if k == ArgKind.ARG_POS
else (ArgKind.ARG_NAMED_OPT if k == ArgKind.ARG_NAMED else k)
)
for k in fn_type.arg_kinds
],
ret_type=ret_type,
variables=[
tv
for tv in fn_type.variables
# Keep TypeVarTuple/ParamSpec to avoid spurious errors on empty args.
if tv.id in can_infer_ids or not isinstance(tv, TypeVarType)
],
)
if defaulted.line < 0:
# Make up a line number if we don't have one
defaulted.set_line(ctx.default_return_type)
# Create a valid context for various ad-hoc inspections in check_call().
call_expr = CallExpr(
callee=ctx.args[0][0],
args=actual_args,
arg_kinds=actual_arg_kinds,
arg_names=actual_arg_names,
analyzed=ctx.context.analyzed if isinstance(ctx.context, CallExpr) else None,
)
call_expr.set_line(ctx.context)
_, bound = ctx.api.expr_checker.check_call(
callee=defaulted,
args=actual_args,
arg_kinds=actual_arg_kinds,
arg_names=actual_arg_names,
context=call_expr,
)
if not wrapped_return:
# Restore previously ignored context.
ctx.api.type_context[-1] = last_context
bound = get_proper_type(bound)
if not isinstance(bound, CallableType):
return ctx.default_return_type
if wrapped_return:
# Reverse the wrapping we did above.
ret_type = get_proper_type(bound.ret_type)
if not isinstance(ret_type, Instance) or ret_type.type.fullname != PARTIAL:
return ctx.default_return_type
bound = bound.copy_modified(ret_type=ret_type.args[0])
partial_kinds = []
partial_types = []
partial_names = []
# We need to fully apply any positional arguments (they cannot be respecified)
# However, keyword arguments can be respecified, so just give them a default
for i, actuals in enumerate(formal_to_actual):
if len(bound.arg_types) == len(fn_type.arg_types):
arg_type = bound.arg_types[i]
if not mypy.checker.is_valid_inferred_type(arg_type):
arg_type = fn_type.arg_types[i] # bit of a hack
else:
# TODO: I assume that bound and fn_type have the same arguments. It appears this isn't
# true when PEP 646 things are happening. See testFunctoolsPartialTypeVarTuple
arg_type = fn_type.arg_types[i]
if not actuals or fn_type.arg_kinds[i] in (ArgKind.ARG_STAR, ArgKind.ARG_STAR2):
partial_kinds.append(fn_type.arg_kinds[i])
partial_types.append(arg_type)
partial_names.append(fn_type.arg_names[i])
else:
assert actuals
if any(actual_arg_kinds[j] in (ArgKind.ARG_POS, ArgKind.ARG_STAR) for j in actuals):
# Don't add params for arguments passed positionally
continue
# Add defaulted params for arguments passed via keyword
kind = actual_arg_kinds[actuals[0]]
if kind == ArgKind.ARG_NAMED or kind == ArgKind.ARG_STAR2:
kind = ArgKind.ARG_NAMED_OPT
partial_kinds.append(kind)
partial_types.append(arg_type)
partial_names.append(fn_type.arg_names[i])
ret_type = bound.ret_type
if not mypy.checker.is_valid_inferred_type(ret_type):
ret_type = fn_type.ret_type # same kind of hack as above
partially_applied = fn_type.copy_modified(
arg_types=partial_types,
arg_kinds=partial_kinds,
arg_names=partial_names,
ret_type=ret_type,
)
ret = ctx.api.named_generic_type(PARTIAL, [ret_type])
ret = ret.copy_with_extra_attr("__mypy_partial", partially_applied)
return ret
def partial_call_callback(ctx: mypy.plugin.MethodContext) -> Type:
"""Infer a more precise return type for functools.partial.__call__."""
if (
not isinstance(ctx.api, mypy.checker.TypeChecker) # use internals
or not isinstance(ctx.type, Instance)
or ctx.type.type.fullname != PARTIAL
or not ctx.type.extra_attrs
or "__mypy_partial" not in ctx.type.extra_attrs.attrs
):
return ctx.default_return_type
partial_type = ctx.type.extra_attrs.attrs["__mypy_partial"]
if len(ctx.arg_types) != 2: # *args, **kwargs
return ctx.default_return_type
# See comments for similar actual to formal code above
actual_args = []
actual_arg_kinds = []
actual_arg_names = []
seen_args = set()
for i, param in enumerate(ctx.args):
for j, a in enumerate(param):
if a in seen_args:
continue
seen_args.add(a)
actual_args.append(a)
actual_arg_kinds.append(ctx.arg_kinds[i][j])
actual_arg_names.append(ctx.arg_names[i][j])
result = ctx.api.expr_checker.check_call(
callee=partial_type,
args=actual_args,
arg_kinds=actual_arg_kinds,
arg_names=actual_arg_names,
context=ctx.context,
)
return result[0]
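def _example_partial_inference() -> None:
    # Illustrative sketch, not part of the plugin: user code that the two
    # callbacks above improve. With the plugin, `p` is inferred as
    # functools.partial[bool] rather than functools.partial[Any], and the call
    # below is checked against the remaining parameter `flag: bool`.
    import functools

    def check(name: str, flag: bool) -> bool:
        return flag and bool(name)

    p = functools.partial(check, "config")
    p(flag=True)  # OK; p(flag="oops") would be a type error with the plugin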
| algorandfoundation/puya | src/puyapy/_vendor/mypy/plugins/functools.py | Python | NOASSERTION | 13,074 |
"""
This plugin is helpful for mypy development itself.
By default, it is not enabled for mypy users.
It also can be used by plugin developers as a part of their CI checks.
It finds missing ``get_proper_type()`` calls, which can lead to multiple errors.
"""
from __future__ import annotations
from typing import Callable
from mypy.checker import TypeChecker
from mypy.nodes import TypeInfo
from mypy.plugin import FunctionContext, Plugin
from mypy.subtypes import is_proper_subtype
from mypy.types import (
AnyType,
FunctionLike,
Instance,
NoneTyp,
ProperType,
TupleType,
Type,
UnionType,
get_proper_type,
get_proper_types,
)
class ProperTypePlugin(Plugin):
"""
A plugin to ensure that every type is expanded before doing any special-casing.
This solves the problem that we have hundreds of call sites like:
if isinstance(typ, UnionType):
... # special-case union
But after introducing a new type TypeAliasType (and removing immediate expansion)
all these became dangerous because typ may be e.g. an alias to union.
"""
def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:
if fullname == "builtins.isinstance":
return isinstance_proper_hook
if fullname == "mypy.types.get_proper_type":
return proper_type_hook
if fullname == "mypy.types.get_proper_types":
return proper_types_hook
return None
def isinstance_proper_hook(ctx: FunctionContext) -> Type:
if len(ctx.arg_types) != 2 or not ctx.arg_types[1]:
return ctx.default_return_type
right = get_proper_type(ctx.arg_types[1][0])
for arg in ctx.arg_types[0]:
if (
is_improper_type(arg) or isinstance(get_proper_type(arg), AnyType)
) and is_dangerous_target(right):
if is_special_target(right):
return ctx.default_return_type
ctx.api.fail(
"Never apply isinstance() to unexpanded types;"
" use mypy.types.get_proper_type() first",
ctx.context,
)
ctx.api.note( # type: ignore[attr-defined]
"If you pass on the original type"
" after the check, always use its unexpanded version",
ctx.context,
)
return ctx.default_return_type
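def _example_flagged_isinstance() -> None:
    # Illustrative sketch, not part of the plugin: inside mypy's own sources an
    # isinstance() check on an unexpanded `Type` is reported, because the value
    # may be a TypeAliasType aliasing e.g. a union; expanding it first is the
    # accepted pattern.
    from mypy.types import Type, UnionType, get_proper_type

    def handle(typ: Type) -> None:
        # isinstance(typ, UnionType)                      <- flagged by this plugin
        if isinstance(get_proper_type(typ), UnionType):  # OK
            pass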
def is_special_target(right: ProperType) -> bool:
"""Whitelist some special cases for use in isinstance() with improper types."""
if isinstance(right, FunctionLike) and right.is_type_obj():
if right.type_object().fullname == "builtins.tuple":
# Used with Union[Type, Tuple[Type, ...]].
return True
if right.type_object().fullname in (
"mypy.types.Type",
"mypy.types.ProperType",
"mypy.types.TypeAliasType",
):
# Special case: things like assert isinstance(typ, ProperType) are always OK.
return True
if right.type_object().fullname in (
"mypy.types.UnboundType",
"mypy.types.TypeVarLikeType",
"mypy.types.TypeVarType",
"mypy.types.UnpackType",
"mypy.types.TypeVarTupleType",
"mypy.types.ParamSpecType",
"mypy.types.Parameters",
"mypy.types.RawExpressionType",
"mypy.types.EllipsisType",
"mypy.types.StarType",
"mypy.types.TypeList",
"mypy.types.CallableArgument",
"mypy.types.PartialType",
"mypy.types.ErasedType",
"mypy.types.DeletedType",
"mypy.types.RequiredType",
"mypy.types.ReadOnlyType",
):
# Special case: these are not valid targets for a type alias and thus safe.
# TODO: introduce a SyntheticType base to simplify this?
return True
elif isinstance(right, TupleType):
return all(is_special_target(t) for t in get_proper_types(right.items))
return False
def is_improper_type(typ: Type) -> bool:
"""Is this a type that is not a subtype of ProperType?"""
typ = get_proper_type(typ)
if isinstance(typ, Instance):
info = typ.type
return info.has_base("mypy.types.Type") and not info.has_base("mypy.types.ProperType")
if isinstance(typ, UnionType):
return any(is_improper_type(t) for t in typ.items)
return False
def is_dangerous_target(typ: ProperType) -> bool:
"""Is this a dangerous target (right argument) for an isinstance() check?"""
if isinstance(typ, TupleType):
return any(is_dangerous_target(get_proper_type(t)) for t in typ.items)
if isinstance(typ, FunctionLike) and typ.is_type_obj():
return typ.type_object().has_base("mypy.types.Type")
return False
def proper_type_hook(ctx: FunctionContext) -> Type:
"""Check if this get_proper_type() call is not redundant."""
arg_types = ctx.arg_types[0]
if arg_types:
arg_type = get_proper_type(arg_types[0])
proper_type = get_proper_type_instance(ctx)
if is_proper_subtype(arg_type, UnionType.make_union([NoneTyp(), proper_type])):
# Minimize amount of spurious errors from overload machinery.
# TODO: call the hook on the overload as a whole?
if isinstance(arg_type, (UnionType, Instance)):
ctx.api.fail("Redundant call to get_proper_type()", ctx.context)
return ctx.default_return_type
def proper_types_hook(ctx: FunctionContext) -> Type:
"""Check if this get_proper_types() call is not redundant."""
arg_types = ctx.arg_types[0]
if arg_types:
arg_type = arg_types[0]
proper_type = get_proper_type_instance(ctx)
item_type = UnionType.make_union([NoneTyp(), proper_type])
ok_type = ctx.api.named_generic_type("typing.Iterable", [item_type])
if is_proper_subtype(arg_type, ok_type):
ctx.api.fail("Redundant call to get_proper_types()", ctx.context)
return ctx.default_return_type
def get_proper_type_instance(ctx: FunctionContext) -> Instance:
checker = ctx.api
assert isinstance(checker, TypeChecker)
types = checker.modules["mypy.types"]
proper_type_info = types.names["ProperType"]
assert isinstance(proper_type_info.node, TypeInfo)
return Instance(proper_type_info.node, [])
def plugin(version: str) -> type[ProperTypePlugin]:
return ProperTypePlugin
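# Illustrative usage, not part of this module: mypy loads the plugin through
# the plugin() entry point above when a config file lists it, e.g.
#
#   [mypy]
#   plugins = mypy.plugins.proper_plugin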
| algorandfoundation/puya | src/puyapy/_vendor/mypy/plugins/proper_plugin.py | Python | NOASSERTION | 6,481 |
from __future__ import annotations
from typing import Final, NamedTuple, Sequence, TypeVar, Union
from typing_extensions import TypeAlias as _TypeAlias
from mypy.messages import format_type
from mypy.nodes import ARG_POS, Argument, Block, ClassDef, Context, SymbolTable, TypeInfo, Var
from mypy.options import Options
from mypy.plugin import CheckerPluginInterface, FunctionContext, MethodContext, MethodSigContext
from mypy.plugins.common import add_method_to_class
from mypy.subtypes import is_subtype
from mypy.types import (
AnyType,
CallableType,
FunctionLike,
Instance,
NoneType,
Overloaded,
Type,
TypeOfAny,
get_proper_type,
)
class SingledispatchTypeVars(NamedTuple):
return_type: Type
fallback: CallableType
class RegisterCallableInfo(NamedTuple):
register_type: Type
singledispatch_obj: Instance
SINGLEDISPATCH_TYPE: Final = "functools._SingleDispatchCallable"
SINGLEDISPATCH_REGISTER_METHOD: Final = f"{SINGLEDISPATCH_TYPE}.register"
SINGLEDISPATCH_CALLABLE_CALL_METHOD: Final = f"{SINGLEDISPATCH_TYPE}.__call__"
def get_singledispatch_info(typ: Instance) -> SingledispatchTypeVars | None:
if len(typ.args) == 2:
return SingledispatchTypeVars(*typ.args) # type: ignore[arg-type]
return None
T = TypeVar("T")
def get_first_arg(args: list[list[T]]) -> T | None:
"""Get the element that corresponds to the first argument passed to the function"""
if args and args[0]:
return args[0][0]
return None
REGISTER_RETURN_CLASS: Final = "_SingleDispatchRegisterCallable"
REGISTER_CALLABLE_CALL_METHOD: Final = f"functools.{REGISTER_RETURN_CLASS}.__call__"
def make_fake_register_class_instance(
api: CheckerPluginInterface, type_args: Sequence[Type]
) -> Instance:
defn = ClassDef(REGISTER_RETURN_CLASS, Block([]))
defn.fullname = f"functools.{REGISTER_RETURN_CLASS}"
info = TypeInfo(SymbolTable(), defn, "functools")
obj_type = api.named_generic_type("builtins.object", []).type
info.bases = [Instance(obj_type, [])]
info.mro = [info, obj_type]
defn.info = info
func_arg = Argument(Var("name"), AnyType(TypeOfAny.implementation_artifact), None, ARG_POS)
add_method_to_class(api, defn, "__call__", [func_arg], NoneType())
return Instance(info, type_args)
PluginContext: _TypeAlias = Union[FunctionContext, MethodContext]
def fail(ctx: PluginContext, msg: str, context: Context | None) -> None:
"""Emit an error message.
This tries to emit an error message at the location specified by `context`, falling back to the
location specified by `ctx.context`. This is helpful when the only context information about
where you want to put the error message may be None (like it is for `CallableType.definition`)
and falling back to the location of the calling function is fine."""
# TODO: figure out if there is some more reliable way of getting context information, so this
# function isn't necessary
if context is not None:
err_context = context
else:
err_context = ctx.context
ctx.api.fail(msg, err_context)
def create_singledispatch_function_callback(ctx: FunctionContext) -> Type:
"""Called for functools.singledispatch"""
func_type = get_proper_type(get_first_arg(ctx.arg_types))
if isinstance(func_type, CallableType):
if len(func_type.arg_kinds) < 1:
fail(
ctx, "Singledispatch function requires at least one argument", func_type.definition
)
return ctx.default_return_type
elif not func_type.arg_kinds[0].is_positional(star=True):
fail(
ctx,
"First argument to singledispatch function must be a positional argument",
func_type.definition,
)
return ctx.default_return_type
# singledispatch returns an instance of functools._SingleDispatchCallable according to
# typeshed
singledispatch_obj = get_proper_type(ctx.default_return_type)
assert isinstance(singledispatch_obj, Instance)
singledispatch_obj.args += (func_type,)
return ctx.default_return_type
def singledispatch_register_callback(ctx: MethodContext) -> Type:
"""Called for functools._SingleDispatchCallable.register"""
assert isinstance(ctx.type, Instance)
# TODO: check that there's only one argument
first_arg_type = get_proper_type(get_first_arg(ctx.arg_types))
if isinstance(first_arg_type, (CallableType, Overloaded)) and first_arg_type.is_type_obj():
# HACK: We received a class as an argument to register. We need to be able
# to access the function that register is being applied to, and the typeshed definition
# of register has it return a generic Callable, so we create a new
# SingleDispatchRegisterCallable class, define a __call__ method, and then add a
# plugin hook for that.
# is_subtype doesn't work when the right type is Overloaded, so we need the
# actual type
register_type = first_arg_type.items[0].ret_type
type_args = RegisterCallableInfo(register_type, ctx.type)
register_callable = make_fake_register_class_instance(ctx.api, type_args)
return register_callable
elif isinstance(first_arg_type, CallableType):
# TODO: do more checking for registered functions
register_function(ctx, ctx.type, first_arg_type, ctx.api.options)
# The typeshed stubs for register say that the function returned is Callable[..., T], even
# though the function returned is the same as the one passed in. We return the type of the
# function so that mypy can properly type check cases where the registered function is used
# directly (instead of through singledispatch)
return first_arg_type
# fallback in case we don't recognize the arguments
return ctx.default_return_type
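def _example_singledispatch_usage() -> None:
    # Illustrative sketch, not part of the plugin: user code these hooks check.
    # The plugin verifies that each registered implementation dispatches on a
    # subtype of the fallback's first argument (here: object) and keeps the
    # registered function's own signature for direct calls.
    import functools

    @functools.singledispatch
    def describe(value: object) -> str:
        return "something"

    @describe.register
    def _describe_int(value: int) -> str:
        return f"int {value}"

    @describe.register(str)  # class passed to register() explicitly
    def _describe_str(value: str) -> str:
        return f"str {value!r}"

    assert describe(3) == "int 3"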
def register_function(
ctx: PluginContext,
singledispatch_obj: Instance,
func: Type,
options: Options,
register_arg: Type | None = None,
) -> None:
"""Register a function"""
func = get_proper_type(func)
if not isinstance(func, CallableType):
return
metadata = get_singledispatch_info(singledispatch_obj)
if metadata is None:
# if we never added the fallback to the type variables, we already reported an error, so
# just don't do anything here
return
dispatch_type = get_dispatch_type(func, register_arg)
if dispatch_type is None:
# TODO: report an error here that singledispatch requires at least one argument
# (might want to do the error reporting in get_dispatch_type)
return
fallback = metadata.fallback
fallback_dispatch_type = fallback.arg_types[0]
if not is_subtype(dispatch_type, fallback_dispatch_type):
fail(
ctx,
"Dispatch type {} must be subtype of fallback function first argument {}".format(
format_type(dispatch_type, options), format_type(fallback_dispatch_type, options)
),
func.definition,
)
return
return
def get_dispatch_type(func: CallableType, register_arg: Type | None) -> Type | None:
if register_arg is not None:
return register_arg
if func.arg_types:
return func.arg_types[0]
return None
def call_singledispatch_function_after_register_argument(ctx: MethodContext) -> Type:
"""Called on the function after passing a type to register"""
register_callable = ctx.type
if isinstance(register_callable, Instance):
type_args = RegisterCallableInfo(*register_callable.args) # type: ignore[arg-type]
func = get_first_arg(ctx.arg_types)
if func is not None:
register_function(
ctx, type_args.singledispatch_obj, func, ctx.api.options, type_args.register_type
)
# see call to register_function in the callback for register
return func
return ctx.default_return_type
def call_singledispatch_function_callback(ctx: MethodSigContext) -> FunctionLike:
"""Called for functools._SingleDispatchCallable.__call__"""
if not isinstance(ctx.type, Instance):
return ctx.default_signature
metadata = get_singledispatch_info(ctx.type)
if metadata is None:
return ctx.default_signature
return metadata.fallback
| algorandfoundation/puya | src/puyapy/_vendor/mypy/plugins/singledispatch.py | Python | NOASSERTION | 8,446 |
from __future__ import annotations
"""Utilities to find the site and prefix information of a Python executable.
This file MUST remain compatible with all Python 3.8+ versions. Since we cannot make any
assumptions about the Python being executed, this module should not use *any* dependencies outside
of the standard library found in Python 3.8. This file is run each mypy run, so it should be kept
as fast as possible.
"""
import sys
if __name__ == "__main__":
# HACK: We don't want to pick up mypy.types as the top-level types
# module. This could happen if this file is run as a script.
# This workaround fixes this for Python versions before 3.11.
if sys.version_info < (3, 11):
old_sys_path = sys.path
sys.path = sys.path[1:]
import types # noqa: F401
sys.path = old_sys_path
import os
import site
import sysconfig
def getsitepackages() -> list[str]:
res = []
if hasattr(site, "getsitepackages"):
res.extend(site.getsitepackages())
if hasattr(site, "getusersitepackages") and site.ENABLE_USER_SITE:
res.insert(0, site.getusersitepackages())
else:
res = [sysconfig.get_paths()["purelib"]]
return res
def getsyspath() -> list[str]:
# Do not include things from the standard library
# because those should come from typeshed.
stdlib_zip = os.path.join(
sys.base_exec_prefix,
getattr(sys, "platlibdir", "lib"),
f"python{sys.version_info.major}{sys.version_info.minor}.zip",
)
stdlib = sysconfig.get_path("stdlib")
stdlib_ext = os.path.join(stdlib, "lib-dynload")
excludes = {stdlib_zip, stdlib, stdlib_ext}
# Drop the first entry of sys.path
# - If pyinfo.py is executed as a script (in a subprocess), this is the directory
# containing pyinfo.py
# - Otherwise, if mypy launched via console script, this is the directory of the script
# - Otherwise, if mypy launched via python -m mypy, this is the current directory
# In all these cases, it is desirable to drop the first entry
# Note that mypy adds the cwd to SearchPaths.python_path, so we still find things on the
# cwd consistently (the return value here sets SearchPaths.package_path)
# Python 3.11 adds a "safe_path" flag wherein Python won't automatically prepend
# anything to sys.path. In this case, the first entry of sys.path is no longer special.
offset = 0 if sys.version_info >= (3, 11) and sys.flags.safe_path else 1
abs_sys_path = (os.path.abspath(p) for p in sys.path[offset:])
return [p for p in abs_sys_path if p not in excludes]
def getsearchdirs() -> tuple[list[str], list[str]]:
return (getsyspath(), getsitepackages())
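def _example_subprocess_use() -> None:
    # Illustrative sketch, not part of this module: how a caller might run this
    # file against another interpreter (the interpreter path below is an
    # assumption) and read back the two lists printed by the "getsearchdirs"
    # branch of the __main__ block.
    import ast
    import subprocess

    out = subprocess.check_output(
        ["/usr/bin/python3", __file__, "getsearchdirs"], text=True
    )
    package_path, site_packages = ast.literal_eval(out)
    print(package_path, site_packages)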
if __name__ == "__main__":
sys.stdout.reconfigure(encoding="utf-8") # type: ignore[union-attr]
if sys.argv[-1] == "getsearchdirs":
print(repr(getsearchdirs()))
else:
print("ERROR: incorrect argument to pyinfo.py.", file=sys.stderr)
sys.exit(1)
| algorandfoundation/puya | src/puyapy/_vendor/mypy/pyinfo.py | Python | NOASSERTION | 3,014 |
"""Utilities related to determining the reachability of code (in semantic analysis)."""
from __future__ import annotations
from typing import Final, Tuple, TypeVar
from mypy.literals import literal
from mypy.nodes import (
LITERAL_YES,
AssertStmt,
Block,
CallExpr,
ComparisonExpr,
Expression,
FuncDef,
IfStmt,
Import,
ImportAll,
ImportFrom,
IndexExpr,
IntExpr,
MatchStmt,
MemberExpr,
NameExpr,
OpExpr,
SliceExpr,
StrExpr,
TupleExpr,
UnaryExpr,
)
from mypy.options import Options
from mypy.patterns import AsPattern, OrPattern, Pattern
from mypy.traverser import TraverserVisitor
# Inferred truth value of an expression.
ALWAYS_TRUE: Final = 1
MYPY_TRUE: Final = 2 # True in mypy, False at runtime
ALWAYS_FALSE: Final = 3
MYPY_FALSE: Final = 4 # False in mypy, True at runtime
TRUTH_VALUE_UNKNOWN: Final = 5
inverted_truth_mapping: Final = {
ALWAYS_TRUE: ALWAYS_FALSE,
ALWAYS_FALSE: ALWAYS_TRUE,
TRUTH_VALUE_UNKNOWN: TRUTH_VALUE_UNKNOWN,
MYPY_TRUE: MYPY_FALSE,
MYPY_FALSE: MYPY_TRUE,
}
reverse_op: Final = {"==": "==", "!=": "!=", "<": ">", ">": "<", "<=": ">=", ">=": "<="}
def infer_reachability_of_if_statement(s: IfStmt, options: Options) -> None:
for i in range(len(s.expr)):
result = infer_condition_value(s.expr[i], options)
if result in (ALWAYS_FALSE, MYPY_FALSE):
# The condition is considered always false, so we skip the if/elif body.
mark_block_unreachable(s.body[i])
elif result in (ALWAYS_TRUE, MYPY_TRUE):
# This condition is considered always true, so all of the remaining
# elif/else bodies should not be checked.
if result == MYPY_TRUE:
# This condition is false at runtime; this will affect
# import priorities.
mark_block_mypy_only(s.body[i])
for body in s.body[i + 1 :]:
mark_block_unreachable(body)
# Make sure else body always exists and is marked as
# unreachable so the type checker always knows that
# all control flow paths will flow through the if
# statement body.
if not s.else_body:
s.else_body = Block([])
mark_block_unreachable(s.else_body)
break
def infer_reachability_of_match_statement(s: MatchStmt, options: Options) -> None:
for i, guard in enumerate(s.guards):
pattern_value = infer_pattern_value(s.patterns[i])
if guard is not None:
guard_value = infer_condition_value(guard, options)
else:
guard_value = ALWAYS_TRUE
if pattern_value in (ALWAYS_FALSE, MYPY_FALSE) or guard_value in (
ALWAYS_FALSE,
MYPY_FALSE,
):
# The case is considered always false, so we skip the case body.
mark_block_unreachable(s.bodies[i])
elif pattern_value in (ALWAYS_FALSE, MYPY_TRUE) and guard_value in (
ALWAYS_TRUE,
MYPY_TRUE,
):
for body in s.bodies[i + 1 :]:
mark_block_unreachable(body)
if guard_value == MYPY_TRUE:
# This condition is false at runtime; this will affect
# import priorities.
mark_block_mypy_only(s.bodies[i])
def assert_will_always_fail(s: AssertStmt, options: Options) -> bool:
return infer_condition_value(s.expr, options) in (ALWAYS_FALSE, MYPY_FALSE)
def infer_condition_value(expr: Expression, options: Options) -> int:
"""Infer whether the given condition is always true/false.
Return ALWAYS_TRUE if always true, ALWAYS_FALSE if always false,
MYPY_TRUE if true under mypy and false at runtime, MYPY_FALSE if
false under mypy and true at runtime, else TRUTH_VALUE_UNKNOWN.
"""
pyversion = options.python_version
name = ""
negated = False
alias = expr
if isinstance(alias, UnaryExpr):
if alias.op == "not":
expr = alias.expr
negated = True
result = TRUTH_VALUE_UNKNOWN
if isinstance(expr, NameExpr):
name = expr.name
elif isinstance(expr, MemberExpr):
name = expr.name
elif isinstance(expr, OpExpr) and expr.op in ("and", "or"):
left = infer_condition_value(expr.left, options)
if (left in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == "and") or (
left in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == "or"
):
# Either `True and <other>` or `False or <other>`: the result will
# always be the right-hand-side.
return infer_condition_value(expr.right, options)
else:
# The result will always be the left-hand-side (e.g. ALWAYS_* or
# TRUTH_VALUE_UNKNOWN).
return left
else:
result = consider_sys_version_info(expr, pyversion)
if result == TRUTH_VALUE_UNKNOWN:
result = consider_sys_platform(expr, options.platform)
if result == TRUTH_VALUE_UNKNOWN:
if name == "PY2":
result = ALWAYS_FALSE
elif name == "PY3":
result = ALWAYS_TRUE
elif name == "MYPY" or name == "TYPE_CHECKING":
result = MYPY_TRUE
elif name in options.always_true:
result = ALWAYS_TRUE
elif name in options.always_false:
result = ALWAYS_FALSE
if negated:
result = inverted_truth_mapping[result]
return result
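def _example_recognized_conditions() -> None:
    # Illustrative sketch, not part of mypy: conditions whose truth value
    # infer_condition_value() can determine. Assuming --python-version=3.12 and
    # --platform=linux, the first test is ALWAYS_TRUE, the second ALWAYS_FALSE,
    # and TYPE_CHECKING is MYPY_TRUE (true for mypy, false at runtime).
    import sys
    from typing import TYPE_CHECKING

    if sys.version_info >= (3, 9):
        pass
    if sys.platform == "win32":
        pass
    if TYPE_CHECKING:
        pass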
def infer_pattern_value(pattern: Pattern) -> int:
if isinstance(pattern, AsPattern) and pattern.pattern is None:
return ALWAYS_TRUE
elif isinstance(pattern, OrPattern) and any(
infer_pattern_value(p) == ALWAYS_TRUE for p in pattern.patterns
):
return ALWAYS_TRUE
else:
return TRUTH_VALUE_UNKNOWN
def consider_sys_version_info(expr: Expression, pyversion: tuple[int, ...]) -> int:
"""Consider whether expr is a comparison involving sys.version_info.
Return ALWAYS_TRUE, ALWAYS_FALSE, or TRUTH_VALUE_UNKNOWN.
"""
# Cases supported:
# - sys.version_info[<int>] <compare_op> <int>
# - sys.version_info[:<int>] <compare_op> <tuple_of_n_ints>
# - sys.version_info <compare_op> <tuple_of_1_or_2_ints>
# (in this case <compare_op> must be >, >=, <, <=, but cannot be ==, !=)
if not isinstance(expr, ComparisonExpr):
return TRUTH_VALUE_UNKNOWN
# Let's not yet support chained comparisons.
if len(expr.operators) > 1:
return TRUTH_VALUE_UNKNOWN
op = expr.operators[0]
if op not in ("==", "!=", "<=", ">=", "<", ">"):
return TRUTH_VALUE_UNKNOWN
index = contains_sys_version_info(expr.operands[0])
thing = contains_int_or_tuple_of_ints(expr.operands[1])
if index is None or thing is None:
index = contains_sys_version_info(expr.operands[1])
thing = contains_int_or_tuple_of_ints(expr.operands[0])
op = reverse_op[op]
if isinstance(index, int) and isinstance(thing, int):
# sys.version_info[i] <compare_op> k
if 0 <= index <= 1:
return fixed_comparison(pyversion[index], op, thing)
else:
return TRUTH_VALUE_UNKNOWN
elif isinstance(index, tuple) and isinstance(thing, tuple):
lo, hi = index
if lo is None:
lo = 0
if hi is None:
hi = 2
if 0 <= lo < hi <= 2:
val = pyversion[lo:hi]
if len(val) == len(thing) or len(val) > len(thing) and op not in ("==", "!="):
return fixed_comparison(val, op, thing)
return TRUTH_VALUE_UNKNOWN
def consider_sys_platform(expr: Expression, platform: str) -> int:
"""Consider whether expr is a comparison involving sys.platform.
Return ALWAYS_TRUE, ALWAYS_FALSE, or TRUTH_VALUE_UNKNOWN.
"""
# Cases supported:
# - sys.platform == 'posix'
# - sys.platform != 'win32'
# - sys.platform.startswith('win')
if isinstance(expr, ComparisonExpr):
# Let's not yet support chained comparisons.
if len(expr.operators) > 1:
return TRUTH_VALUE_UNKNOWN
op = expr.operators[0]
if op not in ("==", "!="):
return TRUTH_VALUE_UNKNOWN
if not is_sys_attr(expr.operands[0], "platform"):
return TRUTH_VALUE_UNKNOWN
right = expr.operands[1]
if not isinstance(right, StrExpr):
return TRUTH_VALUE_UNKNOWN
return fixed_comparison(platform, op, right.value)
elif isinstance(expr, CallExpr):
if not isinstance(expr.callee, MemberExpr):
return TRUTH_VALUE_UNKNOWN
if len(expr.args) != 1 or not isinstance(expr.args[0], StrExpr):
return TRUTH_VALUE_UNKNOWN
if not is_sys_attr(expr.callee.expr, "platform"):
return TRUTH_VALUE_UNKNOWN
if expr.callee.name != "startswith":
return TRUTH_VALUE_UNKNOWN
if platform.startswith(expr.args[0].value):
return ALWAYS_TRUE
else:
return ALWAYS_FALSE
else:
return TRUTH_VALUE_UNKNOWN
Targ = TypeVar("Targ", int, str, Tuple[int, ...])
def fixed_comparison(left: Targ, op: str, right: Targ) -> int:
rmap = {False: ALWAYS_FALSE, True: ALWAYS_TRUE}
if op == "==":
return rmap[left == right]
if op == "!=":
return rmap[left != right]
if op == "<=":
return rmap[left <= right]
if op == ">=":
return rmap[left >= right]
if op == "<":
return rmap[left < right]
if op == ">":
return rmap[left > right]
return TRUTH_VALUE_UNKNOWN
def contains_int_or_tuple_of_ints(expr: Expression) -> None | int | tuple[int, ...]:
if isinstance(expr, IntExpr):
return expr.value
if isinstance(expr, TupleExpr):
if literal(expr) == LITERAL_YES:
thing = []
for x in expr.items:
if not isinstance(x, IntExpr):
return None
thing.append(x.value)
return tuple(thing)
return None
def contains_sys_version_info(expr: Expression) -> None | int | tuple[int | None, int | None]:
if is_sys_attr(expr, "version_info"):
return (None, None) # Same as sys.version_info[:]
if isinstance(expr, IndexExpr) and is_sys_attr(expr.base, "version_info"):
index = expr.index
if isinstance(index, IntExpr):
return index.value
if isinstance(index, SliceExpr):
if index.stride is not None:
if not isinstance(index.stride, IntExpr) or index.stride.value != 1:
return None
begin = end = None
if index.begin_index is not None:
if not isinstance(index.begin_index, IntExpr):
return None
begin = index.begin_index.value
if index.end_index is not None:
if not isinstance(index.end_index, IntExpr):
return None
end = index.end_index.value
return (begin, end)
return None
def is_sys_attr(expr: Expression, name: str) -> bool:
# TODO: This currently doesn't work with code like this:
# - import sys as _sys
# - from sys import version_info
if isinstance(expr, MemberExpr) and expr.name == name:
if isinstance(expr.expr, NameExpr) and expr.expr.name == "sys":
# TODO: Guard against a local named sys, etc.
# (Though later passes will still do most checking.)
return True
return False
def mark_block_unreachable(block: Block) -> None:
block.is_unreachable = True
block.accept(MarkImportsUnreachableVisitor())
class MarkImportsUnreachableVisitor(TraverserVisitor):
"""Visitor that flags all imports nested within a node as unreachable."""
def visit_import(self, node: Import) -> None:
node.is_unreachable = True
def visit_import_from(self, node: ImportFrom) -> None:
node.is_unreachable = True
def visit_import_all(self, node: ImportAll) -> None:
node.is_unreachable = True
def mark_block_mypy_only(block: Block) -> None:
block.accept(MarkImportsMypyOnlyVisitor())
class MarkImportsMypyOnlyVisitor(TraverserVisitor):
"""Visitor that sets is_mypy_only (which affects priority)."""
def visit_import(self, node: Import) -> None:
node.is_mypy_only = True
def visit_import_from(self, node: ImportFrom) -> None:
node.is_mypy_only = True
def visit_import_all(self, node: ImportAll) -> None:
node.is_mypy_only = True
def visit_func_def(self, node: FuncDef) -> None:
node.is_mypy_only = True
| algorandfoundation/puya | src/puyapy/_vendor/mypy/reachability.py | Python | NOASSERTION | 12,690 |
"""Find line-level reference information from a mypy AST (undocumented feature)"""
from __future__ import annotations
from mypy.nodes import (
LDEF,
Expression,
FuncDef,
MemberExpr,
MypyFile,
NameExpr,
RefExpr,
SymbolNode,
TypeInfo,
)
from mypy.traverser import TraverserVisitor
from mypy.typeops import tuple_fallback
from mypy.types import (
FunctionLike,
Instance,
TupleType,
Type,
TypeType,
TypeVarLikeType,
get_proper_type,
)
class RefInfoVisitor(TraverserVisitor):
def __init__(self, type_map: dict[Expression, Type]) -> None:
super().__init__()
self.type_map = type_map
self.data: list[dict[str, object]] = []
def visit_name_expr(self, expr: NameExpr) -> None:
super().visit_name_expr(expr)
self.record_ref_expr(expr)
def visit_member_expr(self, expr: MemberExpr) -> None:
super().visit_member_expr(expr)
self.record_ref_expr(expr)
def visit_func_def(self, func: FuncDef) -> None:
if func.expanded:
for item in func.expanded:
if isinstance(item, FuncDef):
super().visit_func_def(item)
else:
super().visit_func_def(func)
def record_ref_expr(self, expr: RefExpr) -> None:
fullname = None
if expr.kind != LDEF and "." in expr.fullname:
fullname = expr.fullname
elif isinstance(expr, MemberExpr):
typ = self.type_map.get(expr.expr)
sym = None
if isinstance(expr.expr, RefExpr):
sym = expr.expr.node
if typ:
tfn = type_fullname(typ, sym)
if tfn:
fullname = f"{tfn}.{expr.name}"
if not fullname:
fullname = f"*.{expr.name}"
if fullname is not None:
self.data.append({"line": expr.line, "column": expr.column, "target": fullname})
def type_fullname(typ: Type, node: SymbolNode | None = None) -> str | None:
typ = get_proper_type(typ)
if isinstance(typ, Instance):
return typ.type.fullname
elif isinstance(typ, TypeType):
return type_fullname(typ.item)
elif isinstance(typ, FunctionLike) and typ.is_type_obj():
if isinstance(node, TypeInfo):
return node.fullname
return type_fullname(typ.fallback)
elif isinstance(typ, TupleType):
return type_fullname(tuple_fallback(typ))
elif isinstance(typ, TypeVarLikeType):
return type_fullname(typ.upper_bound)
return None
def get_undocumented_ref_info_json(
tree: MypyFile, type_map: dict[Expression, Type]
) -> list[dict[str, object]]:
visitor = RefInfoVisitor(type_map)
tree.accept(visitor)
return visitor.data
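def _example_ref_entry() -> None:
    # Illustrative sketch, not part of mypy: each recorded item is a plain dict
    # like the one below (the values are made up); member expressions whose
    # base type cannot be resolved fall back to a "*.<name>" target.
    entry: dict[str, object] = {"line": 3, "column": 8, "target": "os.path.join"}
    assert set(entry) == {"line", "column", "target"}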
| algorandfoundation/puya | src/puyapy/_vendor/mypy/refinfo.py | Python | NOASSERTION | 2,784 |
from __future__ import annotations
from contextlib import contextmanager
from typing import Final, Iterator
from mypy.nodes import (
AssignmentStmt,
Block,
BreakStmt,
ClassDef,
ContinueStmt,
ForStmt,
FuncDef,
Import,
ImportAll,
ImportFrom,
IndexExpr,
ListExpr,
Lvalue,
MatchStmt,
MemberExpr,
MypyFile,
NameExpr,
StarExpr,
TryStmt,
TupleExpr,
WhileStmt,
WithStmt,
)
from mypy.patterns import AsPattern
from mypy.traverser import TraverserVisitor
# Scope kinds
FILE: Final = 0
FUNCTION: Final = 1
CLASS: Final = 2
class VariableRenameVisitor(TraverserVisitor):
"""Rename variables to allow redefinition of variables.
For example, consider this code:
x = 0
f(x)
x = "a"
g(x)
It will be transformed like this:
x' = 0
f(x')
x = "a"
g(x)
There will be two independent variables (x' and x) that will have separate
inferred types. The publicly exposed variant will get the non-suffixed name.
This is the last definition at module top level and the first definition
(argument) within a function.
Renaming only happens for assignments within the same block. Renaming is
performed before semantic analysis, immediately after parsing.
The implementation performs a rudimentary static analysis. The analysis is
overly conservative to keep things simple.
"""
def __init__(self) -> None:
# Counter for labeling new blocks
self.block_id = 0
# Number of surrounding try statements that disallow variable redefinition
self.disallow_redef_depth = 0
# Number of surrounding loop statements
self.loop_depth = 0
# Map block id to loop depth.
self.block_loop_depth: dict[int, int] = {}
# Stack of block ids being processed.
self.blocks: list[int] = []
# List of scopes; each scope maps short (unqualified) name to block id.
self.var_blocks: list[dict[str, int]] = []
# References to variables that we may need to rename. List of
# scopes; each scope is a mapping from name to list of collections
# of names that refer to the same logical variable.
self.refs: list[dict[str, list[list[NameExpr]]]] = []
# Number of reads of the most recent definition of a variable (per scope)
self.num_reads: list[dict[str, int]] = []
# Kinds of nested scopes (FILE, FUNCTION or CLASS)
self.scope_kinds: list[int] = []
def visit_mypy_file(self, file_node: MypyFile) -> None:
"""Rename variables within a file.
This is the main entry point to this class.
"""
self.clear()
with self.enter_scope(FILE), self.enter_block():
for d in file_node.defs:
d.accept(self)
def visit_func_def(self, fdef: FuncDef) -> None:
# Conservatively do not allow variable defined before a function to
# be redefined later, since function could refer to either definition.
self.reject_redefinition_of_vars_in_scope()
with self.enter_scope(FUNCTION), self.enter_block():
for arg in fdef.arguments:
name = arg.variable.name
# 'self' can't be redefined since it's special as it allows definition of
# attributes. 'cls' can't be used to define attributes so we can ignore it.
can_be_redefined = name != "self" # TODO: Proper check
self.record_assignment(arg.variable.name, can_be_redefined)
self.handle_arg(name)
for stmt in fdef.body.body:
stmt.accept(self)
def visit_class_def(self, cdef: ClassDef) -> None:
self.reject_redefinition_of_vars_in_scope()
with self.enter_scope(CLASS):
super().visit_class_def(cdef)
def visit_block(self, block: Block) -> None:
with self.enter_block():
super().visit_block(block)
def visit_while_stmt(self, stmt: WhileStmt) -> None:
with self.enter_loop():
super().visit_while_stmt(stmt)
def visit_for_stmt(self, stmt: ForStmt) -> None:
stmt.expr.accept(self)
self.analyze_lvalue(stmt.index, True)
# Also analyze as non-lvalue so that every for loop index variable is assumed to be read.
stmt.index.accept(self)
with self.enter_loop():
stmt.body.accept(self)
if stmt.else_body:
stmt.else_body.accept(self)
def visit_break_stmt(self, stmt: BreakStmt) -> None:
self.reject_redefinition_of_vars_in_loop()
def visit_continue_stmt(self, stmt: ContinueStmt) -> None:
self.reject_redefinition_of_vars_in_loop()
def visit_try_stmt(self, stmt: TryStmt) -> None:
# Variables defined by a try statement get special treatment in the
# type checker which allows them to be always redefined, so no need to
# do renaming here.
with self.enter_try():
super().visit_try_stmt(stmt)
def visit_with_stmt(self, stmt: WithStmt) -> None:
for expr in stmt.expr:
expr.accept(self)
for target in stmt.target:
if target is not None:
self.analyze_lvalue(target)
# We allow redefinitions in the body of a with statement for
# convenience. This is unsafe since with statements can affect control
# flow by catching exceptions, but this is rare except for
# assertRaises() and other similar functions, where the exception is
# raised by the last statement in the body, which usually isn't a
# problem.
stmt.body.accept(self)
def visit_import(self, imp: Import) -> None:
for id, as_id in imp.ids:
self.record_assignment(as_id or id, False)
def visit_import_from(self, imp: ImportFrom) -> None:
for id, as_id in imp.names:
self.record_assignment(as_id or id, False)
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
s.rvalue.accept(self)
for lvalue in s.lvalues:
self.analyze_lvalue(lvalue)
def visit_match_stmt(self, s: MatchStmt) -> None:
s.subject.accept(self)
for i in range(len(s.patterns)):
with self.enter_block():
s.patterns[i].accept(self)
guard = s.guards[i]
if guard is not None:
guard.accept(self)
# We already entered a block, so visit this block's statements directly
for stmt in s.bodies[i].body:
stmt.accept(self)
def visit_capture_pattern(self, p: AsPattern) -> None:
if p.name is not None:
self.analyze_lvalue(p.name)
def analyze_lvalue(self, lvalue: Lvalue, is_nested: bool = False) -> None:
"""Process assignment; in particular, keep track of (re)defined names.
Args:
is_nested: True for non-outermost Lvalue in a multiple assignment such as
"x, y = ..."
"""
if isinstance(lvalue, NameExpr):
name = lvalue.name
is_new = self.record_assignment(name, True)
if is_new:
self.handle_def(lvalue)
else:
self.handle_refine(lvalue)
if is_nested:
# This allows these to be redefined freely even if never read. Multiple
# assignment like "x, _ _ = y" defines dummy variables that are never read.
self.handle_ref(lvalue)
elif isinstance(lvalue, (ListExpr, TupleExpr)):
for item in lvalue.items:
self.analyze_lvalue(item, is_nested=True)
elif isinstance(lvalue, MemberExpr):
lvalue.expr.accept(self)
elif isinstance(lvalue, IndexExpr):
lvalue.base.accept(self)
lvalue.index.accept(self)
elif isinstance(lvalue, StarExpr):
# Propagate is_nested since in a typical use case like "x, *rest = ..." 'rest' may
# be freely reused.
self.analyze_lvalue(lvalue.expr, is_nested=is_nested)
def visit_name_expr(self, expr: NameExpr) -> None:
self.handle_ref(expr)
# Helpers for renaming references
def handle_arg(self, name: str) -> None:
"""Store function argument."""
self.refs[-1][name] = [[]]
self.num_reads[-1][name] = 0
def handle_def(self, expr: NameExpr) -> None:
"""Store new name definition."""
name = expr.name
names = self.refs[-1].setdefault(name, [])
names.append([expr])
self.num_reads[-1][name] = 0
def handle_refine(self, expr: NameExpr) -> None:
"""Store assignment to an existing name (that replaces previous value, if any)."""
name = expr.name
if name in self.refs[-1]:
names = self.refs[-1][name]
if not names:
names.append([])
names[-1].append(expr)
def handle_ref(self, expr: NameExpr) -> None:
"""Store reference to defined name."""
name = expr.name
if name in self.refs[-1]:
names = self.refs[-1][name]
if not names:
names.append([])
names[-1].append(expr)
num_reads = self.num_reads[-1]
num_reads[name] = num_reads.get(name, 0) + 1
def flush_refs(self) -> None:
"""Rename all references within the current scope.
This will be called at the end of a scope.
"""
is_func = self.scope_kinds[-1] == FUNCTION
for refs in self.refs[-1].values():
if len(refs) == 1:
# Only one definition -- no renaming needed.
continue
if is_func:
# In a function, don't rename the first definition, as it
# may be an argument that must preserve the name.
to_rename = refs[1:]
else:
# At module top level, don't rename the final definition,
# as it will be publicly visible outside the module.
to_rename = refs[:-1]
for i, item in enumerate(to_rename):
rename_refs(item, i)
self.refs.pop()
# Helpers for determining which assignments define new variables
def clear(self) -> None:
self.blocks = []
self.var_blocks = []
@contextmanager
def enter_block(self) -> Iterator[None]:
self.block_id += 1
self.blocks.append(self.block_id)
self.block_loop_depth[self.block_id] = self.loop_depth
try:
yield
finally:
self.blocks.pop()
@contextmanager
def enter_try(self) -> Iterator[None]:
self.disallow_redef_depth += 1
try:
yield
finally:
self.disallow_redef_depth -= 1
@contextmanager
def enter_loop(self) -> Iterator[None]:
self.loop_depth += 1
try:
yield
finally:
self.loop_depth -= 1
def current_block(self) -> int:
return self.blocks[-1]
@contextmanager
def enter_scope(self, kind: int) -> Iterator[None]:
self.var_blocks.append({})
self.refs.append({})
self.num_reads.append({})
self.scope_kinds.append(kind)
try:
yield
finally:
self.flush_refs()
self.var_blocks.pop()
self.num_reads.pop()
self.scope_kinds.pop()
def is_nested(self) -> int:
return len(self.var_blocks) > 1
def reject_redefinition_of_vars_in_scope(self) -> None:
"""Make it impossible to redefine defined variables in the current scope.
This is used if we encounter a function definition that
can make it ambiguous which definition is live. Example:
x = 0
def f() -> int:
return x
x = '' # Error -- cannot redefine x across function definition
"""
var_blocks = self.var_blocks[-1]
for key in var_blocks:
var_blocks[key] = -1
def reject_redefinition_of_vars_in_loop(self) -> None:
"""Reject redefinition of variables in the innermost loop.
If there is an early exit from a loop, there may be ambiguity about which
value may escape the loop. Example where this matters:
while f():
x = 0
if g():
break
x = '' # Error -- not a redefinition
reveal_type(x) # int
This method ensures that the second assignment to 'x' doesn't introduce a new
variable.
"""
var_blocks = self.var_blocks[-1]
for key, block in var_blocks.items():
if self.block_loop_depth.get(block) == self.loop_depth:
var_blocks[key] = -1
def record_assignment(self, name: str, can_be_redefined: bool) -> bool:
"""Record assignment to given name and return True if it defines a new variable.
Args:
can_be_redefined: If True, allows assignment in the same block to redefine
this name (if this is a new definition)
"""
if self.num_reads[-1].get(name, -1) == 0:
# Only set, not read, so no reason to redefine
return False
if self.disallow_redef_depth > 0:
            # Can't redefine within a try/with block.
can_be_redefined = False
block = self.current_block()
var_blocks = self.var_blocks[-1]
if name not in var_blocks:
# New definition in this scope.
if can_be_redefined:
# Store the block where this was defined to allow redefinition in
# the same block only.
var_blocks[name] = block
else:
# This doesn't support arbitrary redefinition.
var_blocks[name] = -1
return True
elif var_blocks[name] == block:
# Redefinition -- defines a new variable with the same name.
return True
else:
# Assigns to an existing variable.
return False
class LimitedVariableRenameVisitor(TraverserVisitor):
"""Perform some limited variable renaming in with statements.
This allows reusing a variable in multiple with statements with
different types. For example, the two instances of 'x' can have
incompatible types:
with C() as x:
f(x)
with D() as x:
g(x)
The above code gets renamed conceptually into this (not valid Python!):
with C() as x':
f(x')
with D() as x:
g(x)
If there's a reference to a variable defined in 'with' outside the
statement, or if there's any trickiness around variable visibility
(e.g. function definitions), we give up and won't perform renaming.
The main use case is to allow binding both readable and writable
binary files into the same variable. These have different types:
with open(fnam, 'rb') as f: ...
with open(fnam, 'wb') as f: ...
"""
def __init__(self) -> None:
# Short names of variables bound in with statements using "as"
# in a surrounding scope
self.bound_vars: list[str] = []
# Stack of names that can't be safely renamed, per scope ('*' means that
# no names can be renamed)
self.skipped: list[set[str]] = []
# References to variables that we may need to rename. Stack of
# scopes; each scope is a mapping from name to list of collections
# of names that refer to the same logical variable.
self.refs: list[dict[str, list[list[NameExpr]]]] = []
def visit_mypy_file(self, file_node: MypyFile) -> None:
"""Rename variables within a file.
This is the main entry point to this class.
"""
with self.enter_scope():
for d in file_node.defs:
d.accept(self)
def visit_func_def(self, fdef: FuncDef) -> None:
self.reject_redefinition_of_vars_in_scope()
with self.enter_scope():
for arg in fdef.arguments:
self.record_skipped(arg.variable.name)
super().visit_func_def(fdef)
def visit_class_def(self, cdef: ClassDef) -> None:
self.reject_redefinition_of_vars_in_scope()
with self.enter_scope():
super().visit_class_def(cdef)
def visit_with_stmt(self, stmt: WithStmt) -> None:
for expr in stmt.expr:
expr.accept(self)
old_len = len(self.bound_vars)
for target in stmt.target:
if target is not None:
self.analyze_lvalue(target)
for target in stmt.target:
if target:
target.accept(self)
stmt.body.accept(self)
while len(self.bound_vars) > old_len:
self.bound_vars.pop()
def analyze_lvalue(self, lvalue: Lvalue) -> None:
if isinstance(lvalue, NameExpr):
name = lvalue.name
if name in self.bound_vars:
# Name bound in a surrounding with statement, so it can be renamed
self.visit_name_expr(lvalue)
else:
var_info = self.refs[-1]
if name not in var_info:
var_info[name] = []
var_info[name].append([])
self.bound_vars.append(name)
elif isinstance(lvalue, (ListExpr, TupleExpr)):
for item in lvalue.items:
self.analyze_lvalue(item)
elif isinstance(lvalue, MemberExpr):
lvalue.expr.accept(self)
elif isinstance(lvalue, IndexExpr):
lvalue.base.accept(self)
lvalue.index.accept(self)
elif isinstance(lvalue, StarExpr):
self.analyze_lvalue(lvalue.expr)
def visit_import(self, imp: Import) -> None:
# We don't support renaming imports
for id, as_id in imp.ids:
self.record_skipped(as_id or id)
def visit_import_from(self, imp: ImportFrom) -> None:
# We don't support renaming imports
for id, as_id in imp.names:
self.record_skipped(as_id or id)
def visit_import_all(self, imp: ImportAll) -> None:
# Give up, since we don't know all imported names yet
self.reject_redefinition_of_vars_in_scope()
def visit_name_expr(self, expr: NameExpr) -> None:
name = expr.name
if name in self.bound_vars:
# Record reference so that it can be renamed later
for scope in reversed(self.refs):
if name in scope:
scope[name][-1].append(expr)
else:
self.record_skipped(name)
@contextmanager
def enter_scope(self) -> Iterator[None]:
self.skipped.append(set())
self.refs.append({})
yield None
self.flush_refs()
def reject_redefinition_of_vars_in_scope(self) -> None:
self.record_skipped("*")
def record_skipped(self, name: str) -> None:
self.skipped[-1].add(name)
def flush_refs(self) -> None:
ref_dict = self.refs.pop()
skipped = self.skipped.pop()
if "*" not in skipped:
for name, refs in ref_dict.items():
if len(refs) <= 1 or name in skipped:
continue
# At module top level we must not rename the final definition,
# as it may be publicly visible
to_rename = refs[:-1]
for i, item in enumerate(to_rename):
rename_refs(item, i)
def rename_refs(names: list[NameExpr], index: int) -> None:
name = names[0].name
new_name = name + "'" * (index + 1)
for expr in names:
expr.name = new_name
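def _example_rename_refs() -> None:
    # Illustrative sketch, not part of mypy: rename_refs() gives every reference
    # in the group the same primed name, so the first of two definitions of `x`
    # becomes `x'` while the final definition keeps the public name.
    first_def = [NameExpr("x"), NameExpr("x")]  # definition plus one read
    rename_refs(first_def, 0)
    assert all(e.name == "x'" for e in first_def)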
| algorandfoundation/puya | src/puyapy/_vendor/mypy/renaming.py | Python | NOASSERTION | 19,910 |
"""Classes for producing HTML reports about imprecision."""
from __future__ import annotations
import collections
import itertools
import json
import os
import shutil
import sys
import time
import tokenize
from abc import ABCMeta, abstractmethod
from operator import attrgetter
from typing import Any, Callable, Dict, Final, Iterator, Tuple
from typing_extensions import TypeAlias as _TypeAlias
from urllib.request import pathname2url
from mypy import stats
from mypy.defaults import REPORTER_NAMES
from mypy.nodes import Expression, FuncDef, MypyFile
from mypy.options import Options
from mypy.traverser import TraverserVisitor
from mypy.types import Type, TypeOfAny
from mypy.version import __version__
try:
from lxml import etree # type: ignore[import-untyped]
LXML_INSTALLED = True
except ImportError:
LXML_INSTALLED = False
type_of_any_name_map: Final[collections.OrderedDict[int, str]] = collections.OrderedDict(
[
(TypeOfAny.unannotated, "Unannotated"),
(TypeOfAny.explicit, "Explicit"),
(TypeOfAny.from_unimported_type, "Unimported"),
(TypeOfAny.from_omitted_generics, "Omitted Generics"),
(TypeOfAny.from_error, "Error"),
(TypeOfAny.special_form, "Special Form"),
(TypeOfAny.implementation_artifact, "Implementation Artifact"),
]
)
ReporterClasses: _TypeAlias = Dict[
str, Tuple[Callable[["Reports", str], "AbstractReporter"], bool]
]
reporter_classes: Final[ReporterClasses] = {}
class Reports:
def __init__(self, data_dir: str, report_dirs: dict[str, str]) -> None:
self.data_dir = data_dir
self.reporters: list[AbstractReporter] = []
self.named_reporters: dict[str, AbstractReporter] = {}
for report_type, report_dir in sorted(report_dirs.items()):
self.add_report(report_type, report_dir)
def add_report(self, report_type: str, report_dir: str) -> AbstractReporter:
try:
return self.named_reporters[report_type]
except KeyError:
pass
reporter_cls, needs_lxml = reporter_classes[report_type]
if needs_lxml and not LXML_INSTALLED:
print(
(
"You must install the lxml package before you can run mypy"
" with `--{}-report`.\n"
"You can do this with `python3 -m pip install lxml`."
).format(report_type),
file=sys.stderr,
)
raise ImportError
reporter = reporter_cls(self, report_dir)
self.reporters.append(reporter)
self.named_reporters[report_type] = reporter
return reporter
def file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
for reporter in self.reporters:
reporter.on_file(tree, modules, type_map, options)
def finish(self) -> None:
for reporter in self.reporters:
reporter.on_finish()
class AbstractReporter(metaclass=ABCMeta):
def __init__(self, reports: Reports, output_dir: str) -> None:
self.output_dir = output_dir
if output_dir != "<memory>":
os.makedirs(output_dir, exist_ok=True)
@abstractmethod
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
pass
@abstractmethod
def on_finish(self) -> None:
pass
def register_reporter(
report_name: str,
reporter: Callable[[Reports, str], AbstractReporter],
needs_lxml: bool = False,
) -> None:
reporter_classes[report_name] = (reporter, needs_lxml)
def alias_reporter(source_reporter: str, target_reporter: str) -> None:
reporter_classes[target_reporter] = reporter_classes[source_reporter]
def should_skip_path(path: str) -> bool:
if stats.is_special_module(path):
return True
if path.startswith(".."):
return True
if "stubs" in path.split("/") or "stubs" in path.split(os.sep):
return True
return False
def iterate_python_lines(path: str) -> Iterator[tuple[int, str]]:
"""Return an iterator over (line number, line text) from a Python file."""
try:
with tokenize.open(path) as input_file:
yield from enumerate(input_file, 1)
except IsADirectoryError:
# can happen with namespace packages
pass
class FuncCounterVisitor(TraverserVisitor):
def __init__(self) -> None:
super().__init__()
self.counts = [0, 0]
def visit_func_def(self, defn: FuncDef) -> None:
self.counts[defn.type is not None] += 1
class LineCountReporter(AbstractReporter):
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.counts: dict[str, tuple[int, int, int, int]] = {}
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
# Count physical lines. This assumes the file's encoding is a
# superset of ASCII (or at least uses \n in its line endings).
try:
with open(tree.path, "rb") as f:
physical_lines = len(f.readlines())
except IsADirectoryError:
# can happen with namespace packages
physical_lines = 0
func_counter = FuncCounterVisitor()
tree.accept(func_counter)
unannotated_funcs, annotated_funcs = func_counter.counts
total_funcs = annotated_funcs + unannotated_funcs
# Don't count lines or functions as annotated if they have their errors ignored.
if options.ignore_errors:
annotated_funcs = 0
imputed_annotated_lines = (
physical_lines * annotated_funcs // total_funcs if total_funcs else physical_lines
)
self.counts[tree._fullname] = (
imputed_annotated_lines,
physical_lines,
annotated_funcs,
total_funcs,
)
def on_finish(self) -> None:
counts: list[tuple[tuple[int, int, int, int], str]] = sorted(
((c, p) for p, c in self.counts.items()), reverse=True
)
total_counts = tuple(sum(c[i] for c, p in counts) for i in range(4))
with open(os.path.join(self.output_dir, "linecount.txt"), "w") as f:
f.write("{:7} {:7} {:6} {:6} total\n".format(*total_counts))
for c, p in counts:
f.write(f"{c[0]:7} {c[1]:7} {c[2]:6} {c[3]:6} {p}\n")
register_reporter("linecount", LineCountReporter)
class AnyExpressionsReporter(AbstractReporter):
"""Report frequencies of different kinds of Any types."""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.counts: dict[str, tuple[int, int]] = {}
self.any_types_counter: dict[str, collections.Counter[int]] = {}
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
visitor = stats.StatisticsVisitor(
inferred=True,
filename=tree.fullname,
modules=modules,
typemap=type_map,
all_nodes=True,
visit_untyped_defs=False,
)
tree.accept(visitor)
self.any_types_counter[tree.fullname] = visitor.type_of_any_counter
num_unanalyzed_lines = list(visitor.line_map.values()).count(stats.TYPE_UNANALYZED)
# count each line of dead code as one expression of type "Any"
num_any = visitor.num_any_exprs + num_unanalyzed_lines
num_total = visitor.num_imprecise_exprs + visitor.num_precise_exprs + num_any
if num_total > 0:
self.counts[tree.fullname] = (num_any, num_total)
def on_finish(self) -> None:
self._report_any_exprs()
self._report_types_of_anys()
def _write_out_report(
self, filename: str, header: list[str], rows: list[list[str]], footer: list[str]
) -> None:
row_len = len(header)
assert all(len(row) == row_len for row in rows + [header, footer])
min_column_distance = 3 # minimum distance between numbers in two columns
widths = [-1] * row_len
for row in rows + [header, footer]:
for i, value in enumerate(row):
widths[i] = max(widths[i], len(value))
for i, w in enumerate(widths):
# Do not add min_column_distance to the first column.
if i > 0:
widths[i] = w + min_column_distance
with open(os.path.join(self.output_dir, filename), "w") as f:
header_str = ("{:>{}}" * len(widths)).format(*itertools.chain(*zip(header, widths)))
separator = "-" * len(header_str)
f.write(header_str + "\n")
f.write(separator + "\n")
for row_values in rows:
r = ("{:>{}}" * len(widths)).format(*itertools.chain(*zip(row_values, widths)))
f.write(r + "\n")
f.write(separator + "\n")
footer_str = ("{:>{}}" * len(widths)).format(*itertools.chain(*zip(footer, widths)))
f.write(footer_str + "\n")
def _report_any_exprs(self) -> None:
total_any = sum(num_any for num_any, _ in self.counts.values())
total_expr = sum(total for _, total in self.counts.values())
total_coverage = 100.0
if total_expr > 0:
total_coverage = (float(total_expr - total_any) / float(total_expr)) * 100
column_names = ["Name", "Anys", "Exprs", "Coverage"]
rows: list[list[str]] = []
for filename in sorted(self.counts):
(num_any, num_total) = self.counts[filename]
coverage = (float(num_total - num_any) / float(num_total)) * 100
coverage_str = f"{coverage:.2f}%"
rows.append([filename, str(num_any), str(num_total), coverage_str])
rows.sort(key=lambda x: x[0])
total_row = ["Total", str(total_any), str(total_expr), f"{total_coverage:.2f}%"]
self._write_out_report("any-exprs.txt", column_names, rows, total_row)
def _report_types_of_anys(self) -> None:
total_counter: collections.Counter[int] = collections.Counter()
for counter in self.any_types_counter.values():
for any_type, value in counter.items():
total_counter[any_type] += value
file_column_name = "Name"
total_row_name = "Total"
column_names = [file_column_name] + list(type_of_any_name_map.values())
rows: list[list[str]] = []
for filename, counter in self.any_types_counter.items():
rows.append([filename] + [str(counter[typ]) for typ in type_of_any_name_map])
rows.sort(key=lambda x: x[0])
total_row = [total_row_name] + [str(total_counter[typ]) for typ in type_of_any_name_map]
self._write_out_report("types-of-anys.txt", column_names, rows, total_row)
register_reporter("any-exprs", AnyExpressionsReporter)
class LineCoverageVisitor(TraverserVisitor):
def __init__(self, source: list[str]) -> None:
self.source = source
# For each line of source, we maintain a pair of
# * the indentation level of the surrounding function
# (-1 if not inside a function), and
# * whether the surrounding function is typed.
# Initially, everything is covered at indentation level -1.
self.lines_covered = [(-1, True) for l in source]
# The Python AST has position information for the starts of
# elements, but not for their ends. Fortunately the
# indentation-based syntax makes it pretty easy to find where a
# block ends without doing any real parsing.
# TODO: Handle line continuations (explicit and implicit) and
# multi-line string literals. (But at least line continuations
    # are normally more indented than their surrounding block anyway,
# by PEP 8.)
def indentation_level(self, line_number: int) -> int | None:
"""Return the indentation of a line of the source (specified by
zero-indexed line number). Returns None for blank lines or comments."""
line = self.source[line_number]
indent = 0
for char in list(line):
if char == " ":
indent += 1
elif char == "\t":
indent = 8 * ((indent + 8) // 8)
elif char == "#":
# Line is a comment; ignore it
return None
elif char == "\n":
# Line is entirely whitespace; ignore it
return None
# TODO line continuation (\)
else:
# Found a non-whitespace character
return indent
# Line is entirely whitespace, and at end of file
# with no trailing newline; ignore it
return None
def visit_func_def(self, defn: FuncDef) -> None:
start_line = defn.line - 1
start_indent = None
# When a function is decorated, sometimes the start line will point to
# whitespace or comments between the decorator and the function, so
# we have to look for the start.
while start_line < len(self.source):
start_indent = self.indentation_level(start_line)
if start_indent is not None:
break
start_line += 1
# If we can't find the function give up and don't annotate anything.
# Our line numbers are not reliable enough to be asserting on.
if start_indent is None:
return
cur_line = start_line + 1
end_line = cur_line
# After this loop, function body will be lines [start_line, end_line)
while cur_line < len(self.source):
cur_indent = self.indentation_level(cur_line)
if cur_indent is None:
# Consume the line, but don't mark it as belonging to the function yet.
cur_line += 1
elif cur_indent > start_indent:
# A non-blank line that belongs to the function.
cur_line += 1
end_line = cur_line
else:
# We reached a line outside the function definition.
break
is_typed = defn.type is not None
for line in range(start_line, end_line):
old_indent, _ = self.lines_covered[line]
# If there was an old indent level for this line, and the new
# level isn't increasing the indentation, ignore it.
# This is to be defensive against funniness in our line numbers,
# which are not always reliable.
if old_indent <= start_indent:
self.lines_covered[line] = (start_indent, is_typed)
# Visit the body, in case there are nested functions
super().visit_func_def(defn)
class LineCoverageReporter(AbstractReporter):
"""Exact line coverage reporter.
This reporter writes a JSON dictionary with one field 'lines' to
the file 'coverage.json' in the specified report directory. The
value of that field is a dictionary which associates to each
source file's absolute pathname the list of line numbers that
belong to typed functions in that file.
"""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.lines_covered: dict[str, list[int]] = {}
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
with open(tree.path) as f:
tree_source = f.readlines()
coverage_visitor = LineCoverageVisitor(tree_source)
tree.accept(coverage_visitor)
covered_lines = []
for line_number, (_, typed) in enumerate(coverage_visitor.lines_covered):
if typed:
covered_lines.append(line_number + 1)
self.lines_covered[os.path.abspath(tree.path)] = covered_lines
def on_finish(self) -> None:
with open(os.path.join(self.output_dir, "coverage.json"), "w") as f:
json.dump({"lines": self.lines_covered}, f)
register_reporter("linecoverage", LineCoverageReporter)
class FileInfo:
def __init__(self, name: str, module: str) -> None:
self.name = name
self.module = module
self.counts = [0] * len(stats.precision_names)
def total(self) -> int:
return sum(self.counts)
def attrib(self) -> dict[str, str]:
return {name: str(val) for name, val in sorted(zip(stats.precision_names, self.counts))}
class MemoryXmlReporter(AbstractReporter):
"""Internal reporter that generates XML in memory.
This is used by all other XML-based reporters to avoid duplication.
"""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.xslt_html_path = os.path.join(reports.data_dir, "xml", "mypy-html.xslt")
self.xslt_txt_path = os.path.join(reports.data_dir, "xml", "mypy-txt.xslt")
self.css_html_path = os.path.join(reports.data_dir, "xml", "mypy-html.css")
xsd_path = os.path.join(reports.data_dir, "xml", "mypy.xsd")
self.schema = etree.XMLSchema(etree.parse(xsd_path))
self.last_xml: Any | None = None
self.files: list[FileInfo] = []
# XML doesn't like control characters, but they are sometimes
# legal in source code (e.g. comments, string literals).
# Tabs (#x09) are allowed in XML content.
control_fixer: Final = str.maketrans("".join(chr(i) for i in range(32) if i != 9), "?" * 31)
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
self.last_xml = None
try:
path = os.path.relpath(tree.path)
except ValueError:
return
if should_skip_path(path) or os.path.isdir(path):
return # `path` can sometimes be a directory, see #11334
visitor = stats.StatisticsVisitor(
inferred=True,
filename=tree.fullname,
modules=modules,
typemap=type_map,
all_nodes=True,
)
tree.accept(visitor)
root = etree.Element("mypy-report-file", name=path, module=tree._fullname)
doc = etree.ElementTree(root)
file_info = FileInfo(path, tree._fullname)
for lineno, line_text in iterate_python_lines(path):
status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
file_info.counts[status] += 1
etree.SubElement(
root,
"line",
any_info=self._get_any_info_for_line(visitor, lineno),
content=line_text.rstrip("\n").translate(self.control_fixer),
number=str(lineno),
precision=stats.precision_names[status],
)
# Assumes a layout similar to what XmlReporter uses.
xslt_path = os.path.relpath("mypy-html.xslt", path)
transform_pi = etree.ProcessingInstruction(
"xml-stylesheet", f'type="text/xsl" href="{pathname2url(xslt_path)}"'
)
root.addprevious(transform_pi)
self.schema.assertValid(doc)
self.last_xml = doc
self.files.append(file_info)
@staticmethod
def _get_any_info_for_line(visitor: stats.StatisticsVisitor, lineno: int) -> str:
if lineno in visitor.any_line_map:
result = "Any Types on this line: "
counter: collections.Counter[int] = collections.Counter()
for typ in visitor.any_line_map[lineno]:
counter[typ.type_of_any] += 1
for any_type, occurrences in counter.items():
result += f"\n{type_of_any_name_map[any_type]} (x{occurrences})"
return result
else:
return "No Anys on this line!"
def on_finish(self) -> None:
self.last_xml = None
# index_path = os.path.join(self.output_dir, 'index.xml')
output_files = sorted(self.files, key=lambda x: x.module)
root = etree.Element("mypy-report-index", name="index")
doc = etree.ElementTree(root)
for file_info in output_files:
etree.SubElement(
root,
"file",
file_info.attrib(),
module=file_info.module,
name=pathname2url(file_info.name),
total=str(file_info.total()),
)
xslt_path = os.path.relpath("mypy-html.xslt", ".")
transform_pi = etree.ProcessingInstruction(
"xml-stylesheet", f'type="text/xsl" href="{pathname2url(xslt_path)}"'
)
root.addprevious(transform_pi)
self.schema.assertValid(doc)
self.last_xml = doc
register_reporter("memory-xml", MemoryXmlReporter, needs_lxml=True)
def get_line_rate(covered_lines: int, total_lines: int) -> str:
if total_lines == 0:
return str(1.0)
else:
return f"{covered_lines / total_lines:.4f}"
class CoberturaPackage:
"""Container for XML and statistics mapping python modules to Cobertura package."""
def __init__(self, name: str) -> None:
self.name = name
self.classes: dict[str, Any] = {}
self.packages: dict[str, CoberturaPackage] = {}
self.total_lines = 0
self.covered_lines = 0
def as_xml(self) -> Any:
package_element = etree.Element("package", complexity="1.0", name=self.name)
package_element.attrib["branch-rate"] = "0"
package_element.attrib["line-rate"] = get_line_rate(self.covered_lines, self.total_lines)
classes_element = etree.SubElement(package_element, "classes")
for class_name in sorted(self.classes):
classes_element.append(self.classes[class_name])
self.add_packages(package_element)
return package_element
def add_packages(self, parent_element: Any) -> None:
if self.packages:
packages_element = etree.SubElement(parent_element, "packages")
for package in sorted(self.packages.values(), key=attrgetter("name")):
packages_element.append(package.as_xml())
class CoberturaXmlReporter(AbstractReporter):
"""Reporter for generating Cobertura compliant XML."""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.root = etree.Element("coverage", timestamp=str(int(time.time())), version=__version__)
self.doc = etree.ElementTree(self.root)
self.root_package = CoberturaPackage(".")
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
path = os.path.relpath(tree.path)
visitor = stats.StatisticsVisitor(
inferred=True,
filename=tree.fullname,
modules=modules,
typemap=type_map,
all_nodes=True,
)
tree.accept(visitor)
class_name = os.path.basename(path)
file_info = FileInfo(path, tree._fullname)
class_element = etree.Element("class", complexity="1.0", filename=path, name=class_name)
etree.SubElement(class_element, "methods")
lines_element = etree.SubElement(class_element, "lines")
class_lines_covered = 0
class_total_lines = 0
for lineno, _ in iterate_python_lines(path):
status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
hits = 0
branch = False
if status == stats.TYPE_EMPTY:
continue
class_total_lines += 1
if status != stats.TYPE_ANY:
class_lines_covered += 1
hits = 1
if status == stats.TYPE_IMPRECISE:
branch = True
file_info.counts[status] += 1
line_element = etree.SubElement(
lines_element,
"line",
branch=str(branch).lower(),
hits=str(hits),
number=str(lineno),
precision=stats.precision_names[status],
)
if branch:
line_element.attrib["condition-coverage"] = "50% (1/2)"
class_element.attrib["branch-rate"] = "0"
class_element.attrib["line-rate"] = get_line_rate(class_lines_covered, class_total_lines)
# parent_module is set to whichever module contains this file. For most files, we want
# to simply strip the last element off of the module. But for __init__.py files,
# the module == the parent module.
parent_module = file_info.module.rsplit(".", 1)[0]
if file_info.name.endswith("__init__.py"):
parent_module = file_info.module
if parent_module not in self.root_package.packages:
self.root_package.packages[parent_module] = CoberturaPackage(parent_module)
current_package = self.root_package.packages[parent_module]
packages_to_update = [self.root_package, current_package]
for package in packages_to_update:
package.total_lines += class_total_lines
package.covered_lines += class_lines_covered
current_package.classes[class_name] = class_element
def on_finish(self) -> None:
self.root.attrib["line-rate"] = get_line_rate(
self.root_package.covered_lines, self.root_package.total_lines
)
self.root.attrib["branch-rate"] = "0"
self.root.attrib["lines-covered"] = str(self.root_package.covered_lines)
self.root.attrib["lines-valid"] = str(self.root_package.total_lines)
sources = etree.SubElement(self.root, "sources")
source_element = etree.SubElement(sources, "source")
source_element.text = os.getcwd()
self.root_package.add_packages(self.root)
out_path = os.path.join(self.output_dir, "cobertura.xml")
self.doc.write(out_path, encoding="utf-8", pretty_print=True)
print("Generated Cobertura report:", os.path.abspath(out_path))
register_reporter("cobertura-xml", CoberturaXmlReporter, needs_lxml=True)
class AbstractXmlReporter(AbstractReporter):
"""Internal abstract class for reporters that work via XML."""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
memory_reporter = reports.add_report("memory-xml", "<memory>")
assert isinstance(memory_reporter, MemoryXmlReporter)
# The dependency will be called first.
self.memory_xml = memory_reporter
class XmlReporter(AbstractXmlReporter):
"""Public reporter that exports XML.
The produced XML files contain a reference to the absolute path
of the html transform, so they will be locally viewable in a browser.
However, there is a bug in Chrome and all other WebKit-based browsers
that makes it fail from file:// URLs but work on http:// URLs.
"""
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
last_xml = self.memory_xml.last_xml
if last_xml is None:
return
path = os.path.relpath(tree.path)
if path.startswith(".."):
return
out_path = os.path.join(self.output_dir, "xml", path + ".xml")
os.makedirs(os.path.dirname(out_path), exist_ok=True)
last_xml.write(out_path, encoding="utf-8")
def on_finish(self) -> None:
last_xml = self.memory_xml.last_xml
assert last_xml is not None
out_path = os.path.join(self.output_dir, "index.xml")
out_xslt = os.path.join(self.output_dir, "mypy-html.xslt")
out_css = os.path.join(self.output_dir, "mypy-html.css")
last_xml.write(out_path, encoding="utf-8")
shutil.copyfile(self.memory_xml.xslt_html_path, out_xslt)
shutil.copyfile(self.memory_xml.css_html_path, out_css)
print("Generated XML report:", os.path.abspath(out_path))
register_reporter("xml", XmlReporter, needs_lxml=True)
class XsltHtmlReporter(AbstractXmlReporter):
"""Public reporter that exports HTML via XSLT.
    This is slightly different from running `xsltproc` on the .xml files,
because it passes a parameter to rewrite the links.
"""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.xslt_html = etree.XSLT(etree.parse(self.memory_xml.xslt_html_path))
self.param_html = etree.XSLT.strparam("html")
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
last_xml = self.memory_xml.last_xml
if last_xml is None:
return
path = os.path.relpath(tree.path)
if path.startswith(".."):
return
out_path = os.path.join(self.output_dir, "html", path + ".html")
os.makedirs(os.path.dirname(out_path), exist_ok=True)
transformed_html = bytes(self.xslt_html(last_xml, ext=self.param_html))
with open(out_path, "wb") as out_file:
out_file.write(transformed_html)
def on_finish(self) -> None:
last_xml = self.memory_xml.last_xml
assert last_xml is not None
out_path = os.path.join(self.output_dir, "index.html")
out_css = os.path.join(self.output_dir, "mypy-html.css")
transformed_html = bytes(self.xslt_html(last_xml, ext=self.param_html))
with open(out_path, "wb") as out_file:
out_file.write(transformed_html)
shutil.copyfile(self.memory_xml.css_html_path, out_css)
print("Generated HTML report (via XSLT):", os.path.abspath(out_path))
register_reporter("xslt-html", XsltHtmlReporter, needs_lxml=True)
class XsltTxtReporter(AbstractXmlReporter):
"""Public reporter that exports TXT via XSLT.
Currently this only does the summary, not the individual reports.
"""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.xslt_txt = etree.XSLT(etree.parse(self.memory_xml.xslt_txt_path))
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
pass
def on_finish(self) -> None:
last_xml = self.memory_xml.last_xml
assert last_xml is not None
out_path = os.path.join(self.output_dir, "index.txt")
transformed_txt = bytes(self.xslt_txt(last_xml))
with open(out_path, "wb") as out_file:
out_file.write(transformed_txt)
print("Generated TXT report (via XSLT):", os.path.abspath(out_path))
register_reporter("xslt-txt", XsltTxtReporter, needs_lxml=True)
alias_reporter("xslt-html", "html")
alias_reporter("xslt-txt", "txt")
class LinePrecisionReporter(AbstractReporter):
"""Report per-module line counts for typing precision.
Each line is classified into one of these categories:
* precise (fully type checked)
* imprecise (Any types in a type component, such as List[Any])
* any (something with an Any type, implicit or explicit)
* empty (empty line, comment or docstring)
* unanalyzed (mypy considers line unreachable)
The meaning of these categories varies slightly depending on
context.
"""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.files: list[FileInfo] = []
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
try:
path = os.path.relpath(tree.path)
except ValueError:
return
if should_skip_path(path):
return
visitor = stats.StatisticsVisitor(
inferred=True,
filename=tree.fullname,
modules=modules,
typemap=type_map,
all_nodes=True,
)
tree.accept(visitor)
file_info = FileInfo(path, tree._fullname)
for lineno, _ in iterate_python_lines(path):
status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
file_info.counts[status] += 1
self.files.append(file_info)
def on_finish(self) -> None:
if not self.files:
# Nothing to do.
return
output_files = sorted(self.files, key=lambda x: x.module)
report_file = os.path.join(self.output_dir, "lineprecision.txt")
width = max(4, max(len(info.module) for info in output_files))
titles = ("Lines", "Precise", "Imprecise", "Any", "Empty", "Unanalyzed")
widths = (width,) + tuple(len(t) for t in titles)
fmt = "{:%d} {:%d} {:%d} {:%d} {:%d} {:%d} {:%d}\n" % widths
with open(report_file, "w") as f:
f.write(fmt.format("Name", *titles))
f.write("-" * (width + 51) + "\n")
for file_info in output_files:
counts = file_info.counts
f.write(
fmt.format(
file_info.module.ljust(width),
file_info.total(),
counts[stats.TYPE_PRECISE],
counts[stats.TYPE_IMPRECISE],
counts[stats.TYPE_ANY],
counts[stats.TYPE_EMPTY],
counts[stats.TYPE_UNANALYZED],
)
)
register_reporter("lineprecision", LinePrecisionReporter)
# Reporter class names are defined twice to speed up mypy startup, as this
# module is slow to import. Ensure that the two definitions match.
assert set(reporter_classes) == set(REPORTER_NAMES)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/report.py
|
Python
|
NOASSERTION
| 34,446 |
"""Track current scope to easily calculate the corresponding fine-grained target.
TODO: Use everywhere where we track targets, including in mypy.errors.
"""
from __future__ import annotations
from contextlib import contextmanager, nullcontext
from typing import Iterator, Optional, Tuple
from typing_extensions import TypeAlias as _TypeAlias
from mypy.nodes import FuncBase, TypeInfo
SavedScope: _TypeAlias = Tuple[str, Optional[TypeInfo], Optional[FuncBase]]
class Scope:
"""Track which target we are processing at any given time."""
def __init__(self) -> None:
self.module: str | None = None
self.classes: list[TypeInfo] = []
self.function: FuncBase | None = None
self.functions: list[FuncBase] = []
# Number of nested scopes ignored (that don't get their own separate targets)
self.ignored = 0
def current_module_id(self) -> str:
assert self.module
return self.module
def current_target(self) -> str:
"""Return the current target (non-class; for a class return enclosing module)."""
assert self.module
if self.function:
fullname = self.function.fullname
return fullname or ""
return self.module
def current_full_target(self) -> str:
"""Return the current target (may be a class)."""
assert self.module
if self.function:
return self.function.fullname
if self.classes:
return self.classes[-1].fullname
return self.module
def current_type_name(self) -> str | None:
"""Return the current type's short name if it exists"""
return self.classes[-1].name if self.classes else None
def current_function_name(self) -> str | None:
"""Return the current function's short name if it exists"""
return self.function.name if self.function else None
@contextmanager
def module_scope(self, prefix: str) -> Iterator[None]:
self.module = prefix
self.classes = []
self.function = None
self.ignored = 0
yield
assert self.module
self.module = None
@contextmanager
def function_scope(self, fdef: FuncBase) -> Iterator[None]:
self.functions.append(fdef)
if not self.function:
self.function = fdef
else:
# Nested functions are part of the topmost function target.
self.ignored += 1
yield
self.functions.pop()
if self.ignored:
# Leave a scope that's included in the enclosing target.
self.ignored -= 1
else:
assert self.function
self.function = None
def outer_functions(self) -> list[FuncBase]:
return self.functions[:-1]
def enter_class(self, info: TypeInfo) -> None:
"""Enter a class target scope."""
if not self.function:
self.classes.append(info)
else:
# Classes within functions are part of the enclosing function target.
self.ignored += 1
def leave_class(self) -> None:
"""Leave a class target scope."""
if self.ignored:
# Leave a scope that's included in the enclosing target.
self.ignored -= 1
else:
assert self.classes
# Leave the innermost class.
self.classes.pop()
@contextmanager
def class_scope(self, info: TypeInfo) -> Iterator[None]:
self.enter_class(info)
yield
self.leave_class()
def save(self) -> SavedScope:
"""Produce a saved scope that can be entered with saved_scope()"""
assert self.module
# We only save the innermost class, which is sufficient since
# the rest are only needed for when classes are left.
cls = self.classes[-1] if self.classes else None
return self.module, cls, self.function
@contextmanager
def saved_scope(self, saved: SavedScope) -> Iterator[None]:
module, info, function = saved
with self.module_scope(module):
with self.class_scope(info) if info else nullcontext():
with self.function_scope(function) if function else nullcontext():
yield
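# Illustrative only, not part of mypy: a minimal sketch of how a caller is
# expected to drive Scope while walking a module. The module id "pkg.mod" and
# the `cls`/`func` nodes are hypothetical placeholders supplied by the caller.
def _example_scope_usage(cls: TypeInfo, func: FuncBase) -> list[str]:
    scope = Scope()
    targets: list[str] = []
    with scope.module_scope("pkg.mod"):
        targets.append(scope.current_full_target())  # "pkg.mod"
        with scope.class_scope(cls):
            targets.append(scope.current_full_target())  # e.g. "pkg.mod.C"
            with scope.function_scope(func):
                targets.append(scope.current_target())  # e.g. "pkg.mod.C.meth"
    return targets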
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/scope.py
|
Python
|
NOASSERTION
| 4,258 |
"""The semantic analyzer.
Bind names to definitions and do various other simple consistency
checks. Populate symbol tables. The semantic analyzer also detects
special forms which reuse generic syntax such as NamedTuple and
cast(). Multiple analysis iterations may be needed to analyze forward
references and import cycles. Each iteration "fills in" additional
bindings and references until everything has been bound.
For example, consider this program:
x = 1
y = x
Here semantic analysis would detect that the assignment 'x = 1'
defines a new variable, the type of which is to be inferred (in a
later pass; type inference or type checking is not part of semantic
analysis). Also, it would bind both references to 'x' to the same
module-level variable (Var) node. The second assignment would also
be analyzed, and the type of 'y' marked as being inferred.
Semantic analysis of types is implemented in typeanal.py.
See semanal_main.py for the top-level logic.
Some important properties:
* After semantic analysis is complete, no PlaceholderNode and
PlaceholderType instances should remain. During semantic analysis,
if we encounter one of these, the current target should be deferred.
* A TypeInfo is only created once we know certain basic information about
a type, such as the MRO, existence of a Tuple base class (e.g., for named
tuples), and whether we have a TypedDict. We use a temporary
PlaceholderNode node in the symbol table if some such information is
missing.
* For assignments, we only add a non-placeholder symbol table entry once
we know the sort of thing being defined (variable, NamedTuple, type alias,
etc.).
* Every part of the analysis step must support multiple iterations over
the same AST nodes, and each iteration must be able to fill in arbitrary
things that were missing or incomplete in previous iterations.
* Changes performed by the analysis need to be reversible, since mypy
daemon strips and reuses existing ASTs (to improve performance and/or
reduce memory use).
"""
from __future__ import annotations
from contextlib import contextmanager
from typing import Any, Callable, Collection, Final, Iterable, Iterator, List, TypeVar, cast
from typing_extensions import TypeAlias as _TypeAlias, TypeGuard
from mypy import errorcodes as codes, message_registry
from mypy.constant_fold import constant_fold_expr
from mypy.errorcodes import PROPERTY_DECORATOR, ErrorCode
from mypy.errors import Errors, report_internal_error
from mypy.exprtotype import TypeTranslationError, expr_to_unanalyzed_type
from mypy.message_registry import ErrorMessage
from mypy.messages import (
SUGGESTED_TEST_FIXTURES,
TYPES_FOR_UNIMPORTED_HINTS,
MessageBuilder,
best_matches,
pretty_seq,
)
from mypy.mro import MroError, calculate_mro
from mypy.nodes import (
ARG_NAMED,
ARG_POS,
ARG_STAR,
ARG_STAR2,
CONTRAVARIANT,
COVARIANT,
GDEF,
IMPLICITLY_ABSTRACT,
INVARIANT,
IS_ABSTRACT,
LDEF,
MDEF,
NOT_ABSTRACT,
PARAM_SPEC_KIND,
REVEAL_LOCALS,
REVEAL_TYPE,
RUNTIME_PROTOCOL_DECOS,
TYPE_VAR_KIND,
TYPE_VAR_TUPLE_KIND,
VARIANCE_NOT_READY,
ArgKind,
AssertStmt,
AssertTypeExpr,
AssignmentExpr,
AssignmentStmt,
AwaitExpr,
Block,
BreakStmt,
CallExpr,
CastExpr,
ClassDef,
ComparisonExpr,
ConditionalExpr,
Context,
ContinueStmt,
DataclassTransformSpec,
Decorator,
DelStmt,
DictExpr,
DictionaryComprehension,
EllipsisExpr,
EnumCallExpr,
Expression,
ExpressionStmt,
FakeExpression,
ForStmt,
FuncBase,
FuncDef,
FuncItem,
GeneratorExpr,
GlobalDecl,
IfStmt,
Import,
ImportAll,
ImportBase,
ImportFrom,
IndexExpr,
LambdaExpr,
ListComprehension,
ListExpr,
Lvalue,
MatchStmt,
MemberExpr,
MypyFile,
NamedTupleExpr,
NameExpr,
Node,
NonlocalDecl,
OperatorAssignmentStmt,
OpExpr,
OverloadedFuncDef,
OverloadPart,
ParamSpecExpr,
PassStmt,
PlaceholderNode,
PromoteExpr,
RaiseStmt,
RefExpr,
ReturnStmt,
RevealExpr,
SetComprehension,
SetExpr,
SliceExpr,
StarExpr,
Statement,
StrExpr,
SuperExpr,
SymbolNode,
SymbolTable,
SymbolTableNode,
TempNode,
TryStmt,
TupleExpr,
TypeAlias,
TypeAliasExpr,
TypeAliasStmt,
TypeApplication,
TypedDictExpr,
TypeInfo,
TypeParam,
TypeVarExpr,
TypeVarLikeExpr,
TypeVarTupleExpr,
UnaryExpr,
Var,
WhileStmt,
WithStmt,
YieldExpr,
YieldFromExpr,
get_member_expr_fullname,
get_nongen_builtins,
implicit_module_attrs,
is_final_node,
type_aliases,
type_aliases_source_versions,
typing_extensions_aliases,
)
from mypy.options import Options
from mypy.patterns import (
AsPattern,
ClassPattern,
MappingPattern,
OrPattern,
SequencePattern,
StarredPattern,
ValuePattern,
)
from mypy.plugin import (
ClassDefContext,
DynamicClassDefContext,
Plugin,
SemanticAnalyzerPluginInterface,
)
from mypy.plugins import dataclasses as dataclasses_plugin
from mypy.reachability import (
ALWAYS_FALSE,
ALWAYS_TRUE,
MYPY_FALSE,
MYPY_TRUE,
infer_condition_value,
infer_reachability_of_if_statement,
infer_reachability_of_match_statement,
)
from mypy.scope import Scope
from mypy.semanal_enum import EnumCallAnalyzer
from mypy.semanal_namedtuple import NamedTupleAnalyzer
from mypy.semanal_newtype import NewTypeAnalyzer
from mypy.semanal_shared import (
ALLOW_INCOMPATIBLE_OVERRIDE,
PRIORITY_FALLBACKS,
SemanticAnalyzerInterface,
calculate_tuple_fallback,
find_dataclass_transform_spec,
has_placeholder,
parse_bool,
require_bool_literal_argument,
set_callable_name as set_callable_name,
)
from mypy.semanal_typeddict import TypedDictAnalyzer
from mypy.tvar_scope import TypeVarLikeScope
from mypy.typeanal import (
SELF_TYPE_NAMES,
FindTypeVarVisitor,
TypeAnalyser,
TypeVarDefaultTranslator,
TypeVarLikeList,
analyze_type_alias,
check_for_explicit_any,
detect_diverging_alias,
find_self_type,
fix_instance,
has_any_from_unimported_type,
no_subscript_builtin_alias,
type_constructors,
validate_instance,
)
from mypy.typeops import function_type, get_type_vars, try_getting_str_literals_from_type
from mypy.types import (
ASSERT_TYPE_NAMES,
DATACLASS_TRANSFORM_NAMES,
FINAL_DECORATOR_NAMES,
FINAL_TYPE_NAMES,
IMPORTED_REVEAL_TYPE_NAMES,
NEVER_NAMES,
OVERLOAD_NAMES,
OVERRIDE_DECORATOR_NAMES,
PROTOCOL_NAMES,
REVEAL_TYPE_NAMES,
TPDICT_NAMES,
TYPE_ALIAS_NAMES,
TYPE_CHECK_ONLY_NAMES,
TYPE_VAR_LIKE_NAMES,
TYPED_NAMEDTUPLE_NAMES,
AnyType,
CallableType,
FunctionLike,
Instance,
LiteralType,
NoneType,
Overloaded,
Parameters,
ParamSpecType,
PlaceholderType,
ProperType,
TrivialSyntheticTypeTranslator,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeType,
TypeVarId,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
UnboundType,
UnionType,
UnpackType,
get_proper_type,
get_proper_types,
is_named_instance,
remove_dups,
type_vars_as_args,
)
from mypy.types_utils import is_invalid_recursive_alias, store_argument_type
from mypy.typevars import fill_typevars
from mypy.util import correct_relative_import, is_dunder, module_prefix, unmangle, unnamed_function
from mypy.visitor import NodeVisitor
T = TypeVar("T")
FUTURE_IMPORTS: Final = {
"__future__.nested_scopes": "nested_scopes",
"__future__.generators": "generators",
"__future__.division": "division",
"__future__.absolute_import": "absolute_import",
"__future__.with_statement": "with_statement",
"__future__.print_function": "print_function",
"__future__.unicode_literals": "unicode_literals",
"__future__.barry_as_FLUFL": "barry_as_FLUFL",
"__future__.generator_stop": "generator_stop",
"__future__.annotations": "annotations",
}
# Special cased built-in classes that are needed for basic functionality and need to be
# available very early on.
CORE_BUILTIN_CLASSES: Final = ["object", "bool", "function"]
# Python has several different scope/namespace kinds with subtly different semantics.
SCOPE_GLOBAL: Final = 0 # Module top level
SCOPE_CLASS: Final = 1 # Class body
SCOPE_FUNC: Final = 2 # Function or lambda
SCOPE_COMPREHENSION: Final = 3 # Comprehension or generator expression
SCOPE_ANNOTATION: Final = 4 # Annotation scopes for type parameters and aliases (PEP 695)
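# Illustrative only, not part of mypy: hypothetical constructs and the scope kind
# the analyzer is in while processing each of them.
#
#     x = 1                      -> SCOPE_GLOBAL (module top level)
#     class C: y = 2             -> SCOPE_CLASS for the body of C
#     def f() -> None: z = 3     -> SCOPE_FUNC for the body of f
#     [v for v in items]         -> SCOPE_COMPREHENSION for the comprehension
#     class D[T]: ...            -> SCOPE_ANNOTATION for the PEP 695 type parameters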
# Used for tracking incomplete references
Tag: _TypeAlias = int
class SemanticAnalyzer(
NodeVisitor[None], SemanticAnalyzerInterface, SemanticAnalyzerPluginInterface
):
"""Semantically analyze parsed mypy files.
The analyzer binds names and does various consistency checks for an
AST. Note that type checking is performed as a separate pass.
"""
__deletable__ = ["patches", "options", "cur_mod_node"]
# Module name space
modules: dict[str, MypyFile]
# Global name space for current module
globals: SymbolTable
# Names declared using "global" (separate set for each scope)
global_decls: list[set[str]]
# Names declared using "nonlocal" (separate set for each scope)
nonlocal_decls: list[set[str]]
# Local names of function scopes; None for non-function scopes.
locals: list[SymbolTable | None]
# Type of each scope (SCOPE_*, indexes match locals)
scope_stack: list[int]
# Nested block depths of scopes
block_depth: list[int]
# TypeInfo of directly enclosing class (or None)
_type: TypeInfo | None = None
    # Stack of outer classes (or None where there was no enclosing class).
type_stack: list[TypeInfo | None]
# Type variables bound by the current scope, be it class or function
tvar_scope: TypeVarLikeScope
# Per-module options
options: Options
# Stack of functions being analyzed
function_stack: list[FuncItem]
# Set to True if semantic analysis defines a name, or replaces a
# placeholder definition. If some iteration makes no progress,
# there can be at most one additional final iteration (see below).
progress = False
deferred = False # Set to true if another analysis pass is needed
incomplete = False # Set to true if current module namespace is missing things
# Is this the final iteration of semantic analysis (where we report
# unbound names due to cyclic definitions and should not defer)?
_final_iteration = False
# These names couldn't be added to the symbol table due to incomplete deps.
# Note that missing names are per module, _not_ per namespace. This means that e.g.
    # a missing name at global scope will block adding the same name at a class scope.
# This should not affect correctness and is purely a performance issue,
# since it can cause unnecessary deferrals. These are represented as
# PlaceholderNodes in the symbol table. We use this to ensure that the first
# definition takes precedence even if it's incomplete.
#
# Note that a star import adds a special name '*' to the set, this blocks
# adding _any_ names in the current file.
missing_names: list[set[str]]
# Callbacks that will be called after semantic analysis to tweak things.
patches: list[tuple[int, Callable[[], None]]]
loop_depth: list[int] # Depth of breakable loops
cur_mod_id = "" # Current module id (or None) (phase 2)
_is_stub_file = False # Are we analyzing a stub file?
_is_typeshed_stub_file = False # Are we analyzing a typeshed stub file?
imports: set[str] # Imported modules (during phase 2 analysis)
# Note: some imports (and therefore dependencies) might
# not be found in phase 1, for example due to * imports.
errors: Errors # Keeps track of generated errors
plugin: Plugin # Mypy plugin for special casing of library features
statement: Statement | None = None # Statement/definition being analyzed
# Mapping from 'async def' function definitions to their return type wrapped as a
# 'Coroutine[Any, Any, T]'. Used to keep track of whether a function definition's
# return type has already been wrapped, by checking if the function definition's
# type is stored in this mapping and that it still matches.
wrapped_coro_return_types: dict[FuncDef, Type] = {}
def __init__(
self,
modules: dict[str, MypyFile],
missing_modules: set[str],
incomplete_namespaces: set[str],
errors: Errors,
plugin: Plugin,
) -> None:
"""Construct semantic analyzer.
We reuse the same semantic analyzer instance across multiple modules.
Args:
modules: Global modules dictionary
missing_modules: Modules that could not be imported encountered so far
incomplete_namespaces: Namespaces that are being populated during semantic analysis
(can contain modules and classes within the current SCC; mutated by the caller)
errors: Report analysis errors using this instance
"""
self.locals = [None]
self.scope_stack = [SCOPE_GLOBAL]
# Saved namespaces from previous iteration. Every top-level function/method body is
# analyzed in several iterations until all names are resolved. We need to save
# the local namespaces for the top level function and all nested functions between
# these iterations. See also semanal_main.process_top_level_function().
self.saved_locals: dict[
FuncItem | GeneratorExpr | DictionaryComprehension, SymbolTable
] = {}
self.imports = set()
self._type = None
self.type_stack = []
# Are the namespaces of classes being processed complete?
self.incomplete_type_stack: list[bool] = []
self.tvar_scope = TypeVarLikeScope()
self.function_stack = []
self.block_depth = [0]
self.loop_depth = [0]
self.errors = errors
self.modules = modules
self.msg = MessageBuilder(errors, modules)
self.missing_modules = missing_modules
self.missing_names = [set()]
        # These namespaces are still in the process of being populated. If we encounter a
# missing name in these namespaces, we need to defer the current analysis target,
# since it's possible that the name will be there once the namespace is complete.
self.incomplete_namespaces = incomplete_namespaces
self.all_exports: list[str] = []
# Map from module id to list of explicitly exported names (i.e. names in __all__).
self.export_map: dict[str, list[str]] = {}
self.plugin = plugin
# If True, process function definitions. If False, don't. This is used
# for processing module top levels in fine-grained incremental mode.
self.recurse_into_functions = True
self.scope = Scope()
# Trace line numbers for every file where deferral happened during analysis of
# current SCC or top-level function.
self.deferral_debug_context: list[tuple[str, int]] = []
# This is needed to properly support recursive type aliases. The problem is that
# Foo[Bar] could mean three things depending on context: a target for type alias,
# a normal index expression (including enum index), or a type application.
# The latter is particularly problematic as it can falsely create incomplete
# refs while analysing rvalues of type aliases. To avoid this we first analyse
# rvalues while temporarily setting this to True.
self.basic_type_applications = False
# Used to temporarily enable unbound type variables in some contexts. Namely,
# in base class expressions, and in right hand sides of type aliases. Do not add
# new uses of this, as this may cause leaking `UnboundType`s to type checking.
self.allow_unbound_tvars = False
# Used to pass information about current overload index to visit_func_def().
self.current_overload_item: int | None = None
# mypyc doesn't properly handle implementing an abstractproperty
# with a regular attribute so we make them properties
@property
def type(self) -> TypeInfo | None:
return self._type
@property
def is_stub_file(self) -> bool:
return self._is_stub_file
@property
def is_typeshed_stub_file(self) -> bool:
return self._is_typeshed_stub_file
@property
def final_iteration(self) -> bool:
return self._final_iteration
@contextmanager
def allow_unbound_tvars_set(self) -> Iterator[None]:
old = self.allow_unbound_tvars
self.allow_unbound_tvars = True
try:
yield
finally:
self.allow_unbound_tvars = old
#
# Preparing module (performed before semantic analysis)
#
def prepare_file(self, file_node: MypyFile) -> None:
"""Prepare a freshly parsed file for semantic analysis."""
if "builtins" in self.modules:
file_node.names["__builtins__"] = SymbolTableNode(GDEF, self.modules["builtins"])
if file_node.fullname == "builtins":
self.prepare_builtins_namespace(file_node)
if file_node.fullname == "typing":
self.prepare_typing_namespace(file_node, type_aliases)
if file_node.fullname == "typing_extensions":
self.prepare_typing_namespace(file_node, typing_extensions_aliases)
def prepare_typing_namespace(self, file_node: MypyFile, aliases: dict[str, str]) -> None:
"""Remove dummy alias definitions such as List = TypeAlias(object) from typing.
They will be replaced with real aliases when corresponding targets are ready.
"""
# This is all pretty unfortunate. typeshed now has a
# sys.version_info check for OrderedDict, and we shouldn't
# take it out, because it is correct and a typechecker should
# use that as a source of truth. But instead we rummage
# through IfStmts to remove the info first. (I tried to
# remove this whole machinery and ran into issues with the
# builtins/typing import cycle.)
def helper(defs: list[Statement]) -> None:
for stmt in defs.copy():
if isinstance(stmt, IfStmt):
for body in stmt.body:
helper(body.body)
if stmt.else_body:
helper(stmt.else_body.body)
if (
isinstance(stmt, AssignmentStmt)
and len(stmt.lvalues) == 1
and isinstance(stmt.lvalues[0], NameExpr)
):
# Assignment to a simple name, remove it if it is a dummy alias.
if f"{file_node.fullname}.{stmt.lvalues[0].name}" in aliases:
defs.remove(stmt)
helper(file_node.defs)
def prepare_builtins_namespace(self, file_node: MypyFile) -> None:
"""Add certain special-cased definitions to the builtins module.
Some definitions are too special or fundamental to be processed
normally from the AST.
"""
names = file_node.names
# Add empty definition for core built-in classes, since they are required for basic
# operation. These will be completed later on.
for name in CORE_BUILTIN_CLASSES:
cdef = ClassDef(name, Block([])) # Dummy ClassDef, will be replaced later
info = TypeInfo(SymbolTable(), cdef, "builtins")
info._fullname = f"builtins.{name}"
names[name] = SymbolTableNode(GDEF, info)
bool_info = names["bool"].node
assert isinstance(bool_info, TypeInfo)
bool_type = Instance(bool_info, [])
special_var_types: list[tuple[str, Type]] = [
("None", NoneType()),
# reveal_type is a mypy-only function that gives an error with
# the type of its arg.
("reveal_type", AnyType(TypeOfAny.special_form)),
# reveal_locals is a mypy-only function that gives an error with the types of
# locals
("reveal_locals", AnyType(TypeOfAny.special_form)),
("True", bool_type),
("False", bool_type),
("__debug__", bool_type),
]
for name, typ in special_var_types:
v = Var(name, typ)
v._fullname = f"builtins.{name}"
file_node.names[name] = SymbolTableNode(GDEF, v)
#
# Analyzing a target
#
def refresh_partial(
self,
node: MypyFile | FuncDef | OverloadedFuncDef,
patches: list[tuple[int, Callable[[], None]]],
final_iteration: bool,
file_node: MypyFile,
options: Options,
active_type: TypeInfo | None = None,
) -> None:
"""Refresh a stale target in fine-grained incremental mode."""
self.patches = patches
self.deferred = False
self.incomplete = False
self._final_iteration = final_iteration
self.missing_names[-1] = set()
with self.file_context(file_node, options, active_type):
if isinstance(node, MypyFile):
self.refresh_top_level(node)
else:
self.recurse_into_functions = True
self.accept(node)
del self.patches
def refresh_top_level(self, file_node: MypyFile) -> None:
"""Reanalyze a stale module top-level in fine-grained incremental mode."""
self.recurse_into_functions = False
self.add_implicit_module_attrs(file_node)
for d in file_node.defs:
self.accept(d)
if file_node.fullname == "typing":
self.add_builtin_aliases(file_node)
if file_node.fullname == "typing_extensions":
self.add_typing_extension_aliases(file_node)
self.adjust_public_exports()
self.export_map[self.cur_mod_id] = self.all_exports
self.all_exports = []
def add_implicit_module_attrs(self, file_node: MypyFile) -> None:
"""Manually add implicit definitions of module '__name__' etc."""
str_type: Type | None = self.named_type_or_none("builtins.str")
if str_type is None:
str_type = UnboundType("builtins.str")
inst: Type | None
for name, t in implicit_module_attrs.items():
if name == "__doc__":
typ: Type = str_type
elif name == "__path__":
if not file_node.is_package_init_file():
continue
# Need to construct the type ourselves, to avoid issues with __builtins__.list
# not being subscriptable or typing.List not getting bound
inst = self.named_type_or_none("builtins.list", [str_type])
if inst is None:
assert not self.final_iteration, "Cannot find builtins.list to add __path__"
self.defer()
return
typ = inst
elif name == "__annotations__":
inst = self.named_type_or_none(
"builtins.dict", [str_type, AnyType(TypeOfAny.special_form)]
)
if inst is None:
assert (
not self.final_iteration
), "Cannot find builtins.dict to add __annotations__"
self.defer()
return
typ = inst
elif name == "__spec__":
if self.options.use_builtins_fixtures:
inst = self.named_type_or_none("builtins.object")
else:
inst = self.named_type_or_none("importlib.machinery.ModuleSpec")
if inst is None:
if self.final_iteration:
inst = self.named_type_or_none("builtins.object")
assert inst is not None, "Cannot find builtins.object"
else:
self.defer()
return
if file_node.name == "__main__":
# https://docs.python.org/3/reference/import.html#main-spec
inst = UnionType.make_union([inst, NoneType()])
typ = inst
else:
assert t is not None, f"type should be specified for {name}"
typ = UnboundType(t)
existing = file_node.names.get(name)
if existing is not None and not isinstance(existing.node, PlaceholderNode):
# Already exists.
continue
an_type = self.anal_type(typ)
if an_type:
var = Var(name, an_type)
var._fullname = self.qualified_name(name)
var.is_ready = True
self.add_symbol(name, var, dummy_context())
else:
self.add_symbol(
name,
PlaceholderNode(self.qualified_name(name), file_node, -1),
dummy_context(),
)
def add_builtin_aliases(self, tree: MypyFile) -> None:
"""Add builtin type aliases to typing module.
For historical reasons, the aliases like `List = list` are not defined
in typeshed stubs for typing module. Instead we need to manually add the
corresponding nodes on the fly. We explicitly mark these aliases as normalized,
so that a user can write `typing.List[int]`.
"""
assert tree.fullname == "typing"
for alias, target_name in type_aliases.items():
if (
alias in type_aliases_source_versions
and type_aliases_source_versions[alias] > self.options.python_version
):
# This alias is not available on this Python version.
continue
name = alias.split(".")[-1]
if name in tree.names and not isinstance(tree.names[name].node, PlaceholderNode):
continue
self.create_alias(tree, target_name, alias, name)
def add_typing_extension_aliases(self, tree: MypyFile) -> None:
"""Typing extensions module does contain some type aliases.
We need to analyze them as such, because in typeshed
they are just defined as `_Alias()` call.
Which is not supported natively.
"""
assert tree.fullname == "typing_extensions"
for alias, target_name in typing_extensions_aliases.items():
name = alias.split(".")[-1]
if name in tree.names and isinstance(tree.names[name].node, TypeAlias):
continue # Do not reset TypeAliases on the second pass.
# We need to remove any node that is there at the moment. It is invalid.
tree.names.pop(name, None)
# Now, create a new alias.
self.create_alias(tree, target_name, alias, name)
def create_alias(self, tree: MypyFile, target_name: str, alias: str, name: str) -> None:
tag = self.track_incomplete_refs()
n = self.lookup_fully_qualified_or_none(target_name)
if n:
if isinstance(n.node, PlaceholderNode):
self.mark_incomplete(name, tree)
else:
# Found built-in class target. Create alias.
target = self.named_type_or_none(target_name, [])
assert target is not None
# Transform List to List[Any], etc.
fix_instance(
target, self.fail, self.note, disallow_any=False, options=self.options
)
alias_node = TypeAlias(
target,
alias,
line=-1,
column=-1, # there is no context
no_args=True,
normalized=True,
)
self.add_symbol(name, alias_node, tree)
elif self.found_incomplete_ref(tag):
            # Built-in class target may not be ready yet -- defer.
self.mark_incomplete(name, tree)
else:
# Test fixtures may be missing some builtin classes, which is okay.
# Kill the placeholder if there is one.
if name in tree.names:
assert isinstance(tree.names[name].node, PlaceholderNode)
del tree.names[name]
def adjust_public_exports(self) -> None:
"""Adjust the module visibility of globals due to __all__."""
if "__all__" in self.globals:
for name, g in self.globals.items():
# Being included in __all__ explicitly exports and makes public.
if name in self.all_exports:
g.module_public = True
g.module_hidden = False
# But when __all__ is defined, and a symbol is not included in it,
# it cannot be public.
else:
g.module_public = False
@contextmanager
def file_context(
self, file_node: MypyFile, options: Options, active_type: TypeInfo | None = None
) -> Iterator[None]:
"""Configure analyzer for analyzing targets within a file/class.
Args:
file_node: target file
options: options specific to the file
active_type: must be the surrounding class to analyze method targets
"""
scope = self.scope
self.options = options
self.errors.set_file(file_node.path, file_node.fullname, scope=scope, options=options)
self.cur_mod_node = file_node
self.cur_mod_id = file_node.fullname
with scope.module_scope(self.cur_mod_id):
self._is_stub_file = file_node.path.lower().endswith(".pyi")
self._is_typeshed_stub_file = file_node.is_typeshed_file(options)
self.globals = file_node.names
self.tvar_scope = TypeVarLikeScope()
self.named_tuple_analyzer = NamedTupleAnalyzer(options, self, self.msg)
self.typed_dict_analyzer = TypedDictAnalyzer(options, self, self.msg)
self.enum_call_analyzer = EnumCallAnalyzer(options, self)
self.newtype_analyzer = NewTypeAnalyzer(options, self, self.msg)
# Counter that keeps track of references to undefined things potentially caused by
# incomplete namespaces.
self.num_incomplete_refs = 0
if active_type:
enclosing_fullname = active_type.fullname.rsplit(".", 1)[0]
if "." in enclosing_fullname:
enclosing_node = self.lookup_fully_qualified_or_none(enclosing_fullname)
if enclosing_node and isinstance(enclosing_node.node, TypeInfo):
self._type = enclosing_node.node
self.push_type_args(active_type.defn.type_args, active_type.defn)
self.incomplete_type_stack.append(False)
scope.enter_class(active_type)
self.enter_class(active_type.defn.info)
for tvar in active_type.defn.type_vars:
self.tvar_scope.bind_existing(tvar)
yield
if active_type:
scope.leave_class()
self.leave_class()
self._type = None
self.incomplete_type_stack.pop()
self.pop_type_args(active_type.defn.type_args)
del self.options
#
# Functions
#
def visit_func_def(self, defn: FuncDef) -> None:
self.statement = defn
# Visit default values because they may contain assignment expressions.
for arg in defn.arguments:
if arg.initializer:
arg.initializer.accept(self)
defn.is_conditional = self.block_depth[-1] > 0
# Set full names even for those definitions that aren't added
# to a symbol table. For example, for overload items.
defn._fullname = self.qualified_name(defn.name)
# We don't add module top-level functions to symbol tables
# when we analyze their bodies in the second phase on analysis,
# since they were added in the first phase. Nested functions
# get always added, since they aren't separate targets.
if not self.recurse_into_functions or len(self.function_stack) > 0:
if not defn.is_decorated and not defn.is_overload:
self.add_function_to_symbol_table(defn)
if not self.recurse_into_functions:
return
with self.scope.function_scope(defn):
self.analyze_func_def(defn)
def function_fullname(self, fullname: str) -> str:
if self.current_overload_item is None:
return fullname
return f"{fullname}#{self.current_overload_item}"
def analyze_func_def(self, defn: FuncDef) -> None:
if self.push_type_args(defn.type_args, defn) is None:
self.defer(defn)
return
self.function_stack.append(defn)
if defn.type:
assert isinstance(defn.type, CallableType)
has_self_type = self.update_function_type_variables(defn.type, defn)
else:
has_self_type = False
self.function_stack.pop()
if self.is_class_scope():
# Method definition
assert self.type is not None
defn.info = self.type
if defn.type is not None and defn.name in ("__init__", "__init_subclass__"):
assert isinstance(defn.type, CallableType)
if isinstance(get_proper_type(defn.type.ret_type), AnyType):
defn.type = defn.type.copy_modified(ret_type=NoneType())
self.prepare_method_signature(defn, self.type, has_self_type)
# Analyze function signature
fullname = self.function_fullname(defn.fullname)
with self.tvar_scope_frame(self.tvar_scope.method_frame(fullname)):
if defn.type:
self.check_classvar_in_signature(defn.type)
assert isinstance(defn.type, CallableType)
# Signature must be analyzed in the surrounding scope so that
# class-level imported names and type variables are in scope.
analyzer = self.type_analyzer()
tag = self.track_incomplete_refs()
result = analyzer.visit_callable_type(defn.type, nested=False, namespace=fullname)
# Don't store not ready types (including placeholders).
if self.found_incomplete_ref(tag) or has_placeholder(result):
self.defer(defn)
self.pop_type_args(defn.type_args)
return
assert isinstance(result, ProperType)
if isinstance(result, CallableType):
                    # Type guards need to have a positional argument, per the spec.
skip_self = self.is_class_scope() and not defn.is_static
if result.type_guard and ARG_POS not in result.arg_kinds[skip_self:]:
self.fail(
"TypeGuard functions must have a positional argument",
result,
code=codes.VALID_TYPE,
)
# in this case, we just kind of just ... remove the type guard.
result = result.copy_modified(type_guard=None)
if result.type_is and ARG_POS not in result.arg_kinds[skip_self:]:
self.fail(
'"TypeIs" functions must have a positional argument',
result,
code=codes.VALID_TYPE,
)
result = result.copy_modified(type_is=None)
result = self.remove_unpack_kwargs(defn, result)
if has_self_type and self.type is not None:
info = self.type
if info.self_type is not None:
result.variables = [info.self_type] + list(result.variables)
defn.type = result
self.add_type_alias_deps(analyzer.aliases_used)
self.check_function_signature(defn)
self.check_paramspec_definition(defn)
if isinstance(defn, FuncDef):
assert isinstance(defn.type, CallableType)
defn.type = set_callable_name(defn.type, defn)
self.analyze_arg_initializers(defn)
self.analyze_function_body(defn)
if self.is_class_scope():
assert self.type is not None
# Mark protocol methods with empty bodies as implicitly abstract.
# This makes explicit protocol subclassing type-safe.
if (
self.type.is_protocol
and not self.is_stub_file # Bodies in stub files are always empty.
and (not isinstance(self.scope.function, OverloadedFuncDef) or defn.is_property)
and defn.abstract_status != IS_ABSTRACT
and is_trivial_body(defn.body)
):
defn.abstract_status = IMPLICITLY_ABSTRACT
if (
is_trivial_body(defn.body)
and not self.is_stub_file
and defn.abstract_status != NOT_ABSTRACT
):
defn.is_trivial_body = True
if (
defn.is_coroutine
and isinstance(defn.type, CallableType)
and self.wrapped_coro_return_types.get(defn) != defn.type
):
if defn.is_async_generator:
# Async generator types are handled elsewhere
pass
else:
# A coroutine defined as `async def foo(...) -> T: ...`
# has external return type `Coroutine[Any, Any, T]`.
any_type = AnyType(TypeOfAny.special_form)
ret_type = self.named_type_or_none(
"typing.Coroutine", [any_type, any_type, defn.type.ret_type]
)
assert ret_type is not None, "Internal error: typing.Coroutine not found"
defn.type = defn.type.copy_modified(ret_type=ret_type)
self.wrapped_coro_return_types[defn] = defn.type
self.pop_type_args(defn.type_args)
def remove_unpack_kwargs(self, defn: FuncDef, typ: CallableType) -> CallableType:
if not typ.arg_kinds or typ.arg_kinds[-1] is not ArgKind.ARG_STAR2:
return typ
last_type = typ.arg_types[-1]
if not isinstance(last_type, UnpackType):
return typ
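        # At this point the last argument is a `**kwargs` annotated with Unpack[...].
        # Roughly the shape being handled is (illustrative sketch, not from the
        # original source; "Movie" is a hypothetical TypedDict):
        #
        #     def f(**kwargs: Unpack[Movie]) -> None: ...
        #
        # The unpacked type must be a TypedDict whose keys do not overlap with the
        # other argument names; the checks below enforce this.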
last_type = get_proper_type(last_type.type)
if not isinstance(last_type, TypedDictType):
self.fail("Unpack item in ** argument must be a TypedDict", last_type)
new_arg_types = typ.arg_types[:-1] + [AnyType(TypeOfAny.from_error)]
return typ.copy_modified(arg_types=new_arg_types)
overlap = set(typ.arg_names) & set(last_type.items)
# It is OK for TypedDict to have a key named 'kwargs'.
overlap.discard(typ.arg_names[-1])
if overlap:
overlapped = ", ".join([f'"{name}"' for name in overlap])
self.fail(f"Overlap between argument names and ** TypedDict items: {overlapped}", defn)
new_arg_types = typ.arg_types[:-1] + [AnyType(TypeOfAny.from_error)]
return typ.copy_modified(arg_types=new_arg_types)
# OK, everything looks right now, mark the callable type as using unpack.
new_arg_types = typ.arg_types[:-1] + [last_type]
return typ.copy_modified(arg_types=new_arg_types, unpack_kwargs=True)
def prepare_method_signature(self, func: FuncDef, info: TypeInfo, has_self_type: bool) -> None:
"""Check basic signature validity and tweak annotation of self/cls argument."""
# Only non-static methods are special, as well as __new__.
functype = func.type
if func.name == "__new__":
func.is_static = True
if not func.is_static or func.name == "__new__":
if func.name in ["__init_subclass__", "__class_getitem__"]:
func.is_class = True
if not func.arguments:
self.fail(
'Method must have at least one argument. Did you forget the "self" argument?',
func,
)
elif isinstance(functype, CallableType):
self_type = get_proper_type(functype.arg_types[0])
if isinstance(self_type, AnyType):
if has_self_type:
assert self.type is not None and self.type.self_type is not None
leading_type: Type = self.type.self_type
else:
leading_type = fill_typevars(info)
if func.is_class or func.name == "__new__":
leading_type = self.class_type(leading_type)
func.type = replace_implicit_first_type(functype, leading_type)
elif has_self_type and isinstance(func.unanalyzed_type, CallableType):
if not isinstance(get_proper_type(func.unanalyzed_type.arg_types[0]), AnyType):
if self.is_expected_self_type(
self_type, func.is_class or func.name == "__new__"
):
# This error is off by default, since it is explicitly allowed
                            # by PEP 673.
self.fail(
'Redundant "Self" annotation for the first method argument',
func,
code=codes.REDUNDANT_SELF_TYPE,
)
else:
self.fail(
"Method cannot have explicit self annotation and Self type", func
)
elif has_self_type:
self.fail("Static methods cannot use Self type", func)
def is_expected_self_type(self, typ: Type, is_classmethod: bool) -> bool:
"""Does this (analyzed or not) type represent the expected Self type for a method?"""
assert self.type is not None
typ = get_proper_type(typ)
if is_classmethod:
if isinstance(typ, TypeType):
return self.is_expected_self_type(typ.item, is_classmethod=False)
if isinstance(typ, UnboundType):
sym = self.lookup_qualified(typ.name, typ, suppress_errors=True)
if (
sym is not None
and (
sym.fullname == "typing.Type"
or (
sym.fullname == "builtins.type"
and (
self.is_stub_file
or self.is_future_flag_set("annotations")
or self.options.python_version >= (3, 9)
)
)
)
and typ.args
):
return self.is_expected_self_type(typ.args[0], is_classmethod=False)
return False
if isinstance(typ, TypeVarType):
return typ == self.type.self_type
if isinstance(typ, UnboundType):
sym = self.lookup_qualified(typ.name, typ, suppress_errors=True)
return sym is not None and sym.fullname in SELF_TYPE_NAMES
return False
def set_original_def(self, previous: Node | None, new: FuncDef | Decorator) -> bool:
"""If 'new' conditionally redefine 'previous', set 'previous' as original
We reject straight redefinitions of functions, as they are usually
a programming error. For example:
def f(): ...
def f(): ... # Error: 'f' redefined
"""
if isinstance(new, Decorator):
new = new.func
if (
isinstance(previous, (FuncDef, Decorator))
and unnamed_function(new.name)
and unnamed_function(previous.name)
):
return True
if isinstance(previous, (FuncDef, Var, Decorator)) and new.is_conditional:
new.original_def = previous
return True
else:
return False
def update_function_type_variables(self, fun_type: CallableType, defn: FuncItem) -> bool:
"""Make any type variables in the signature of defn explicit.
Update the signature of defn to contain type variable definitions
if defn is generic. Return True, if the signature contains typing.Self
type, or False otherwise.
"""
fullname = self.function_fullname(defn.fullname)
with self.tvar_scope_frame(self.tvar_scope.method_frame(fullname)):
a = self.type_analyzer()
fun_type.variables, has_self_type = a.bind_function_type_variables(fun_type, defn)
if has_self_type and self.type is not None:
self.setup_self_type()
if defn.type_args:
bound_fullnames = {v.fullname for v in fun_type.variables}
declared_fullnames = {self.qualified_name(p.name) for p in defn.type_args}
extra = sorted(bound_fullnames - declared_fullnames)
if extra:
self.msg.type_parameters_should_be_declared(
[n.split(".")[-1] for n in extra], defn
)
return has_self_type
def setup_self_type(self) -> None:
"""Setup a (shared) Self type variable for current class.
We intentionally don't add it to the class symbol table,
so it can be accessed only by mypy and will not cause
clashes with user defined names.
"""
assert self.type is not None
info = self.type
if info.self_type is not None:
if has_placeholder(info.self_type.upper_bound):
# Similar to regular (user defined) type variables.
self.process_placeholder(
None,
"Self upper bound",
info,
force_progress=info.self_type.upper_bound != fill_typevars(info),
)
else:
return
info.self_type = TypeVarType(
"Self",
f"{info.fullname}.Self",
id=TypeVarId(0), # 0 is a special value for self-types.
values=[],
upper_bound=fill_typevars(info),
default=AnyType(TypeOfAny.from_omitted_generics),
)
def visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
self.statement = defn
self.add_function_to_symbol_table(defn)
if not self.recurse_into_functions:
return
# NB: Since _visit_overloaded_func_def will call accept on the
# underlying FuncDefs, the function might get entered twice.
# This is fine, though, because only the outermost function is
# used to compute targets.
with self.scope.function_scope(defn):
self.analyze_overloaded_func_def(defn)
@contextmanager
def overload_item_set(self, item: int | None) -> Iterator[None]:
self.current_overload_item = item
try:
yield
finally:
self.current_overload_item = None
def analyze_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
# OverloadedFuncDef refers to any legitimate situation where you have
# more than one declaration for the same function in a row. This occurs
# with a @property with a setter or a deleter, and for a classic
# @overload.
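        # The two shapes side by side (illustrative examples, not from the original
        # source):
        #
        #     @overload
        #     def f(x: int) -> int: ...
        #     @overload
        #     def f(x: str) -> str: ...
        #     def f(x): ...              # implementation (required outside stubs)
        #
        #     @property
        #     def x(self) -> int: ...
        #     @x.setter
        #     def x(self, value: int) -> None: ...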
defn._fullname = self.qualified_name(defn.name)
# TODO: avoid modifying items.
defn.items = defn.unanalyzed_items.copy()
first_item = defn.items[0]
first_item.is_overload = True
with self.overload_item_set(0):
first_item.accept(self)
if isinstance(first_item, Decorator) and first_item.func.is_property:
# This is a property.
first_item.func.is_overload = True
self.analyze_property_with_multi_part_definition(defn)
typ = function_type(first_item.func, self.named_type("builtins.function"))
assert isinstance(typ, CallableType)
types = [typ]
else:
            # This is a normal overload. Find the item signatures, the
# implementation (if outside a stub), and any missing @overload
# decorators.
types, impl, non_overload_indexes = self.analyze_overload_sigs_and_impl(defn)
defn.impl = impl
if non_overload_indexes:
self.handle_missing_overload_decorators(
defn, non_overload_indexes, some_overload_decorators=len(types) > 0
)
# If we found an implementation, remove it from the overload item list,
# as it's special.
if impl is not None:
assert impl is defn.items[-1]
defn.items = defn.items[:-1]
elif not non_overload_indexes:
self.handle_missing_overload_implementation(defn)
if types and not any(
# If some overload items are decorated with other decorators, then
# the overload type will be determined during type checking.
isinstance(it, Decorator) and len(it.decorators) > 1
for it in defn.items
):
# TODO: should we enforce decorated overloads consistency somehow?
# Some existing code uses both styles:
# * Put decorator only on implementation, use "effective" types in overloads
# * Put decorator everywhere, use "bare" types in overloads.
defn.type = Overloaded(types)
defn.type.line = defn.line
if not defn.items:
# It was not a real overload after all, but function redefinition. We've
# visited the redefinition(s) already.
if not defn.impl:
# For really broken overloads with no items and no implementation we need to keep
# at least one item to hold basic information like function name.
defn.impl = defn.unanalyzed_items[-1]
return
# We know this is an overload def. Infer properties and perform some checks.
self.process_final_in_overload(defn)
self.process_static_or_class_method_in_overload(defn)
self.process_overload_impl(defn)
def process_overload_impl(self, defn: OverloadedFuncDef) -> None:
"""Set flags for an overload implementation.
        Currently, this checks for a trivial body in protocol classes,
where it makes the method implicitly abstract.
"""
if defn.impl is None:
return
impl = defn.impl if isinstance(defn.impl, FuncDef) else defn.impl.func
if is_trivial_body(impl.body) and self.is_class_scope() and not self.is_stub_file:
assert self.type is not None
if self.type.is_protocol:
impl.abstract_status = IMPLICITLY_ABSTRACT
if impl.abstract_status != NOT_ABSTRACT:
impl.is_trivial_body = True
def analyze_overload_sigs_and_impl(
self, defn: OverloadedFuncDef
) -> tuple[list[CallableType], OverloadPart | None, list[int]]:
"""Find overload signatures, the implementation, and items with missing @overload.
        Assume that the first item was already analyzed. As a side effect,
        this analyzes the remaining items and updates their 'is_overload' flags.
"""
types = []
non_overload_indexes = []
impl: OverloadPart | None = None
for i, item in enumerate(defn.items):
if i != 0:
# Assume that the first item was already visited
item.is_overload = True
with self.overload_item_set(i if i < len(defn.items) - 1 else None):
item.accept(self)
# TODO: support decorated overloaded functions properly
if isinstance(item, Decorator):
callable = function_type(item.func, self.named_type("builtins.function"))
assert isinstance(callable, CallableType)
if not any(refers_to_fullname(dec, OVERLOAD_NAMES) for dec in item.decorators):
if i == len(defn.items) - 1 and not self.is_stub_file:
# Last item outside a stub is impl
impl = item
else:
                        # Oops, it wasn't an overload after all. The appropriate error
                        # message depends on where the item is in the list, so just
                        # record its index here.
non_overload_indexes.append(i)
else:
item.func.is_overload = True
types.append(callable)
if item.var.is_property:
self.fail("An overload can not be a property", item)
# If any item was decorated with `@override`, the whole overload
# becomes an explicit override.
defn.is_explicit_override |= item.func.is_explicit_override
elif isinstance(item, FuncDef):
if i == len(defn.items) - 1 and not self.is_stub_file:
impl = item
else:
non_overload_indexes.append(i)
return types, impl, non_overload_indexes
def handle_missing_overload_decorators(
self,
defn: OverloadedFuncDef,
non_overload_indexes: list[int],
some_overload_decorators: bool,
) -> None:
"""Generate errors for overload items without @overload.
        Side effect: remove non-overload items.
"""
if some_overload_decorators:
# Some of them were overloads, but not all.
for idx in non_overload_indexes:
if self.is_stub_file:
self.fail(
"An implementation for an overloaded function "
"is not allowed in a stub file",
defn.items[idx],
)
else:
self.fail(
"The implementation for an overloaded function must come last",
defn.items[idx],
)
else:
for idx in non_overload_indexes[1:]:
self.name_already_defined(defn.name, defn.items[idx], defn.items[0])
if defn.impl:
self.name_already_defined(defn.name, defn.impl, defn.items[0])
# Remove the non-overloads
for idx in reversed(non_overload_indexes):
del defn.items[idx]
def handle_missing_overload_implementation(self, defn: OverloadedFuncDef) -> None:
"""Generate error about missing overload implementation (only if needed)."""
if not self.is_stub_file:
if self.type and self.type.is_protocol and not self.is_func_scope():
# An overloaded protocol method doesn't need an implementation,
# but if it doesn't have one, then it is considered abstract.
for item in defn.items:
if isinstance(item, Decorator):
item.func.abstract_status = IS_ABSTRACT
else:
item.abstract_status = IS_ABSTRACT
else:
# TODO: also allow omitting an implementation for abstract methods in ABCs?
self.fail(
"An overloaded function outside a stub file must have an implementation",
defn,
code=codes.NO_OVERLOAD_IMPL,
)
def process_final_in_overload(self, defn: OverloadedFuncDef) -> None:
"""Detect the @final status of an overloaded function (and perform checks)."""
# If the implementation is marked as @final (or the first overload in
        # stubs), then the whole overloaded definition is @final.
if any(item.is_final for item in defn.items):
            # We mark it as final anyway, because that was probably the intention.
defn.is_final = True
# Only show the error once per overload
bad_final = next(ov for ov in defn.items if ov.is_final)
if not self.is_stub_file:
self.fail("@final should be applied only to overload implementation", bad_final)
elif any(item.is_final for item in defn.items[1:]):
bad_final = next(ov for ov in defn.items[1:] if ov.is_final)
self.fail(
"In a stub file @final must be applied only to the first overload", bad_final
)
if defn.impl is not None and defn.impl.is_final:
defn.is_final = True
def process_static_or_class_method_in_overload(self, defn: OverloadedFuncDef) -> None:
class_status = []
static_status = []
for item in defn.items:
if isinstance(item, Decorator):
inner = item.func
elif isinstance(item, FuncDef):
inner = item
else:
assert False, f"The 'item' variable is an unexpected type: {type(item)}"
class_status.append(inner.is_class)
static_status.append(inner.is_static)
if defn.impl is not None:
if isinstance(defn.impl, Decorator):
inner = defn.impl.func
elif isinstance(defn.impl, FuncDef):
inner = defn.impl
else:
assert False, f"Unexpected impl type: {type(defn.impl)}"
class_status.append(inner.is_class)
static_status.append(inner.is_static)
if len(set(class_status)) != 1:
self.msg.overload_inconsistently_applies_decorator("classmethod", defn)
elif len(set(static_status)) != 1:
self.msg.overload_inconsistently_applies_decorator("staticmethod", defn)
else:
defn.is_class = class_status[0]
defn.is_static = static_status[0]
def analyze_property_with_multi_part_definition(self, defn: OverloadedFuncDef) -> None:
"""Analyze a property defined using multiple methods (e.g., using @x.setter).
Assume that the first method (@property) has already been analyzed.
"""
defn.is_property = True
items = defn.items
first_item = defn.items[0]
assert isinstance(first_item, Decorator)
deleted_items = []
for i, item in enumerate(items[1:]):
if isinstance(item, Decorator):
if len(item.decorators) >= 1:
node = item.decorators[0]
if isinstance(node, MemberExpr):
if node.name == "setter":
# The first item represents the entire property.
first_item.var.is_settable_property = True
# Get abstractness from the original definition.
item.func.abstract_status = first_item.func.abstract_status
if node.name == "deleter":
item.func.abstract_status = first_item.func.abstract_status
else:
self.fail(
f"Only supported top decorator is @{first_item.func.name}.setter", item
)
item.func.accept(self)
else:
self.fail(f'Unexpected definition for property "{first_item.func.name}"', item)
deleted_items.append(i + 1)
for i in reversed(deleted_items):
del items[i]
def add_function_to_symbol_table(self, func: FuncDef | OverloadedFuncDef) -> None:
if self.is_class_scope():
assert self.type is not None
func.info = self.type
func._fullname = self.qualified_name(func.name)
self.add_symbol(func.name, func, func)
def analyze_arg_initializers(self, defn: FuncItem) -> None:
fullname = self.function_fullname(defn.fullname)
with self.tvar_scope_frame(self.tvar_scope.method_frame(fullname)):
# Analyze default arguments
for arg in defn.arguments:
if arg.initializer:
arg.initializer.accept(self)
def analyze_function_body(self, defn: FuncItem) -> None:
is_method = self.is_class_scope()
fullname = self.function_fullname(defn.fullname)
with self.tvar_scope_frame(self.tvar_scope.method_frame(fullname)):
# Bind the type variables again to visit the body.
if defn.type:
a = self.type_analyzer()
typ = defn.type
assert isinstance(typ, CallableType)
a.bind_function_type_variables(typ, defn)
for i in range(len(typ.arg_types)):
store_argument_type(defn, i, typ, self.named_type)
self.function_stack.append(defn)
with self.enter(defn):
for arg in defn.arguments:
self.add_local(arg.variable, defn)
# The first argument of a non-static, non-class method is like 'self'
# (though the name could be different), having the enclosing class's
# instance type.
if is_method and (not defn.is_static or defn.name == "__new__") and defn.arguments:
if not defn.is_class:
defn.arguments[0].variable.is_self = True
else:
defn.arguments[0].variable.is_cls = True
defn.body.accept(self)
self.function_stack.pop()
def check_classvar_in_signature(self, typ: ProperType) -> None:
t: ProperType
if isinstance(typ, Overloaded):
for t in typ.items:
self.check_classvar_in_signature(t)
return
if not isinstance(typ, CallableType):
return
for t in get_proper_types(typ.arg_types) + [get_proper_type(typ.ret_type)]:
if self.is_classvar(t):
self.fail_invalid_classvar(t)
# Show only one error per signature
break
def check_function_signature(self, fdef: FuncItem) -> None:
sig = fdef.type
assert isinstance(sig, CallableType)
if len(sig.arg_types) < len(fdef.arguments):
self.fail("Type signature has too few arguments", fdef)
# Add dummy Any arguments to prevent crashes later.
num_extra_anys = len(fdef.arguments) - len(sig.arg_types)
extra_anys = [AnyType(TypeOfAny.from_error)] * num_extra_anys
sig.arg_types.extend(extra_anys)
elif len(sig.arg_types) > len(fdef.arguments):
self.fail("Type signature has too many arguments", fdef, blocker=True)
def check_paramspec_definition(self, defn: FuncDef) -> None:
func = defn.type
assert isinstance(func, CallableType)
if not any(isinstance(var, ParamSpecType) for var in func.variables):
return # Function does not have param spec variables
args = func.var_arg()
kwargs = func.kw_arg()
if args is None and kwargs is None:
return # Looks like this function does not have starred args
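        # The checks below enforce the standard ParamSpec spelling. A valid
        # definition looks roughly like this (illustrative sketch, not from the
        # original source; P and T are hypothetical type variables):
        #
        #     def call(f: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T: ...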
args_defn_type = None
kwargs_defn_type = None
for arg_def, arg_kind in zip(defn.arguments, defn.arg_kinds):
if arg_kind == ARG_STAR:
args_defn_type = arg_def.type_annotation
elif arg_kind == ARG_STAR2:
kwargs_defn_type = arg_def.type_annotation
        # This may happen with an invalid `ParamSpec` args / kwargs definition:
        # the type analyzer sets the argument types to `Any`, but keeps the
        # definition types as `UnboundType` for now.
if not (
(isinstance(args_defn_type, UnboundType) and args_defn_type.name.endswith(".args"))
or (
isinstance(kwargs_defn_type, UnboundType)
and kwargs_defn_type.name.endswith(".kwargs")
)
):
# Looks like both `*args` and `**kwargs` are not `ParamSpec`
# It might be something else, skipping.
return
args_type = args.typ if args is not None else None
kwargs_type = kwargs.typ if kwargs is not None else None
if (
not isinstance(args_type, ParamSpecType)
or not isinstance(kwargs_type, ParamSpecType)
or args_type.name != kwargs_type.name
):
if isinstance(args_defn_type, UnboundType) and args_defn_type.name.endswith(".args"):
param_name = args_defn_type.name.split(".")[0]
elif isinstance(kwargs_defn_type, UnboundType) and kwargs_defn_type.name.endswith(
".kwargs"
):
param_name = kwargs_defn_type.name.split(".")[0]
else:
# Fallback for cases that probably should not ever happen:
param_name = "P"
self.fail(
f'ParamSpec must have "*args" typed as "{param_name}.args" and "**kwargs" typed as "{param_name}.kwargs"',
func,
code=codes.VALID_TYPE,
)
def visit_decorator(self, dec: Decorator) -> None:
self.statement = dec
# TODO: better don't modify them at all.
dec.decorators = dec.original_decorators.copy()
dec.func.is_conditional = self.block_depth[-1] > 0
if not dec.is_overload:
self.add_symbol(dec.name, dec, dec)
dec.func._fullname = self.qualified_name(dec.name)
dec.var._fullname = self.qualified_name(dec.name)
for d in dec.decorators:
d.accept(self)
removed: list[int] = []
no_type_check = False
could_be_decorated_property = False
for i, d in enumerate(dec.decorators):
# A bunch of decorators are special cased here.
if refers_to_fullname(d, "abc.abstractmethod"):
removed.append(i)
dec.func.abstract_status = IS_ABSTRACT
self.check_decorated_function_is_method("abstractmethod", dec)
elif refers_to_fullname(d, ("asyncio.coroutines.coroutine", "types.coroutine")):
removed.append(i)
dec.func.is_awaitable_coroutine = True
elif refers_to_fullname(d, "builtins.staticmethod"):
removed.append(i)
dec.func.is_static = True
dec.var.is_staticmethod = True
self.check_decorated_function_is_method("staticmethod", dec)
elif refers_to_fullname(d, "builtins.classmethod"):
removed.append(i)
dec.func.is_class = True
dec.var.is_classmethod = True
self.check_decorated_function_is_method("classmethod", dec)
elif refers_to_fullname(d, OVERRIDE_DECORATOR_NAMES):
removed.append(i)
dec.func.is_explicit_override = True
self.check_decorated_function_is_method("override", dec)
elif refers_to_fullname(
d,
(
"builtins.property",
"abc.abstractproperty",
"functools.cached_property",
"enum.property",
),
):
removed.append(i)
dec.func.is_property = True
dec.var.is_property = True
if refers_to_fullname(d, "abc.abstractproperty"):
dec.func.abstract_status = IS_ABSTRACT
elif refers_to_fullname(d, "functools.cached_property"):
dec.var.is_settable_property = True
self.check_decorated_function_is_method("property", dec)
elif refers_to_fullname(d, "typing.no_type_check"):
dec.var.type = AnyType(TypeOfAny.special_form)
no_type_check = True
elif refers_to_fullname(d, FINAL_DECORATOR_NAMES):
if self.is_class_scope():
assert self.type is not None, "No type set at class scope"
if self.type.is_protocol:
self.msg.protocol_members_cant_be_final(d)
else:
dec.func.is_final = True
dec.var.is_final = True
removed.append(i)
else:
self.fail("@final cannot be used with non-method functions", d)
elif refers_to_fullname(d, TYPE_CHECK_ONLY_NAMES):
# TODO: support `@overload` funcs.
dec.func.is_type_check_only = True
elif isinstance(d, CallExpr) and refers_to_fullname(
d.callee, DATACLASS_TRANSFORM_NAMES
):
dec.func.dataclass_transform_spec = self.parse_dataclass_transform_spec(d)
elif not dec.var.is_property:
                # We have seen a "non-trivial" decorator before seeing @property. If
                # we see @property later, give an error, since we don't support this.
could_be_decorated_property = True
for i in reversed(removed):
del dec.decorators[i]
if (not dec.is_overload or dec.var.is_property) and self.type:
dec.var.info = self.type
dec.var.is_initialized_in_class = True
if not no_type_check and self.recurse_into_functions:
dec.func.accept(self)
if could_be_decorated_property and dec.decorators and dec.var.is_property:
self.fail(
"Decorators on top of @property are not supported", dec, code=PROPERTY_DECORATOR
)
if (dec.func.is_static or dec.func.is_class) and dec.var.is_property:
self.fail("Only instance methods can be decorated with @property", dec)
if dec.func.abstract_status == IS_ABSTRACT and dec.func.is_final:
self.fail(f"Method {dec.func.name} is both abstract and final", dec)
if dec.func.is_static and dec.func.is_class:
self.fail(message_registry.CLASS_PATTERN_CLASS_OR_STATIC_METHOD, dec)
def check_decorated_function_is_method(self, decorator: str, context: Context) -> None:
if not self.type or self.is_func_scope():
self.fail(f'"{decorator}" used with a non-method', context)
#
# Classes
#
def visit_class_def(self, defn: ClassDef) -> None:
self.statement = defn
self.incomplete_type_stack.append(not defn.info)
namespace = self.qualified_name(defn.name)
with self.tvar_scope_frame(self.tvar_scope.class_frame(namespace)):
if self.push_type_args(defn.type_args, defn) is None:
self.mark_incomplete(defn.name, defn)
return
self.analyze_class(defn)
self.pop_type_args(defn.type_args)
self.incomplete_type_stack.pop()
def push_type_args(
self, type_args: list[TypeParam] | None, context: Context
) -> list[tuple[str, TypeVarLikeExpr]] | None:
if not type_args:
return []
self.locals.append(SymbolTable())
self.scope_stack.append(SCOPE_ANNOTATION)
tvs: list[tuple[str, TypeVarLikeExpr]] = []
for p in type_args:
tv = self.analyze_type_param(p, context)
if tv is None:
return None
tvs.append((p.name, tv))
for name, tv in tvs:
if self.is_defined_type_param(name):
self.fail(f'"{name}" already defined as a type parameter', context)
else:
self.add_symbol(name, tv, context, no_progress=True, type_param=True)
return tvs
def is_defined_type_param(self, name: str) -> bool:
for names in self.locals:
if names is None:
continue
if name in names:
node = names[name].node
if isinstance(node, TypeVarLikeExpr):
return True
return False
def analyze_type_param(
self, type_param: TypeParam, context: Context
) -> TypeVarLikeExpr | None:
fullname = self.qualified_name(type_param.name)
if type_param.upper_bound:
upper_bound = self.anal_type(type_param.upper_bound, allow_placeholder=True)
# TODO: we should validate the upper bound is valid for a given kind.
if upper_bound is None:
                # This and the branch below copy the special-casing for old-style type
                # variables, which is equally necessary for new-style classes to break
                # a vicious circle.
upper_bound = PlaceholderType(None, [], context.line)
else:
if type_param.kind == TYPE_VAR_TUPLE_KIND:
upper_bound = self.named_type("builtins.tuple", [self.object_type()])
else:
upper_bound = self.object_type()
default = AnyType(TypeOfAny.from_omitted_generics)
if type_param.kind == TYPE_VAR_KIND:
values = []
if type_param.values:
for value in type_param.values:
analyzed = self.anal_type(value, allow_placeholder=True)
if analyzed is None:
analyzed = PlaceholderType(None, [], context.line)
values.append(analyzed)
return TypeVarExpr(
name=type_param.name,
fullname=fullname,
values=values,
upper_bound=upper_bound,
default=default,
variance=VARIANCE_NOT_READY,
is_new_style=True,
line=context.line,
)
elif type_param.kind == PARAM_SPEC_KIND:
return ParamSpecExpr(
name=type_param.name,
fullname=fullname,
upper_bound=upper_bound,
default=default,
is_new_style=True,
line=context.line,
)
else:
assert type_param.kind == TYPE_VAR_TUPLE_KIND
tuple_fallback = self.named_type("builtins.tuple", [self.object_type()])
return TypeVarTupleExpr(
name=type_param.name,
fullname=fullname,
upper_bound=upper_bound,
tuple_fallback=tuple_fallback,
default=default,
is_new_style=True,
line=context.line,
)
def pop_type_args(self, type_args: list[TypeParam] | None) -> None:
if not type_args:
return
self.locals.pop()
self.scope_stack.pop()
def analyze_class(self, defn: ClassDef) -> None:
fullname = self.qualified_name(defn.name)
if not defn.info and not self.is_core_builtin_class(defn):
# Add placeholder so that self-references in base classes can be
# resolved. We don't want this to cause a deferral, since if there
# are no incomplete references, we'll replace this with a TypeInfo
# before returning.
placeholder = PlaceholderNode(fullname, defn, defn.line, becomes_typeinfo=True)
self.add_symbol(defn.name, placeholder, defn, can_defer=False)
tag = self.track_incomplete_refs()
# Restore base classes after previous iteration (things like Generic[T] might be removed).
defn.base_type_exprs.extend(defn.removed_base_type_exprs)
defn.removed_base_type_exprs.clear()
self.infer_metaclass_and_bases_from_compat_helpers(defn)
bases = defn.base_type_exprs
bases, tvar_defs, is_protocol = self.clean_up_bases_and_infer_type_variables(
defn, bases, context=defn
)
self.check_type_alias_bases(bases)
for tvd in tvar_defs:
if isinstance(tvd, TypeVarType) and any(
has_placeholder(t) for t in [tvd.upper_bound] + tvd.values
):
# Some type variable bounds or values are not ready, we need
# to re-analyze this class.
self.defer()
if has_placeholder(tvd.default):
# Placeholder values in TypeVarLikeTypes may get substituted in.
# Defer current target until they are ready.
self.mark_incomplete(defn.name, defn)
return
self.analyze_class_keywords(defn)
bases_result = self.analyze_base_classes(bases)
if bases_result is None or self.found_incomplete_ref(tag):
# Something was incomplete. Defer current target.
self.mark_incomplete(defn.name, defn)
return
base_types, base_error = bases_result
if any(isinstance(base, PlaceholderType) for base, _ in base_types):
# We need to know the TypeInfo of each base to construct the MRO. Placeholder types
# are okay in nested positions, since they can't affect the MRO.
self.mark_incomplete(defn.name, defn)
return
declared_metaclass, should_defer, any_meta = self.get_declared_metaclass(
defn.name, defn.metaclass
)
if should_defer or self.found_incomplete_ref(tag):
# Metaclass was not ready. Defer current target.
self.mark_incomplete(defn.name, defn)
return
if self.analyze_typeddict_classdef(defn):
if defn.info:
self.setup_type_vars(defn, tvar_defs)
self.setup_alias_type_vars(defn)
return
if self.analyze_namedtuple_classdef(defn, tvar_defs):
return
# Create TypeInfo for class now that base classes and the MRO can be calculated.
self.prepare_class_def(defn)
self.setup_type_vars(defn, tvar_defs)
if base_error:
defn.info.fallback_to_any = True
if any_meta:
defn.info.meta_fallback_to_any = True
with self.scope.class_scope(defn.info):
self.configure_base_classes(defn, base_types)
defn.info.is_protocol = is_protocol
self.recalculate_metaclass(defn, declared_metaclass)
defn.info.runtime_protocol = False
if defn.type_args:
# PEP 695 type parameters are not in scope in class decorators, so
# temporarily disable type parameter namespace.
type_params_names = self.locals.pop()
self.scope_stack.pop()
for decorator in defn.decorators:
self.analyze_class_decorator(defn, decorator)
if defn.type_args:
self.locals.append(type_params_names)
self.scope_stack.append(SCOPE_ANNOTATION)
self.analyze_class_body_common(defn)
def check_type_alias_bases(self, bases: list[Expression]) -> None:
for base in bases:
if isinstance(base, IndexExpr):
base = base.base
if (
isinstance(base, RefExpr)
and isinstance(base.node, TypeAlias)
and base.node.python_3_12_type_alias
):
self.fail(
'Type alias defined using "type" statement not valid as base class', base
)
def setup_type_vars(self, defn: ClassDef, tvar_defs: list[TypeVarLikeType]) -> None:
defn.type_vars = tvar_defs
defn.info.type_vars = []
# we want to make sure any additional logic in add_type_vars gets run
defn.info.add_type_vars()
def setup_alias_type_vars(self, defn: ClassDef) -> None:
assert defn.info.special_alias is not None
defn.info.special_alias.alias_tvars = list(defn.type_vars)
# It is a bit unfortunate that we need to inline some logic from TypeAlias constructor,
# but it is required, since type variables may change during semantic analyzer passes.
for i, t in enumerate(defn.type_vars):
if isinstance(t, TypeVarTupleType):
defn.info.special_alias.tvar_tuple_index = i
target = defn.info.special_alias.target
assert isinstance(target, ProperType)
if isinstance(target, TypedDictType):
target.fallback.args = type_vars_as_args(defn.type_vars)
elif isinstance(target, TupleType):
target.partial_fallback.args = type_vars_as_args(defn.type_vars)
else:
assert False, f"Unexpected special alias type: {type(target)}"
def is_core_builtin_class(self, defn: ClassDef) -> bool:
return self.cur_mod_id == "builtins" and defn.name in CORE_BUILTIN_CLASSES
def analyze_class_body_common(self, defn: ClassDef) -> None:
"""Parts of class body analysis that are common to all kinds of class defs."""
self.enter_class(defn.info)
if any(b.self_type is not None for b in defn.info.mro):
self.setup_self_type()
defn.defs.accept(self)
self.apply_class_plugin_hooks(defn)
self.leave_class()
def analyze_typeddict_classdef(self, defn: ClassDef) -> bool:
if (
defn.info
and defn.info.typeddict_type
and not has_placeholder(defn.info.typeddict_type)
):
# This is a valid TypedDict, and it is fully analyzed.
return True
is_typeddict, info = self.typed_dict_analyzer.analyze_typeddict_classdef(defn)
if is_typeddict:
for decorator in defn.decorators:
decorator.accept(self)
if info is not None:
self.analyze_class_decorator_common(defn, info, decorator)
if info is None:
self.mark_incomplete(defn.name, defn)
else:
self.prepare_class_def(defn, info, custom_names=True)
return True
return False
def analyze_namedtuple_classdef(
self, defn: ClassDef, tvar_defs: list[TypeVarLikeType]
) -> bool:
"""Check if this class can define a named tuple."""
if (
defn.info
and defn.info.is_named_tuple
and defn.info.tuple_type
and not has_placeholder(defn.info.tuple_type)
):
# Don't reprocess everything. We just need to process methods defined
# in the named tuple class body.
is_named_tuple = True
info: TypeInfo | None = defn.info
else:
is_named_tuple, info = self.named_tuple_analyzer.analyze_namedtuple_classdef(
defn, self.is_stub_file, self.is_func_scope()
)
if is_named_tuple:
if info is None:
self.mark_incomplete(defn.name, defn)
else:
self.prepare_class_def(defn, info, custom_names=True)
self.setup_type_vars(defn, tvar_defs)
self.setup_alias_type_vars(defn)
with self.scope.class_scope(defn.info):
for deco in defn.decorators:
deco.accept(self)
self.analyze_class_decorator_common(defn, defn.info, deco)
with self.named_tuple_analyzer.save_namedtuple_body(info):
self.analyze_class_body_common(defn)
return True
return False
def apply_class_plugin_hooks(self, defn: ClassDef) -> None:
"""Apply a plugin hook that may infer a more precise definition for a class."""
for decorator in defn.decorators:
decorator_name = self.get_fullname_for_hook(decorator)
if decorator_name:
hook = self.plugin.get_class_decorator_hook(decorator_name)
# Special case: if the decorator is itself decorated with
# typing.dataclass_transform, apply the hook for the dataclasses plugin
# TODO: remove special casing here
if hook is None and find_dataclass_transform_spec(decorator):
hook = dataclasses_plugin.dataclass_tag_callback
if hook:
hook(ClassDefContext(defn, decorator, self))
if defn.metaclass:
metaclass_name = self.get_fullname_for_hook(defn.metaclass)
if metaclass_name:
hook = self.plugin.get_metaclass_hook(metaclass_name)
if hook:
hook(ClassDefContext(defn, defn.metaclass, self))
for base_expr in defn.base_type_exprs:
base_name = self.get_fullname_for_hook(base_expr)
if base_name:
hook = self.plugin.get_base_class_hook(base_name)
if hook:
hook(ClassDefContext(defn, base_expr, self))
# Check if the class definition itself triggers a dataclass transform (via a parent class/
# metaclass)
spec = find_dataclass_transform_spec(defn)
if spec is not None:
dataclasses_plugin.add_dataclass_tag(defn.info)
def get_fullname_for_hook(self, expr: Expression) -> str | None:
if isinstance(expr, CallExpr):
return self.get_fullname_for_hook(expr.callee)
elif isinstance(expr, IndexExpr):
return self.get_fullname_for_hook(expr.base)
elif isinstance(expr, RefExpr):
if expr.fullname:
return expr.fullname
# If we don't have a fullname look it up. This happens because base classes are
# analyzed in a different manner (see exprtotype.py) and therefore those AST
# nodes will not have full names.
sym = self.lookup_type_node(expr)
if sym:
return sym.fullname
return None
def analyze_class_keywords(self, defn: ClassDef) -> None:
for value in defn.keywords.values():
value.accept(self)
def enter_class(self, info: TypeInfo) -> None:
# Remember previous active class
self.type_stack.append(self.type)
self.locals.append(None) # Add class scope
self.scope_stack.append(SCOPE_CLASS)
self.block_depth.append(-1) # The class body increments this to 0
self.loop_depth.append(0)
self._type = info
self.missing_names.append(set())
def leave_class(self) -> None:
"""Restore analyzer state."""
self.block_depth.pop()
self.loop_depth.pop()
self.locals.pop()
self.scope_stack.pop()
self._type = self.type_stack.pop()
self.missing_names.pop()
def analyze_class_decorator(self, defn: ClassDef, decorator: Expression) -> None:
decorator.accept(self)
self.analyze_class_decorator_common(defn, defn.info, decorator)
if isinstance(decorator, RefExpr):
if decorator.fullname in RUNTIME_PROTOCOL_DECOS:
if defn.info.is_protocol:
defn.info.runtime_protocol = True
else:
self.fail("@runtime_checkable can only be used with protocol classes", defn)
elif isinstance(decorator, CallExpr) and refers_to_fullname(
decorator.callee, DATACLASS_TRANSFORM_NAMES
):
defn.info.dataclass_transform_spec = self.parse_dataclass_transform_spec(decorator)
def analyze_class_decorator_common(
self, defn: ClassDef, info: TypeInfo, decorator: Expression
) -> None:
"""Common method for applying class decorators.
Called on regular classes, typeddicts, and namedtuples.
"""
if refers_to_fullname(decorator, FINAL_DECORATOR_NAMES):
info.is_final = True
elif refers_to_fullname(decorator, TYPE_CHECK_ONLY_NAMES):
info.is_type_check_only = True
def clean_up_bases_and_infer_type_variables(
self, defn: ClassDef, base_type_exprs: list[Expression], context: Context
) -> tuple[list[Expression], list[TypeVarLikeType], bool]:
"""Remove extra base classes such as Generic and infer type vars.
For example, consider this class:
class Foo(Bar, Generic[T]): ...
Now we will remove Generic[T] from bases of Foo and infer that the
type variable 'T' is a type argument of Foo.
Note that this is performed *before* semantic analysis.
Returns (remaining base expressions, inferred type variables, is protocol).
"""
removed: list[int] = []
declared_tvars: TypeVarLikeList = []
is_protocol = False
if defn.type_args is not None:
for p in defn.type_args:
node = self.lookup(p.name, context)
assert node is not None
assert isinstance(node.node, TypeVarLikeExpr)
declared_tvars.append((p.name, node.node))
for i, base_expr in enumerate(base_type_exprs):
if isinstance(base_expr, StarExpr):
base_expr.valid = True
self.analyze_type_expr(base_expr)
try:
base = self.expr_to_unanalyzed_type(base_expr)
except TypeTranslationError:
# This error will be caught later.
continue
result = self.analyze_class_typevar_declaration(base)
if result is not None:
tvars = result[0]
is_protocol |= result[1]
if declared_tvars:
if defn.type_args:
if is_protocol:
self.fail('No arguments expected for "Protocol" base class', context)
else:
self.fail("Generic[...] base class is redundant", context)
else:
self.fail(
"Only single Generic[...] or Protocol[...] can be in bases", context
)
removed.append(i)
declared_tvars.extend(tvars)
if isinstance(base, UnboundType):
sym = self.lookup_qualified(base.name, base)
if sym is not None and sym.node is not None:
if sym.node.fullname in PROTOCOL_NAMES and i not in removed:
# also remove bare 'Protocol' bases
removed.append(i)
is_protocol = True
all_tvars = self.get_all_bases_tvars(base_type_exprs, removed)
if declared_tvars:
if len(remove_dups(declared_tvars)) < len(declared_tvars) and not defn.type_args:
self.fail("Duplicate type variables in Generic[...] or Protocol[...]", context)
declared_tvars = remove_dups(declared_tvars)
if not set(all_tvars).issubset(set(declared_tvars)):
if defn.type_args:
undeclared = sorted(set(all_tvars) - set(declared_tvars))
self.msg.type_parameters_should_be_declared(
[tv[0] for tv in undeclared], context
)
else:
self.fail(
"If Generic[...] or Protocol[...] is present"
" it should list all type variables",
context,
)
# In case of error, Generic tvars will go first
declared_tvars = remove_dups(declared_tvars + all_tvars)
else:
declared_tvars = all_tvars
for i in reversed(removed):
# We need to actually remove the base class expressions like Generic[T],
# mostly because otherwise they will create spurious dependencies in fine
# grained incremental mode.
defn.removed_base_type_exprs.append(defn.base_type_exprs[i])
del base_type_exprs[i]
tvar_defs: list[TypeVarLikeType] = []
last_tvar_name_with_default: str | None = None
for name, tvar_expr in declared_tvars:
tvar_expr.default = tvar_expr.default.accept(
TypeVarDefaultTranslator(self, tvar_expr.name, context)
)
tvar_def = self.tvar_scope.bind_new(name, tvar_expr)
if last_tvar_name_with_default is not None and not tvar_def.has_default():
self.msg.tvar_without_default_type(
tvar_def.name, last_tvar_name_with_default, context
)
tvar_def.default = AnyType(TypeOfAny.from_error)
elif tvar_def.has_default():
last_tvar_name_with_default = tvar_def.name
tvar_defs.append(tvar_def)
return base_type_exprs, tvar_defs, is_protocol
def analyze_class_typevar_declaration(self, base: Type) -> tuple[TypeVarLikeList, bool] | None:
"""Analyze type variables declared using Generic[...] or Protocol[...].
Args:
base: Non-analyzed base class
Return None if the base class does not declare type variables. Otherwise,
return the type variables.
"""
if not isinstance(base, UnboundType):
return None
unbound = base
sym = self.lookup_qualified(unbound.name, unbound)
if sym is None or sym.node is None:
return None
if (
sym.node.fullname == "typing.Generic"
or sym.node.fullname in PROTOCOL_NAMES
and base.args
):
is_proto = sym.node.fullname != "typing.Generic"
tvars: TypeVarLikeList = []
have_type_var_tuple = False
for arg in unbound.args:
tag = self.track_incomplete_refs()
tvar = self.analyze_unbound_tvar(arg)
if tvar:
if isinstance(tvar[1], TypeVarTupleExpr):
if have_type_var_tuple:
self.fail("Can only use one type var tuple in a class def", base)
continue
have_type_var_tuple = True
tvars.append(tvar)
elif not self.found_incomplete_ref(tag):
self.fail("Free type variable expected in %s[...]" % sym.node.name, base)
return tvars, is_proto
return None
def analyze_unbound_tvar(self, t: Type) -> tuple[str, TypeVarLikeExpr] | None:
if isinstance(t, UnpackType) and isinstance(t.type, UnboundType):
return self.analyze_unbound_tvar_impl(t.type, is_unpacked=True)
if isinstance(t, UnboundType):
sym = self.lookup_qualified(t.name, t)
if sym and sym.fullname in ("typing.Unpack", "typing_extensions.Unpack"):
inner_t = t.args[0]
if isinstance(inner_t, UnboundType):
return self.analyze_unbound_tvar_impl(inner_t, is_unpacked=True)
return None
return self.analyze_unbound_tvar_impl(t)
return None
def analyze_unbound_tvar_impl(
self, t: UnboundType, is_unpacked: bool = False, is_typealias_param: bool = False
) -> tuple[str, TypeVarLikeExpr] | None:
assert not is_unpacked or not is_typealias_param, "Mutually exclusive conditions"
sym = self.lookup_qualified(t.name, t)
if sym and isinstance(sym.node, PlaceholderNode):
self.record_incomplete_ref()
if not is_unpacked and sym and isinstance(sym.node, ParamSpecExpr):
if sym.fullname and not self.tvar_scope.allow_binding(sym.fullname):
# It's bound by our type variable scope
return None
return t.name, sym.node
if (is_unpacked or is_typealias_param) and sym and isinstance(sym.node, TypeVarTupleExpr):
if sym.fullname and not self.tvar_scope.allow_binding(sym.fullname):
# It's bound by our type variable scope
return None
return t.name, sym.node
if sym is None or not isinstance(sym.node, TypeVarExpr) or is_unpacked:
return None
elif sym.fullname and not self.tvar_scope.allow_binding(sym.fullname):
# It's bound by our type variable scope
return None
else:
assert isinstance(sym.node, TypeVarExpr)
return t.name, sym.node
def find_type_var_likes(self, t: Type) -> TypeVarLikeList:
visitor = FindTypeVarVisitor(self, self.tvar_scope)
t.accept(visitor)
return visitor.type_var_likes
def get_all_bases_tvars(
self, base_type_exprs: list[Expression], removed: list[int]
) -> TypeVarLikeList:
"""Return all type variable references in bases."""
tvars: TypeVarLikeList = []
for i, base_expr in enumerate(base_type_exprs):
if i not in removed:
try:
base = self.expr_to_unanalyzed_type(base_expr)
except TypeTranslationError:
# This error will be caught later.
continue
base_tvars = self.find_type_var_likes(base)
tvars.extend(base_tvars)
return remove_dups(tvars)
def get_and_bind_all_tvars(self, type_exprs: list[Expression]) -> list[TypeVarLikeType]:
"""Return all type variable references in item type expressions.
This is a helper for generic TypedDicts and NamedTuples. Essentially it is
a simplified version of the logic we use for ClassDef bases. We duplicate
some amount of code, because it is hard to refactor common pieces.
"""
tvars = []
for base_expr in type_exprs:
try:
base = self.expr_to_unanalyzed_type(base_expr)
except TypeTranslationError:
# This error will be caught later.
continue
base_tvars = self.find_type_var_likes(base)
tvars.extend(base_tvars)
tvars = remove_dups(tvars) # Variables are defined in order of textual appearance.
tvar_defs = []
for name, tvar_expr in tvars:
tvar_def = self.tvar_scope.bind_new(name, tvar_expr)
tvar_defs.append(tvar_def)
return tvar_defs
def prepare_class_def(
self, defn: ClassDef, info: TypeInfo | None = None, custom_names: bool = False
) -> None:
"""Prepare for the analysis of a class definition.
Create an empty TypeInfo and store it in a symbol table, or if the 'info'
argument is provided, store it instead (used for magic type definitions).
"""
if not defn.info:
defn.fullname = self.qualified_name(defn.name)
# TODO: Nested classes
info = info or self.make_empty_type_info(defn)
defn.info = info
info.defn = defn
if not custom_names:
# Some special classes (in particular NamedTuples) use custom fullname logic.
# Don't override it here (also see comment below, this needs cleanup).
if not self.is_func_scope():
info._fullname = self.qualified_name(defn.name)
else:
info._fullname = info.name
local_name = defn.name
if "@" in local_name:
local_name = local_name.split("@")[0]
self.add_symbol(local_name, defn.info, defn)
if self.is_nested_within_func_scope():
# We need to preserve local classes, let's store them
# in globals under mangled unique names
#
# TODO: Putting local classes into globals breaks assumptions in fine-grained
# incremental mode and we should avoid it. In general, this logic is too
# ad-hoc and needs to be removed/refactored.
if "@" not in defn.info._fullname:
global_name = defn.info.name + "@" + str(defn.line)
defn.info._fullname = self.cur_mod_id + "." + global_name
else:
# Preserve name from previous fine-grained incremental run.
global_name = defn.info.name
defn.fullname = defn.info._fullname
if defn.info.is_named_tuple or defn.info.typeddict_type:
# Named tuples and Typed dicts nested within a class are stored
# in the class symbol table.
self.add_symbol_skip_local(global_name, defn.info)
else:
self.globals[global_name] = SymbolTableNode(GDEF, defn.info)
def make_empty_type_info(self, defn: ClassDef) -> TypeInfo:
if (
self.is_module_scope()
and self.cur_mod_id == "builtins"
and defn.name in CORE_BUILTIN_CLASSES
):
# Special case core built-in classes. A TypeInfo was already
# created for it before semantic analysis, but with a dummy
# ClassDef. Patch the real ClassDef object.
info = self.globals[defn.name].node
assert isinstance(info, TypeInfo)
else:
info = TypeInfo(SymbolTable(), defn, self.cur_mod_id)
info.set_line(defn)
return info
def get_name_repr_of_expr(self, expr: Expression) -> str | None:
"""Try finding a short simplified textual representation of a base class expression."""
if isinstance(expr, NameExpr):
return expr.name
if isinstance(expr, MemberExpr):
return get_member_expr_fullname(expr)
if isinstance(expr, IndexExpr):
return self.get_name_repr_of_expr(expr.base)
if isinstance(expr, CallExpr):
return self.get_name_repr_of_expr(expr.callee)
return None
def analyze_base_classes(
self, base_type_exprs: list[Expression]
) -> tuple[list[tuple[ProperType, Expression]], bool] | None:
"""Analyze base class types.
Return None if some definition was incomplete. Otherwise, return a tuple
with these items:
* List of (analyzed type, original expression) tuples
* Boolean indicating whether one of the bases had a semantic analysis error
"""
is_error = False
bases = []
for base_expr in base_type_exprs:
if (
isinstance(base_expr, RefExpr)
and base_expr.fullname in TYPED_NAMEDTUPLE_NAMES + TPDICT_NAMES
) or (
isinstance(base_expr, CallExpr)
and isinstance(base_expr.callee, RefExpr)
and base_expr.callee.fullname in TPDICT_NAMES
):
# Ignore magic bases for now.
# For example:
# class Foo(TypedDict): ... # RefExpr
# class Foo(NamedTuple): ... # RefExpr
# class Foo(TypedDict("Foo", {"a": int})): ... # CallExpr
continue
try:
base = self.expr_to_analyzed_type(
base_expr, allow_placeholder=True, allow_type_any=True
)
except TypeTranslationError:
name = self.get_name_repr_of_expr(base_expr)
if isinstance(base_expr, CallExpr):
msg = "Unsupported dynamic base class"
else:
msg = "Invalid base class"
if name:
msg += f' "{name}"'
self.fail(msg, base_expr)
is_error = True
continue
if base is None:
return None
base = get_proper_type(base)
bases.append((base, base_expr))
return bases, is_error
def configure_base_classes(
self, defn: ClassDef, bases: list[tuple[ProperType, Expression]]
) -> None:
"""Set up base classes.
This computes several attributes on the corresponding TypeInfo defn.info
related to the base classes: defn.info.bases, defn.info.mro, and
miscellaneous others (at least tuple_type, fallback_to_any, and is_enum.)
"""
base_types: list[Instance] = []
info = defn.info
for base, base_expr in bases:
if isinstance(base, TupleType):
actual_base = self.configure_tuple_base_class(defn, base)
base_types.append(actual_base)
elif isinstance(base, Instance):
if base.type.is_newtype:
self.fail('Cannot subclass "NewType"', defn)
base_types.append(base)
elif isinstance(base, AnyType):
if self.options.disallow_subclassing_any:
if isinstance(base_expr, (NameExpr, MemberExpr)):
msg = f'Class cannot subclass "{base_expr.name}" (has type "Any")'
else:
msg = 'Class cannot subclass value of type "Any"'
self.fail(msg, base_expr)
info.fallback_to_any = True
elif isinstance(base, TypedDictType):
base_types.append(base.fallback)
else:
msg = "Invalid base class"
name = self.get_name_repr_of_expr(base_expr)
if name:
msg += f' "{name}"'
self.fail(msg, base_expr)
info.fallback_to_any = True
if self.options.disallow_any_unimported and has_any_from_unimported_type(base):
if isinstance(base_expr, (NameExpr, MemberExpr)):
prefix = f"Base type {base_expr.name}"
else:
prefix = "Base type"
self.msg.unimported_type_becomes_any(prefix, base, base_expr)
check_for_explicit_any(
base, self.options, self.is_typeshed_stub_file, self.msg, context=base_expr
)
# Add 'object' as implicit base if there is no other base class.
if not base_types and defn.fullname != "builtins.object":
base_types.append(self.object_type())
info.bases = base_types
# Calculate the MRO.
if not self.verify_base_classes(defn):
self.set_dummy_mro(defn.info)
return
if not self.verify_duplicate_base_classes(defn):
# We don't want to block the typechecking process,
# so, we just insert `Any` as the base class and show an error.
self.set_any_mro(defn.info)
self.calculate_class_mro(defn, self.object_type)
def configure_tuple_base_class(self, defn: ClassDef, base: TupleType) -> Instance:
info = defn.info
# There may be an existing valid tuple type from previous semanal iterations.
# Use equality to check if it is the case.
if info.tuple_type and info.tuple_type != base and not has_placeholder(info.tuple_type):
self.fail("Class has two incompatible bases derived from tuple", defn)
defn.has_incompatible_baseclass = True
if info.special_alias and has_placeholder(info.special_alias.target):
self.process_placeholder(
None, "tuple base", defn, force_progress=base != info.tuple_type
)
info.update_tuple_type(base)
self.setup_alias_type_vars(defn)
if base.partial_fallback.type.fullname == "builtins.tuple" and not has_placeholder(base):
# Fallback can only be safely calculated after semantic analysis, since base
# classes may be incomplete. Postpone the calculation.
self.schedule_patch(PRIORITY_FALLBACKS, lambda: calculate_tuple_fallback(base))
return base.partial_fallback
def set_dummy_mro(self, info: TypeInfo) -> None:
# Give it an MRO consisting of just the class itself and object.
info.mro = [info, self.object_type().type]
info.bad_mro = True
def set_any_mro(self, info: TypeInfo) -> None:
        # Give it an MRO of just the class itself and object, as if it were a direct `Any` subclass.
info.fallback_to_any = True
info.mro = [info, self.object_type().type]
def calculate_class_mro(
self, defn: ClassDef, obj_type: Callable[[], Instance] | None = None
) -> None:
"""Calculate method resolution order for a class.
`obj_type` exists just to fill in empty base class list in case of an error.
"""
try:
calculate_mro(defn.info, obj_type)
except MroError:
self.fail(
"Cannot determine consistent method resolution "
'order (MRO) for "%s"' % defn.name,
defn,
)
self.set_dummy_mro(defn.info)
# Allow plugins to alter the MRO to handle the fact that `def mro()`
# on metaclasses permits MRO rewriting.
if defn.fullname:
hook = self.plugin.get_customize_class_mro_hook(defn.fullname)
if hook:
hook(ClassDefContext(defn, FakeExpression(), self))
def infer_metaclass_and_bases_from_compat_helpers(self, defn: ClassDef) -> None:
"""Lookup for special metaclass declarations, and update defn fields accordingly.
* six.with_metaclass(M, B1, B2, ...)
* @six.add_metaclass(M)
* future.utils.with_metaclass(M, B1, B2, ...)
* past.utils.with_metaclass(M, B1, B2, ...)
"""
# Look for six.with_metaclass(M, B1, B2, ...)
with_meta_expr: Expression | None = None
if len(defn.base_type_exprs) == 1:
base_expr = defn.base_type_exprs[0]
if isinstance(base_expr, CallExpr) and isinstance(base_expr.callee, RefExpr):
self.analyze_type_expr(base_expr)
if (
base_expr.callee.fullname
in {
"six.with_metaclass",
"future.utils.with_metaclass",
"past.utils.with_metaclass",
}
and len(base_expr.args) >= 1
and all(kind == ARG_POS for kind in base_expr.arg_kinds)
):
with_meta_expr = base_expr.args[0]
defn.base_type_exprs = base_expr.args[1:]
# Look for @six.add_metaclass(M)
add_meta_expr: Expression | None = None
for dec_expr in defn.decorators:
if isinstance(dec_expr, CallExpr) and isinstance(dec_expr.callee, RefExpr):
dec_expr.callee.accept(self)
if (
dec_expr.callee.fullname == "six.add_metaclass"
and len(dec_expr.args) == 1
and dec_expr.arg_kinds[0] == ARG_POS
):
add_meta_expr = dec_expr.args[0]
break
metas = {defn.metaclass, with_meta_expr, add_meta_expr} - {None}
if len(metas) == 0:
return
if len(metas) > 1:
self.fail("Multiple metaclass definitions", defn)
return
defn.metaclass = metas.pop()
def verify_base_classes(self, defn: ClassDef) -> bool:
info = defn.info
cycle = False
for base in info.bases:
baseinfo = base.type
if self.is_base_class(info, baseinfo):
self.fail("Cycle in inheritance hierarchy", defn)
cycle = True
return not cycle
def verify_duplicate_base_classes(self, defn: ClassDef) -> bool:
dup = find_duplicate(defn.info.direct_base_classes())
if dup:
self.fail(f'Duplicate base class "{dup.name}"', defn)
return not dup
def is_base_class(self, t: TypeInfo, s: TypeInfo) -> bool:
"""Determine if t is a base class of s (but do not use mro)."""
# Search the base class graph for t, starting from s.
worklist = [s]
visited = {s}
while worklist:
nxt = worklist.pop()
if nxt == t:
return True
for base in nxt.bases:
if base.type not in visited:
worklist.append(base.type)
visited.add(base.type)
return False
def get_declared_metaclass(
self, name: str, metaclass_expr: Expression | None
) -> tuple[Instance | None, bool, bool]:
"""Get declared metaclass from metaclass expression.
Returns a tuple of three values:
* A metaclass instance or None
* A boolean indicating whether we should defer
* A boolean indicating whether we should set metaclass Any fallback
(either for Any metaclass or invalid/dynamic metaclass).
The two boolean flags can only be True if instance is None.
"""
declared_metaclass = None
if metaclass_expr:
metaclass_name = None
if isinstance(metaclass_expr, NameExpr):
metaclass_name = metaclass_expr.name
elif isinstance(metaclass_expr, MemberExpr):
metaclass_name = get_member_expr_fullname(metaclass_expr)
if metaclass_name is None:
self.fail(f'Dynamic metaclass not supported for "{name}"', metaclass_expr)
return None, False, True
sym = self.lookup_qualified(metaclass_name, metaclass_expr)
if sym is None:
# Probably a name error - it is already handled elsewhere
return None, False, True
if isinstance(sym.node, Var) and isinstance(get_proper_type(sym.node.type), AnyType):
if self.options.disallow_subclassing_any:
self.fail(
f'Class cannot use "{sym.node.name}" as a metaclass (has type "Any")',
metaclass_expr,
)
return None, False, True
if isinstance(sym.node, PlaceholderNode):
return None, True, False # defer later in the caller
# Support type aliases, like `_Meta: TypeAlias = type`
if (
isinstance(sym.node, TypeAlias)
and sym.node.no_args
and isinstance(sym.node.target, ProperType)
and isinstance(sym.node.target, Instance)
):
metaclass_info: Node | None = sym.node.target.type
else:
metaclass_info = sym.node
if not isinstance(metaclass_info, TypeInfo) or metaclass_info.tuple_type is not None:
self.fail(f'Invalid metaclass "{metaclass_name}"', metaclass_expr)
return None, False, False
if not metaclass_info.is_metaclass():
self.fail(
'Metaclasses not inheriting from "type" are not supported', metaclass_expr
)
return None, False, False
inst = fill_typevars(metaclass_info)
assert isinstance(inst, Instance)
declared_metaclass = inst
return declared_metaclass, False, False
def recalculate_metaclass(self, defn: ClassDef, declared_metaclass: Instance | None) -> None:
defn.info.declared_metaclass = declared_metaclass
defn.info.metaclass_type = defn.info.calculate_metaclass_type()
if any(info.is_protocol for info in defn.info.mro):
if (
not defn.info.metaclass_type
or defn.info.metaclass_type.type.fullname == "builtins.type"
):
# All protocols and their subclasses have ABCMeta metaclass by default.
# TODO: add a metaclass conflict check if there is another metaclass.
abc_meta = self.named_type_or_none("abc.ABCMeta", [])
if abc_meta is not None: # May be None in tests with incomplete lib-stub.
defn.info.metaclass_type = abc_meta
if defn.info.metaclass_type and defn.info.metaclass_type.type.has_base("enum.EnumMeta"):
defn.info.is_enum = True
if defn.type_vars:
self.fail("Enum class cannot be generic", defn)
#
# Imports
#
def visit_import(self, i: Import) -> None:
self.statement = i
for id, as_id in i.ids:
# Modules imported in a stub file without using 'import X as X' won't get exported
# When implicit re-exporting is disabled, we have the same behavior as stubs.
use_implicit_reexport = not self.is_stub_file and self.options.implicit_reexport
if as_id is not None:
base_id = id
imported_id = as_id
module_public = use_implicit_reexport or id == as_id
else:
base_id = id.split(".")[0]
imported_id = base_id
module_public = use_implicit_reexport
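# For example, `import foo.bar` binds the top-level name "foo" to the foo
# package, while `import foo.bar as fb` binds "fb" directly to the foo.bar
# module (hypothetical module names).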
if base_id in self.modules:
node = self.modules[base_id]
if self.is_func_scope():
kind = LDEF
elif self.type is not None:
kind = MDEF
else:
kind = GDEF
symbol = SymbolTableNode(
kind, node, module_public=module_public, module_hidden=not module_public
)
self.add_imported_symbol(
imported_id,
symbol,
context=i,
module_public=module_public,
module_hidden=not module_public,
)
else:
self.add_unknown_imported_symbol(
imported_id,
context=i,
target_name=base_id,
module_public=module_public,
module_hidden=not module_public,
)
def visit_import_from(self, imp: ImportFrom) -> None:
self.statement = imp
module_id = self.correct_relative_import(imp)
module = self.modules.get(module_id)
for id, as_id in imp.names:
fullname = module_id + "." + id
self.set_future_import_flags(fullname)
if module is None:
node = None
elif module_id == self.cur_mod_id and fullname in self.modules:
# Submodule takes precedence over a definition in the surrounding package, for
# compatibility with runtime semantics in typical use cases. This
# could more precisely model runtime semantics by taking into account
# the line number beyond which the local definition should take
# precedence, but doesn't seem to be important in most use cases.
node = SymbolTableNode(GDEF, self.modules[fullname])
else:
if id == as_id == "__all__" and module_id in self.export_map:
self.all_exports[:] = self.export_map[module_id]
node = module.names.get(id)
missing_submodule = False
imported_id = as_id or id
# Modules imported in a stub file without using 'from Y import X as X' will
# not get exported.
# When implicit re-exporting is disabled, we have the same behavior as stubs.
use_implicit_reexport = not self.is_stub_file and self.options.implicit_reexport
module_public = use_implicit_reexport or (as_id is not None and id == as_id)
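# E.g. in a stub, `from pkg import helper as helper` re-exports the name, while
# `from pkg import helper` keeps it module-private unless implicit re-export is
# enabled (hypothetical names).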
# If the module does not contain a symbol with the name 'id',
# try checking if it's a module instead.
if not node:
mod = self.modules.get(fullname)
if mod is not None:
kind = self.current_symbol_kind()
node = SymbolTableNode(kind, mod)
elif fullname in self.missing_modules:
missing_submodule = True
# If it is still not resolved, check for a module level __getattr__
if module and not node and "__getattr__" in module.names:
# We store the fullname of the original definition so that we can
# detect whether two imported names refer to the same thing.
fullname = module_id + "." + id
gvar = self.create_getattr_var(module.names["__getattr__"], imported_id, fullname)
if gvar:
self.add_symbol(
imported_id,
gvar,
imp,
module_public=module_public,
module_hidden=not module_public,
)
continue
if node:
self.process_imported_symbol(
node, module_id, id, imported_id, fullname, module_public, context=imp
)
if node.module_hidden:
self.report_missing_module_attribute(
module_id,
id,
imported_id,
module_public=module_public,
module_hidden=not module_public,
context=imp,
add_unknown_imported_symbol=False,
)
elif module and not missing_submodule:
# Target module exists but the imported name is missing or hidden.
self.report_missing_module_attribute(
module_id,
id,
imported_id,
module_public=module_public,
module_hidden=not module_public,
context=imp,
)
else:
# Import of a missing (sub)module.
self.add_unknown_imported_symbol(
imported_id,
imp,
target_name=fullname,
module_public=module_public,
module_hidden=not module_public,
)
def process_imported_symbol(
self,
node: SymbolTableNode,
module_id: str,
id: str,
imported_id: str,
fullname: str,
module_public: bool,
context: ImportBase,
) -> None:
module_hidden = not module_public and (
# `from package import submodule` should work regardless of whether package
# re-exports submodule, so we shouldn't hide it
not isinstance(node.node, MypyFile)
or fullname not in self.modules
# but given `from somewhere import random_unrelated_module` we should hide
# random_unrelated_module
or not fullname.startswith(self.cur_mod_id + ".")
)
if isinstance(node.node, PlaceholderNode):
if self.final_iteration:
self.report_missing_module_attribute(
module_id,
id,
imported_id,
module_public=module_public,
module_hidden=module_hidden,
context=context,
)
return
else:
# This might become a type.
self.mark_incomplete(
imported_id,
node.node,
module_public=module_public,
module_hidden=module_hidden,
becomes_typeinfo=True,
)
# NOTE: we take the original node even for final `Var`s. This is to support
# a common pattern when constants are re-exported (same applies to import *).
self.add_imported_symbol(
imported_id, node, context, module_public=module_public, module_hidden=module_hidden
)
def report_missing_module_attribute(
self,
import_id: str,
source_id: str,
imported_id: str,
module_public: bool,
module_hidden: bool,
context: Node,
add_unknown_imported_symbol: bool = True,
) -> None:
# Missing attribute.
if self.is_incomplete_namespace(import_id):
# We don't know whether the name will be there, since the namespace
# is incomplete. Defer the current target.
self.mark_incomplete(
imported_id, context, module_public=module_public, module_hidden=module_hidden
)
return
message = f'Module "{import_id}" has no attribute "{source_id}"'
# Suggest alternatives, if any match is found.
module = self.modules.get(import_id)
if module:
if source_id in module.names.keys() and not module.names[source_id].module_public:
message = (
f'Module "{import_id}" does not explicitly export attribute "{source_id}"'
)
else:
alternatives = set(module.names.keys()).difference({source_id})
matches = best_matches(source_id, alternatives, n=3)
if matches:
suggestion = f"; maybe {pretty_seq(matches, 'or')}?"
message += f"{suggestion}"
self.fail(message, context, code=codes.ATTR_DEFINED)
if add_unknown_imported_symbol:
self.add_unknown_imported_symbol(
imported_id,
context,
target_name=None,
module_public=module_public,
module_hidden=not module_public,
)
if import_id == "typing":
# The user probably has a missing definition in a test fixture. Let's verify.
fullname = f"builtins.{source_id.lower()}"
if (
self.lookup_fully_qualified_or_none(fullname) is None
and fullname in SUGGESTED_TEST_FIXTURES
):
# Yes. Generate a helpful note.
self.msg.add_fixture_note(fullname, context)
else:
typing_extensions = self.modules.get("typing_extensions")
if typing_extensions and source_id in typing_extensions.names:
self.msg.note(
f"Use `from typing_extensions import {source_id}` instead",
context,
code=codes.ATTR_DEFINED,
)
self.msg.note(
"See https://mypy.readthedocs.io/en/stable/runtime_troubles.html#using-new-additions-to-the-typing-module",
context,
code=codes.ATTR_DEFINED,
)
def process_import_over_existing_name(
self,
imported_id: str,
existing_symbol: SymbolTableNode,
module_symbol: SymbolTableNode,
import_node: ImportBase,
) -> bool:
if existing_symbol.node is module_symbol.node:
# We added this symbol on previous iteration.
return False
if existing_symbol.kind in (LDEF, GDEF, MDEF) and isinstance(
existing_symbol.node, (Var, FuncDef, TypeInfo, Decorator, TypeAlias)
):
# This is a valid import over an existing definition in the file. Construct a dummy
# assignment that we'll use to type check the import.
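# E.g. for a module containing `def f() -> int: ...` followed by
# `from other import f` (hypothetical), we synthesize the assignment `f = f`
# so the type checker can verify that the imported symbol is compatible.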
lvalue = NameExpr(imported_id)
lvalue.kind = existing_symbol.kind
lvalue.node = existing_symbol.node
rvalue = NameExpr(imported_id)
rvalue.kind = module_symbol.kind
rvalue.node = module_symbol.node
if isinstance(rvalue.node, TypeAlias):
# Suppress bogus errors from the dummy assignment if rvalue is an alias.
# Otherwise mypy may complain that alias is invalid in runtime context.
rvalue.is_alias_rvalue = True
assignment = AssignmentStmt([lvalue], rvalue)
for node in assignment, lvalue, rvalue:
node.set_line(import_node)
import_node.assignments.append(assignment)
return True
return False
def correct_relative_import(self, node: ImportFrom | ImportAll) -> str:
import_id, ok = correct_relative_import(
self.cur_mod_id, node.relative, node.id, self.cur_mod_node.is_package_init_file()
)
if not ok:
self.fail("Relative import climbs too many namespaces", node)
return import_id
def visit_import_all(self, i: ImportAll) -> None:
i_id = self.correct_relative_import(i)
if i_id in self.modules:
m = self.modules[i_id]
if self.is_incomplete_namespace(i_id):
# Any names could be missing from the current namespace if the target module
# namespace is incomplete.
self.mark_incomplete("*", i)
for name, node in m.names.items():
fullname = i_id + "." + name
self.set_future_import_flags(fullname)
if node is None:
continue
# if '__all__' exists, all nodes not included have had module_public set to
# False, and we can skip checking '_' because it's been explicitly included.
if node.module_public and (not name.startswith("_") or "__all__" in m.names):
if isinstance(node.node, MypyFile):
# Star import of submodule from a package, add it as a dependency.
self.imports.add(node.node.fullname)
# `from x import *` always reexports symbols
self.add_imported_symbol(
name, node, context=i, module_public=True, module_hidden=False
)
else:
# Don't add any dummy symbols for 'from x import *' if 'x' is unknown.
pass
#
# Assignment
#
def visit_assignment_expr(self, s: AssignmentExpr) -> None:
s.value.accept(self)
if self.is_func_scope():
if not self.check_valid_comprehension(s):
return
self.analyze_lvalue(s.target, escape_comprehensions=True, has_explicit_value=True)
def check_valid_comprehension(self, s: AssignmentExpr) -> bool:
"""Check that assignment expression is not nested within comprehension at class scope.
class C:
[(j := i) for i in [1, 2, 3]]
is a syntax error that is not enforced by the Python parser, but at later steps.
"""
for i, scope_type in enumerate(reversed(self.scope_stack)):
if scope_type != SCOPE_COMPREHENSION and i < len(self.locals) - 1:
if self.locals[-1 - i] is None:
self.fail(
"Assignment expression within a comprehension"
" cannot be used in a class body",
s,
code=codes.SYNTAX,
serious=True,
blocker=True,
)
return False
break
return True
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
self.statement = s
# Special case assignment like X = X.
if self.analyze_identity_global_assignment(s):
return
tag = self.track_incomplete_refs()
# Here we have a chicken and egg problem: at this stage we can't call
# can_be_type_alias(), because we have not enough information about rvalue.
# But we can't use a full visit because it may emit extra incomplete refs (namely
# when analysing any type applications there) thus preventing the further analysis.
# To break the tie, we first analyse rvalue partially, if it can be a type alias.
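# E.g. for a recursive alias like `A = List["A"]` (hypothetical), analyzing the
# index arguments fully would register incomplete refs for "A", so we only do a
# shallow pass over the type application here.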
if self.can_possibly_be_type_form(s):
old_basic_type_applications = self.basic_type_applications
self.basic_type_applications = True
with self.allow_unbound_tvars_set():
s.rvalue.accept(self)
self.basic_type_applications = old_basic_type_applications
elif self.can_possibly_be_typevarlike_declaration(s):
# Allow unbound tvars inside TypeVarLike defaults to be evaluated later
with self.allow_unbound_tvars_set():
s.rvalue.accept(self)
else:
s.rvalue.accept(self)
if self.found_incomplete_ref(tag) or self.should_wait_rhs(s.rvalue):
# Initializer couldn't be fully analyzed. Defer the current node and give up.
# Make sure that if we skip the definition of some local names, they can't be
# added later in this scope, since an earlier definition should take precedence.
for expr in names_modified_by_assignment(s):
self.mark_incomplete(expr.name, expr)
return
if self.can_possibly_be_type_form(s):
# Now re-visit those rvalues for which we skipped type applications above.
# This should be safe, as the semantic analyzer is generally idempotent.
with self.allow_unbound_tvars_set():
s.rvalue.accept(self)
# The r.h.s. is now ready to be classified, first check if it is a special form:
special_form = False
# * type alias
if self.check_and_set_up_type_alias(s):
s.is_alias_def = True
special_form = True
elif isinstance(s.rvalue, CallExpr):
# * type variable definition
if self.process_typevar_declaration(s):
special_form = True
elif self.process_paramspec_declaration(s):
special_form = True
elif self.process_typevartuple_declaration(s):
special_form = True
# * type constructors
elif self.analyze_namedtuple_assign(s):
special_form = True
elif self.analyze_typeddict_assign(s):
special_form = True
elif self.newtype_analyzer.process_newtype_declaration(s):
special_form = True
elif self.analyze_enum_assign(s):
special_form = True
if special_form:
self.record_special_form_lvalue(s)
return
# Clear the alias flag if assignment turns out not a special form after all. It
# may be set to True while there were still placeholders due to forward refs.
s.is_alias_def = False
# OK, this is a regular assignment, perform the necessary analysis steps.
s.is_final_def = self.unwrap_final(s)
self.analyze_lvalues(s)
self.check_final_implicit_def(s)
self.store_final_status(s)
self.check_classvar(s)
self.process_type_annotation(s)
self.apply_dynamic_class_hook(s)
if not s.type:
self.process_module_assignment(s.lvalues, s.rvalue, s)
self.process__all__(s)
self.process__deletable__(s)
self.process__slots__(s)
def analyze_identity_global_assignment(self, s: AssignmentStmt) -> bool:
"""Special case 'X = X' in global scope.
This allows supporting some important use cases.
Return true if special casing was applied.
"""
if not isinstance(s.rvalue, NameExpr) or len(s.lvalues) != 1:
# Not of form 'X = X'
return False
lvalue = s.lvalues[0]
if not isinstance(lvalue, NameExpr) or s.rvalue.name != lvalue.name:
# Not of form 'X = X'
return False
if self.type is not None or self.is_func_scope():
# Not in global scope
return False
# It's an assignment like 'X = X' in the global scope.
name = lvalue.name
sym = self.lookup(name, s)
if sym is None:
if self.final_iteration:
# Fall back to normal assignment analysis.
return False
else:
self.defer()
return True
else:
if sym.node is None:
# Something special -- fall back to normal assignment analysis.
return False
if name not in self.globals:
# The name is from builtins. Add an alias to the current module.
self.add_symbol(name, sym.node, s)
if not isinstance(sym.node, PlaceholderNode):
for node in s.rvalue, lvalue:
node.node = sym.node
node.kind = GDEF
node.fullname = sym.node.fullname
return True
def should_wait_rhs(self, rv: Expression) -> bool:
"""Can we already classify this r.h.s. of an assignment or should we wait?
This returns True if we don't have enough information to decide whether
an assignment is just a normal variable definition or a special form.
Always return False if this is a final iteration. This will typically cause
the lvalue to be classified as a variable plus emit an error.
"""
if self.final_iteration:
# No chance, nothing has changed.
return False
if isinstance(rv, NameExpr):
n = self.lookup(rv.name, rv)
if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
return True
elif isinstance(rv, MemberExpr):
fname = get_member_expr_fullname(rv)
if fname:
n = self.lookup_qualified(fname, rv, suppress_errors=True)
if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
return True
elif isinstance(rv, IndexExpr) and isinstance(rv.base, RefExpr):
return self.should_wait_rhs(rv.base)
elif isinstance(rv, CallExpr) and isinstance(rv.callee, RefExpr):
# This is only relevant for builtin SCC where things like 'TypeVar'
# may be not ready.
return self.should_wait_rhs(rv.callee)
return False
def can_be_type_alias(self, rv: Expression, allow_none: bool = False) -> bool:
"""Is this a valid r.h.s. for an alias definition?
Note: this function should be only called for expressions where self.should_wait_rhs()
returns False.
"""
if isinstance(rv, RefExpr) and self.is_type_ref(rv, bare=True):
return True
if isinstance(rv, IndexExpr) and self.is_type_ref(rv.base, bare=False):
return True
if self.is_none_alias(rv):
return True
if allow_none and isinstance(rv, NameExpr) and rv.fullname == "builtins.None":
return True
if isinstance(rv, OpExpr) and rv.op == "|":
if self.is_stub_file:
return True
if self.can_be_type_alias(rv.left, allow_none=True) and self.can_be_type_alias(
rv.right, allow_none=True
):
return True
return False
def can_possibly_be_type_form(self, s: AssignmentStmt) -> bool:
"""Like can_be_type_alias(), but simpler and doesn't require fully analyzed rvalue.
Instead, use lvalues/annotations structure to figure out whether this can potentially be
a type alias definition, NamedTuple, or TypedDict. Another difference from above function
is that we are only interested in IndexExpr, CallExpr and OpExpr rvalues, since only those
can be potentially recursive (things like `A = A` are never valid).
"""
if len(s.lvalues) > 1:
return False
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.callee, RefExpr):
ref = s.rvalue.callee.fullname
return ref in TPDICT_NAMES or ref in TYPED_NAMEDTUPLE_NAMES
if not isinstance(s.lvalues[0], NameExpr):
return False
if s.unanalyzed_type is not None and not self.is_pep_613(s):
return False
if not isinstance(s.rvalue, (IndexExpr, OpExpr)):
return False
# Something that looks like Foo = Bar[Baz, ...]
return True
def can_possibly_be_typevarlike_declaration(self, s: AssignmentStmt) -> bool:
"""Check if r.h.s. can be a TypeVarLike declaration."""
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
return False
if not isinstance(s.rvalue, CallExpr) or not isinstance(s.rvalue.callee, NameExpr):
return False
ref = s.rvalue.callee
ref.accept(self)
return ref.fullname in TYPE_VAR_LIKE_NAMES
def is_type_ref(self, rv: Expression, bare: bool = False) -> bool:
"""Does this expression refer to a type?
This includes:
* Special forms, like Any or Union
* Classes (except subscripted enums)
* Other type aliases
* PlaceholderNodes with becomes_typeinfo=True (these can be not ready class
definitions, and not ready aliases).
If bare is True, this is not a base of an index expression, so some special
forms are not valid (like a bare Union).
Note: This method should be only used in context of a type alias definition.
This method can only return True for RefExprs, to check if C[int] is a valid
target for type alias call this method on expr.base (i.e. on C in C[int]).
See also can_be_type_alias().
"""
if not isinstance(rv, RefExpr):
return False
if isinstance(rv.node, TypeVarLikeExpr):
self.fail(f'Type variable "{rv.fullname}" is invalid as target for type alias', rv)
return False
if bare:
# These three are valid even if bare, for example
# A = Tuple is just equivalent to A = Tuple[Any, ...].
valid_refs = {"typing.Any", "typing.Tuple", "typing.Callable"}
else:
valid_refs = type_constructors
if isinstance(rv.node, TypeAlias) or rv.fullname in valid_refs:
return True
if isinstance(rv.node, TypeInfo):
if bare:
return True
# Assignment color = Color['RED'] defines a variable, not an alias.
return not rv.node.is_enum
if isinstance(rv.node, Var):
return rv.node.fullname in NEVER_NAMES
if isinstance(rv, NameExpr):
n = self.lookup(rv.name, rv)
if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
return True
elif isinstance(rv, MemberExpr):
fname = get_member_expr_fullname(rv)
if fname:
# The r.h.s. for variable definitions may not be a type reference but just
# an instance attribute, so suppress the errors.
n = self.lookup_qualified(fname, rv, suppress_errors=True)
if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
return True
return False
def is_none_alias(self, node: Expression) -> bool:
"""Is this a r.h.s. for a None alias?
We special case the assignments like Void = type(None), to allow using
Void in type annotations.
"""
if isinstance(node, CallExpr):
if (
isinstance(node.callee, NameExpr)
and len(node.args) == 1
and isinstance(node.args[0], NameExpr)
):
call = self.lookup_qualified(node.callee.name, node.callee)
arg = self.lookup_qualified(node.args[0].name, node.args[0])
if (
call is not None
and call.node
and call.node.fullname == "builtins.type"
and arg is not None
and arg.node
and arg.node.fullname == "builtins.None"
):
return True
return False
def record_special_form_lvalue(self, s: AssignmentStmt) -> None:
"""Record minimal necessary information about l.h.s. of a special form.
This exists mostly for compatibility with the old semantic analyzer.
"""
lvalue = s.lvalues[0]
assert isinstance(lvalue, NameExpr)
lvalue.is_special_form = True
if self.current_symbol_kind() == GDEF:
lvalue.fullname = self.qualified_name(lvalue.name)
lvalue.kind = self.current_symbol_kind()
def analyze_enum_assign(self, s: AssignmentStmt) -> bool:
"""Check if s defines an Enum."""
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, EnumCallExpr):
# Already analyzed enum -- nothing to do here.
return True
return self.enum_call_analyzer.process_enum_call(s, self.is_func_scope())
def analyze_namedtuple_assign(self, s: AssignmentStmt) -> bool:
"""Check if s defines a namedtuple."""
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, NamedTupleExpr):
if s.rvalue.analyzed.info.tuple_type and not has_placeholder(
s.rvalue.analyzed.info.tuple_type
):
return True # This is a valid and analyzed named tuple definition, nothing to do here.
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], (NameExpr, MemberExpr)):
return False
lvalue = s.lvalues[0]
if isinstance(lvalue, MemberExpr):
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.callee, RefExpr):
fullname = s.rvalue.callee.fullname
if fullname == "collections.namedtuple" or fullname in TYPED_NAMEDTUPLE_NAMES:
self.fail("NamedTuple type as an attribute is not supported", lvalue)
return False
name = lvalue.name
namespace = self.qualified_name(name)
with self.tvar_scope_frame(self.tvar_scope.class_frame(namespace)):
internal_name, info, tvar_defs = self.named_tuple_analyzer.check_namedtuple(
s.rvalue, name, self.is_func_scope()
)
if internal_name is None:
return False
if internal_name != name:
self.fail(
'First argument to namedtuple() should be "{}", not "{}"'.format(
name, internal_name
),
s.rvalue,
code=codes.NAME_MATCH,
)
return True
# Yes, it's a valid namedtuple, but defer if it is not ready.
if not info:
self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
else:
self.setup_type_vars(info.defn, tvar_defs)
self.setup_alias_type_vars(info.defn)
return True
def analyze_typeddict_assign(self, s: AssignmentStmt) -> bool:
"""Check if s defines a typed dict."""
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, TypedDictExpr):
if s.rvalue.analyzed.info.typeddict_type and not has_placeholder(
s.rvalue.analyzed.info.typeddict_type
):
# This is a valid and analyzed typed dict definition, nothing to do here.
return True
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], (NameExpr, MemberExpr)):
return False
lvalue = s.lvalues[0]
name = lvalue.name
namespace = self.qualified_name(name)
with self.tvar_scope_frame(self.tvar_scope.class_frame(namespace)):
is_typed_dict, info, tvar_defs = self.typed_dict_analyzer.check_typeddict(
s.rvalue, name, self.is_func_scope()
)
if not is_typed_dict:
return False
if isinstance(lvalue, MemberExpr):
self.fail("TypedDict type as attribute is not supported", lvalue)
return False
# Yes, it's a valid typed dict, but defer if it is not ready.
if not info:
self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
else:
defn = info.defn
self.setup_type_vars(defn, tvar_defs)
self.setup_alias_type_vars(defn)
return True
def analyze_lvalues(self, s: AssignmentStmt) -> None:
# We cannot use s.type, because analyze_simple_literal_type() will set it.
explicit = s.unanalyzed_type is not None
if self.is_final_type(s.unanalyzed_type):
# We need to exclude bare Final.
assert isinstance(s.unanalyzed_type, UnboundType)
if not s.unanalyzed_type.args:
explicit = False
if s.rvalue:
if isinstance(s.rvalue, TempNode):
has_explicit_value = not s.rvalue.no_rhs
else:
has_explicit_value = True
else:
has_explicit_value = False
for lval in s.lvalues:
self.analyze_lvalue(
lval,
explicit_type=explicit,
is_final=s.is_final_def,
has_explicit_value=has_explicit_value,
)
def apply_dynamic_class_hook(self, s: AssignmentStmt) -> None:
if not isinstance(s.rvalue, CallExpr):
return
fname = ""
call = s.rvalue
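# Walk a possible chain of calls to find a callee we can name; e.g. for
# `Base = declarative_base()` (hypothetical, as used by ORM-style plugins) a
# plugin can register a hook for the callee's fullname and synthesize a class
# for the assigned name.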
while True:
if isinstance(call.callee, RefExpr):
fname = call.callee.fullname
# check if method call
if not fname and isinstance(call.callee, MemberExpr):
callee_expr = call.callee.expr
if isinstance(callee_expr, RefExpr) and callee_expr.fullname:
method_name = call.callee.name
fname = callee_expr.fullname + "." + method_name
elif (
isinstance(callee_expr, IndexExpr)
and isinstance(callee_expr.base, RefExpr)
and isinstance(callee_expr.analyzed, TypeApplication)
):
method_name = call.callee.name
fname = callee_expr.base.fullname + "." + method_name
elif isinstance(callee_expr, CallExpr):
# check if chain call
call = callee_expr
continue
break
if not fname:
return
hook = self.plugin.get_dynamic_class_hook(fname)
if not hook:
return
for lval in s.lvalues:
if not isinstance(lval, NameExpr):
continue
hook(DynamicClassDefContext(call, lval.name, self))
def unwrap_final(self, s: AssignmentStmt) -> bool:
"""Strip Final[...] if present in an assignment.
This is done to invoke type inference during type checking phase for this
assignment. Also, Final[...] doesn't affect type in any way -- it is rather an
access qualifier for given `Var`.
Also perform various consistency checks.
Returns True if Final[...] was present.
"""
if not s.unanalyzed_type or not self.is_final_type(s.unanalyzed_type):
return False
assert isinstance(s.unanalyzed_type, UnboundType)
if len(s.unanalyzed_type.args) > 1:
self.fail("Final[...] takes at most one type argument", s.unanalyzed_type)
invalid_bare_final = False
if not s.unanalyzed_type.args:
s.type = None
if isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs:
invalid_bare_final = True
self.fail("Type in Final[...] can only be omitted if there is an initializer", s)
else:
s.type = s.unanalyzed_type.args[0]
if s.type is not None and self.is_classvar(s.type):
self.fail("Variable should not be annotated with both ClassVar and Final", s)
return False
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], RefExpr):
self.fail("Invalid final declaration", s)
return False
lval = s.lvalues[0]
assert isinstance(lval, RefExpr)
# Reset inferred status if it was set due to simple literal rvalue on previous iteration.
# TODO: this is a best-effort quick fix, we should avoid the need to manually sync this,
# see https://github.com/python/mypy/issues/6458.
if lval.is_new_def:
lval.is_inferred_def = s.type is None
if self.loop_depth[-1] > 0:
self.fail("Cannot use Final inside a loop", s)
if self.type and self.type.is_protocol:
if self.is_class_scope():
self.msg.protocol_members_cant_be_final(s)
if (
isinstance(s.rvalue, TempNode)
and s.rvalue.no_rhs
and not self.is_stub_file
and not self.is_class_scope()
):
if not invalid_bare_final: # Skip extra error messages.
self.msg.final_without_value(s)
return True
def check_final_implicit_def(self, s: AssignmentStmt) -> None:
"""Do basic checks for final declaration on self in __init__.
Additional re-definition checks are performed by `analyze_lvalue`.
"""
if not s.is_final_def:
return
lval = s.lvalues[0]
assert isinstance(lval, RefExpr)
if isinstance(lval, MemberExpr):
if not self.is_self_member_ref(lval):
self.fail("Final can be only applied to a name or an attribute on self", s)
s.is_final_def = False
return
else:
assert self.function_stack
if self.function_stack[-1].name != "__init__":
self.fail("Can only declare a final attribute in class body or __init__", s)
s.is_final_def = False
return
def store_final_status(self, s: AssignmentStmt) -> None:
"""If this is a locally valid final declaration, set the corresponding flag on `Var`."""
if s.is_final_def:
if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
node = s.lvalues[0].node
if isinstance(node, Var):
node.is_final = True
if s.type:
node.final_value = constant_fold_expr(s.rvalue, self.cur_mod_id)
if self.is_class_scope() and (
isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs
):
node.final_unset_in_class = True
else:
for lval in self.flatten_lvalues(s.lvalues):
# Special case: we are working with an `Enum`:
#
# class MyEnum(Enum):
# key = 'some value'
#
# Here `key` is implicitly final. In runtime, code like
#
# MyEnum.key = 'modified'
#
# will fail with `AttributeError: Cannot reassign members.`
# That's why we need to replicate this.
if (
isinstance(lval, NameExpr)
and isinstance(self.type, TypeInfo)
and self.type.is_enum
):
cur_node = self.type.names.get(lval.name, None)
if (
cur_node
and isinstance(cur_node.node, Var)
and not (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)
):
# Double underscored members are writable on an `Enum`.
# (Except read-only `__members__` but that is handled in type checker)
cur_node.node.is_final = s.is_final_def = not is_dunder(cur_node.node.name)
# Special case: deferred initialization of a final attribute in __init__.
# In this case we just pretend this is a valid final definition to suppress
# errors about assigning to final attribute.
if isinstance(lval, MemberExpr) and self.is_self_member_ref(lval):
assert self.type, "Self member outside a class"
cur_node = self.type.names.get(lval.name, None)
if cur_node and isinstance(cur_node.node, Var) and cur_node.node.is_final:
assert self.function_stack
top_function = self.function_stack[-1]
if (
top_function.name == "__init__"
and cur_node.node.final_unset_in_class
and not cur_node.node.final_set_in_init
and not (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)
):
cur_node.node.final_set_in_init = True
s.is_final_def = True
def flatten_lvalues(self, lvalues: list[Expression]) -> list[Expression]:
res: list[Expression] = []
for lv in lvalues:
if isinstance(lv, (TupleExpr, ListExpr)):
res.extend(self.flatten_lvalues(lv.items))
else:
res.append(lv)
return res
def process_type_annotation(self, s: AssignmentStmt) -> None:
"""Analyze type annotation or infer simple literal type."""
if s.type:
lvalue = s.lvalues[-1]
allow_tuple_literal = isinstance(lvalue, TupleExpr)
analyzed = self.anal_type(s.type, allow_tuple_literal=allow_tuple_literal)
# Don't store not ready types (including placeholders).
if analyzed is None or has_placeholder(analyzed):
self.defer(s)
return
s.type = analyzed
if (
self.type
and self.type.is_protocol
and isinstance(lvalue, NameExpr)
and isinstance(s.rvalue, TempNode)
and s.rvalue.no_rhs
):
if isinstance(lvalue.node, Var):
lvalue.node.is_abstract_var = True
else:
if (
self.type
and self.type.is_protocol
and self.is_annotated_protocol_member(s)
and not self.is_func_scope()
):
self.fail("All protocol members must have explicitly declared types", s)
# Set the type if the rvalue is a simple literal (even if the above error occurred).
if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
ref_expr = s.lvalues[0]
safe_literal_inference = True
if self.type and isinstance(ref_expr, NameExpr) and len(self.type.mro) > 1:
# Check if there is a definition in supertype. If yes, we can't safely
# decide here what to infer: int or Literal[42].
safe_literal_inference = self.type.mro[1].get(ref_expr.name) is None
if safe_literal_inference and ref_expr.is_inferred_def:
s.type = self.analyze_simple_literal_type(s.rvalue, s.is_final_def)
if s.type:
# Store type into nodes.
for lvalue in s.lvalues:
self.store_declared_types(lvalue, s.type)
def is_annotated_protocol_member(self, s: AssignmentStmt) -> bool:
"""Check whether a protocol member is annotated.
There are some exceptions that can be left unannotated, like ``__slots__``."""
return any(
(isinstance(lv, NameExpr) and lv.name != "__slots__" and lv.is_inferred_def)
for lv in s.lvalues
)
def analyze_simple_literal_type(self, rvalue: Expression, is_final: bool) -> Type | None:
"""Return builtins.int if rvalue is an int literal, etc.
If this is a 'Final' context, we return "Literal[...]" instead.
"""
if self.function_stack:
# Skip inside a function; this is to avoid confusing
# the code that handles dead code due to isinstance()
# inside type variables with value restrictions (like
# AnyStr).
return None
value = constant_fold_expr(rvalue, self.cur_mod_id)
if value is None or isinstance(value, complex):
return None
if isinstance(value, bool):
type_name = "builtins.bool"
elif isinstance(value, int):
type_name = "builtins.int"
elif isinstance(value, str):
type_name = "builtins.str"
elif isinstance(value, float):
type_name = "builtins.float"
typ = self.named_type_or_none(type_name)
if typ and is_final:
return typ.copy_modified(last_known_value=LiteralType(value=value, fallback=typ))
return typ
def analyze_alias(
self,
name: str,
rvalue: Expression,
allow_placeholder: bool = False,
declared_type_vars: TypeVarLikeList | None = None,
all_declared_type_params_names: list[str] | None = None,
python_3_12_type_alias: bool = False,
) -> tuple[Type | None, list[TypeVarLikeType], set[str], list[str], bool]:
"""Check if 'rvalue' is a valid type allowed for aliasing (e.g. not a type variable).
If yes, return the corresponding type, a list of
qualified type variable names for generic aliases, a set of names the alias depends on,
and a list of type variables if the alias is generic.
A schematic example for the dependencies:
A = int
B = str
analyze_alias(Dict[A, B])[2] == {'__main__.A', '__main__.B'}
"""
dynamic = bool(self.function_stack and self.function_stack[-1].is_dynamic())
global_scope = not self.type and not self.function_stack
try:
typ = expr_to_unanalyzed_type(
rvalue, self.options, self.is_stub_file, lookup_qualified=self.lookup_qualified
)
except TypeTranslationError:
self.fail(
"Invalid type alias: expression is not a valid type", rvalue, code=codes.VALID_TYPE
)
return None, [], set(), [], False
found_type_vars = self.find_type_var_likes(typ)
tvar_defs: list[TypeVarLikeType] = []
namespace = self.qualified_name(name)
alias_type_vars = found_type_vars if declared_type_vars is None else declared_type_vars
last_tvar_name_with_default: str | None = None
with self.tvar_scope_frame(self.tvar_scope.class_frame(namespace)):
for name, tvar_expr in alias_type_vars:
tvar_expr.default = tvar_expr.default.accept(
TypeVarDefaultTranslator(self, tvar_expr.name, typ)
)
tvar_def = self.tvar_scope.bind_new(name, tvar_expr)
if last_tvar_name_with_default is not None and not tvar_def.has_default():
self.msg.tvar_without_default_type(
tvar_def.name, last_tvar_name_with_default, typ
)
tvar_def.default = AnyType(TypeOfAny.from_error)
elif tvar_def.has_default():
last_tvar_name_with_default = tvar_def.name
tvar_defs.append(tvar_def)
if python_3_12_type_alias:
with self.allow_unbound_tvars_set():
rvalue.accept(self)
analyzed, depends_on = analyze_type_alias(
typ,
self,
self.tvar_scope,
self.plugin,
self.options,
self.is_typeshed_stub_file,
allow_placeholder=allow_placeholder,
in_dynamic_func=dynamic,
global_scope=global_scope,
allowed_alias_tvars=tvar_defs,
alias_type_params_names=all_declared_type_params_names,
python_3_12_type_alias=python_3_12_type_alias,
)
# There can be only one variadic variable at most, the error is reported elsewhere.
new_tvar_defs = []
variadic = False
for td in tvar_defs:
if isinstance(td, TypeVarTupleType):
if variadic:
continue
variadic = True
new_tvar_defs.append(td)
qualified_tvars = [node.fullname for _name, node in alias_type_vars]
empty_tuple_index = typ.empty_tuple_index if isinstance(typ, UnboundType) else False
return analyzed, new_tvar_defs, depends_on, qualified_tvars, empty_tuple_index
def is_pep_613(self, s: AssignmentStmt) -> bool:
if s.unanalyzed_type is not None and isinstance(s.unanalyzed_type, UnboundType):
lookup = self.lookup_qualified(s.unanalyzed_type.name, s, suppress_errors=True)
if lookup and lookup.fullname in TYPE_ALIAS_NAMES:
return True
return False
def check_and_set_up_type_alias(self, s: AssignmentStmt) -> bool:
"""Check if assignment creates a type alias and set it up as needed.
Return True if it is a type alias (even if the target is not ready),
or False otherwise.
Note: the resulting types for subscripted (including generic) aliases
are also stored in rvalue.analyzed.
"""
if s.invalid_recursive_alias:
return True
lvalue = s.lvalues[0]
if len(s.lvalues) > 1 or not isinstance(lvalue, NameExpr):
# First rule: Only simple assignments like Alias = ... create aliases.
return False
pep_613 = self.is_pep_613(s)
if not pep_613 and s.unanalyzed_type is not None:
# Second rule: Explicit type (cls: Type[A] = A) always creates variable, not alias.
# unless using PEP 613 `cls: TypeAlias = A`
return False
# It can be `A = TypeAliasType('A', ...)` call, in this case,
# we just take the second argument and analyze it:
type_params: TypeVarLikeList | None
all_type_params_names: list[str] | None
if self.check_type_alias_type_call(s.rvalue, name=lvalue.name):
rvalue = s.rvalue.args[1]
pep_695 = True
type_params, all_type_params_names = self.analyze_type_alias_type_params(s.rvalue)
else:
rvalue = s.rvalue
pep_695 = False
type_params = None
all_type_params_names = None
if isinstance(rvalue, CallExpr) and rvalue.analyzed:
return False
existing = self.current_symbol_table().get(lvalue.name)
# Third rule: type aliases can't be re-defined. For example:
# A: Type[float] = int
# A = float # OK, but this doesn't define an alias
# B = int
# B = float # Error!
# Don't create an alias in these cases:
if existing and (
isinstance(existing.node, Var) # existing variable
or (isinstance(existing.node, TypeAlias) and not s.is_alias_def) # existing alias
or (isinstance(existing.node, PlaceholderNode) and existing.node.node.line < s.line)
): # previous incomplete definition
# TODO: find a more robust way to track the order of definitions.
# Note: if is_alias_def=True, this is just a node from previous iteration.
if isinstance(existing.node, TypeAlias) and not s.is_alias_def:
self.fail(
'Cannot assign multiple types to name "{}"'
' without an explicit "Type[...]" annotation'.format(lvalue.name),
lvalue,
)
return False
non_global_scope = self.type or self.is_func_scope()
if not pep_613 and not pep_695 and isinstance(rvalue, RefExpr) and non_global_scope:
# Fourth rule (special case): Non-subscripted right hand side creates a variable
# at class and function scopes. For example:
#
# class Model:
# ...
# class C:
# model = Model # this is automatically a variable with type 'Type[Model]'
#
# without this rule, this typical use case will require a lot of explicit
# annotations (see the second rule).
return False
if not pep_613 and not pep_695 and not self.can_be_type_alias(rvalue):
return False
if existing and not isinstance(existing.node, (PlaceholderNode, TypeAlias)):
# Cannot redefine existing node as type alias.
return False
res: Type | None = None
if self.is_none_alias(rvalue):
res = NoneType()
alias_tvars: list[TypeVarLikeType] = []
depends_on: set[str] = set()
qualified_tvars: list[str] = []
empty_tuple_index = False
else:
tag = self.track_incomplete_refs()
res, alias_tvars, depends_on, qualified_tvars, empty_tuple_index = self.analyze_alias(
lvalue.name,
rvalue,
allow_placeholder=True,
declared_type_vars=type_params,
all_declared_type_params_names=all_type_params_names,
)
if not res:
return False
if not self.is_func_scope():
# Only marking incomplete for top-level placeholders makes recursive aliases like
# `A = Sequence[str | A]` valid here, similar to how we treat base classes in class
# definitions, allowing `class str(Sequence[str]): ...`
incomplete_target = isinstance(res, ProperType) and isinstance(
res, PlaceholderType
)
else:
incomplete_target = has_placeholder(res)
if self.found_incomplete_ref(tag) or incomplete_target:
# Since we have got here, we know this must be a type alias (incomplete refs
# may appear in nested positions), therefore use becomes_typeinfo=True.
self.mark_incomplete(lvalue.name, rvalue, becomes_typeinfo=True)
return True
self.add_type_alias_deps(depends_on)
# In addition to the aliases used, we add deps on unbound
# type variables, since they are erased from target type.
self.add_type_alias_deps(qualified_tvars)
# The above are only direct deps on other aliases.
# For subscripted aliases, type deps from expansion are added in deps.py
# (because the type is stored).
check_for_explicit_any(res, self.options, self.is_typeshed_stub_file, self.msg, context=s)
# When this type alias gets "inlined", the Any is not explicit anymore,
# so we need to replace it with non-explicit Anys.
res = make_any_non_explicit(res)
if self.options.disallow_any_unimported and has_any_from_unimported_type(res):
# Only show error message once, when the type is fully analyzed.
if not has_placeholder(res):
self.msg.unimported_type_becomes_any("Type alias target", res, s)
res = make_any_non_unimported(res)
# Note: with the new (lazy) type alias representation we only need to set no_args to True
# if the expected number of arguments is non-zero, so that aliases like `A = List` work
# but not aliases like `A = TypeAliasType("A", List)` as these need explicit type params.
# However, eagerly expanding aliases like Text = str is a nice performance optimization.
no_args = (
isinstance(res, ProperType)
and isinstance(res, Instance)
and not res.args
and not empty_tuple_index
and not pep_695
)
if isinstance(res, ProperType) and isinstance(res, Instance):
if not validate_instance(res, self.fail, empty_tuple_index):
fix_instance(res, self.fail, self.note, disallow_any=False, options=self.options)
# Aliases defined within functions can't be accessed outside
# the function, since the symbol table will no longer
# exist. Work around by expanding them eagerly when used.
eager = self.is_func_scope()
alias_node = TypeAlias(
res,
self.qualified_name(lvalue.name),
s.line,
s.column,
alias_tvars=alias_tvars,
no_args=no_args,
eager=eager,
python_3_12_type_alias=pep_695,
)
if isinstance(s.rvalue, (IndexExpr, CallExpr, OpExpr)) and (
not isinstance(rvalue, OpExpr)
or (self.options.python_version >= (3, 10) or self.is_stub_file)
):
# Note: CallExpr is for "void = type(None)" and OpExpr is for "X | Y" union syntax.
if not isinstance(s.rvalue.analyzed, TypeAliasExpr):
# Any existing node will be updated in-place below.
s.rvalue.analyzed = TypeAliasExpr(alias_node)
s.rvalue.analyzed.line = s.line
# we use the column from resulting target, to get better location for errors
s.rvalue.analyzed.column = res.column
elif isinstance(s.rvalue, RefExpr):
s.rvalue.is_alias_rvalue = True
if existing:
# An alias gets updated.
updated = False
if isinstance(existing.node, TypeAlias):
if existing.node.target != res:
# Copy expansion to the existing alias, this matches how we update base classes
# for a TypeInfo _in place_ if there are nested placeholders.
existing.node.target = res
existing.node.alias_tvars = alias_tvars
existing.node.no_args = no_args
updated = True
# Invalidate recursive status cache in case it was previously set.
existing.node._is_recursive = None
else:
# Otherwise just replace existing placeholder with type alias.
existing.node = alias_node
updated = True
if updated:
if self.final_iteration:
self.cannot_resolve_name(lvalue.name, "name", s)
return True
else:
# We need to defer so that this change can get propagated to base classes.
self.defer(s, force_progress=True)
else:
self.add_symbol(lvalue.name, alias_node, s)
if isinstance(rvalue, RefExpr) and isinstance(rvalue.node, TypeAlias):
alias_node.normalized = rvalue.node.normalized
current_node = existing.node if existing else alias_node
assert isinstance(current_node, TypeAlias)
self.disable_invalid_recursive_aliases(s, current_node, s.rvalue)
if self.is_class_scope():
assert self.type is not None
if self.type.is_protocol:
self.fail("Type aliases are prohibited in protocol bodies", s)
if not lvalue.name[0].isupper():
self.note("Use variable annotation syntax to define protocol members", s)
return True
def check_type_alias_type_call(self, rvalue: Expression, *, name: str) -> TypeGuard[CallExpr]:
if not isinstance(rvalue, CallExpr):
return False
names = ["typing_extensions.TypeAliasType"]
if self.options.python_version >= (3, 12):
names.append("typing.TypeAliasType")
if not refers_to_fullname(rvalue.callee, tuple(names)):
return False
return self.check_typevarlike_name(rvalue, name, rvalue)
def analyze_type_alias_type_params(
self, rvalue: CallExpr
) -> tuple[TypeVarLikeList, list[str]]:
"""Analyze type_params of TypeAliasType.
Returns declared unbound type variable expressions and a list of all declared type
variable names for error reporting.
"""
if "type_params" in rvalue.arg_names:
type_params_arg = rvalue.args[rvalue.arg_names.index("type_params")]
if not isinstance(type_params_arg, TupleExpr):
self.fail(
"Tuple literal expected as the type_params argument to TypeAliasType",
type_params_arg,
)
return [], []
type_params = type_params_arg.items
else:
return [], []
declared_tvars: TypeVarLikeList = []
all_declared_tvar_names: list[str] = [] # includes bound type variables
have_type_var_tuple = False
for tp_expr in type_params:
if isinstance(tp_expr, StarExpr):
tp_expr.valid = False
self.analyze_type_expr(tp_expr)
try:
base = self.expr_to_unanalyzed_type(tp_expr)
except TypeTranslationError:
continue
if not isinstance(base, UnboundType):
continue
tag = self.track_incomplete_refs()
tvar = self.analyze_unbound_tvar_impl(base, is_typealias_param=True)
if tvar:
if isinstance(tvar[1], TypeVarTupleExpr):
if have_type_var_tuple:
self.fail(
"Can only use one TypeVarTuple in type_params argument to TypeAliasType",
base,
code=codes.TYPE_VAR,
)
have_type_var_tuple = True
continue
have_type_var_tuple = True
elif not self.found_incomplete_ref(tag):
sym = self.lookup_qualified(base.name, base)
if sym and isinstance(sym.node, TypeVarLikeExpr):
all_declared_tvar_names.append(sym.node.name) # Error will be reported later
else:
self.fail(
"Free type variable expected in type_params argument to TypeAliasType",
base,
code=codes.TYPE_VAR,
)
if sym and sym.fullname in ("typing.Unpack", "typing_extensions.Unpack"):
self.note(
"Don't Unpack type variables in type_params", base, code=codes.TYPE_VAR
)
continue
if tvar in declared_tvars:
self.fail(
f'Duplicate type variable "{tvar[0]}" in type_params argument to TypeAliasType',
base,
code=codes.TYPE_VAR,
)
continue
if tvar:
all_declared_tvar_names.append(tvar[0])
declared_tvars.append(tvar)
return declared_tvars, all_declared_tvar_names
def disable_invalid_recursive_aliases(
self, s: AssignmentStmt | TypeAliasStmt, current_node: TypeAlias, ctx: Context
) -> None:
"""Prohibit and fix recursive type aliases that are invalid/unsupported."""
messages = []
if is_invalid_recursive_alias({current_node}, current_node.target):
target = (
"tuple" if isinstance(get_proper_type(current_node.target), TupleType) else "union"
)
messages.append(f"Invalid recursive alias: a {target} item of itself")
if detect_diverging_alias(
current_node, current_node.target, self.lookup_qualified, self.tvar_scope
):
messages.append("Invalid recursive alias: type variable nesting on right hand side")
if messages:
current_node.target = AnyType(TypeOfAny.from_error)
s.invalid_recursive_alias = True
for msg in messages:
self.fail(msg, ctx)
def analyze_lvalue(
self,
lval: Lvalue,
nested: bool = False,
explicit_type: bool = False,
is_final: bool = False,
escape_comprehensions: bool = False,
has_explicit_value: bool = False,
) -> None:
"""Analyze an lvalue or assignment target.
Args:
lval: The target lvalue
nested: If true, the lvalue is within a tuple or list lvalue expression
explicit_type: Assignment has type annotation
escape_comprehensions: If we are inside a comprehension, set the variable
in the enclosing scope instead. This implements
https://www.python.org/dev/peps/pep-0572/#scope-of-the-target
"""
if escape_comprehensions:
assert isinstance(lval, NameExpr), "assignment expression target must be NameExpr"
if isinstance(lval, NameExpr):
self.analyze_name_lvalue(
lval,
explicit_type,
is_final,
escape_comprehensions,
has_explicit_value=has_explicit_value,
)
elif isinstance(lval, MemberExpr):
self.analyze_member_lvalue(lval, explicit_type, is_final, has_explicit_value)
if explicit_type and not self.is_self_member_ref(lval):
self.fail("Type cannot be declared in assignment to non-self attribute", lval)
elif isinstance(lval, IndexExpr):
if explicit_type:
self.fail("Unexpected type declaration", lval)
lval.accept(self)
elif isinstance(lval, TupleExpr):
self.analyze_tuple_or_list_lvalue(lval, explicit_type)
elif isinstance(lval, StarExpr):
if nested:
self.analyze_lvalue(lval.expr, nested, explicit_type)
else:
self.fail("Starred assignment target must be in a list or tuple", lval)
else:
self.fail("Invalid assignment target", lval)
def analyze_name_lvalue(
self,
lvalue: NameExpr,
explicit_type: bool,
is_final: bool,
escape_comprehensions: bool,
has_explicit_value: bool,
) -> None:
"""Analyze an lvalue that targets a name expression.
Arguments are similar to "analyze_lvalue".
"""
if lvalue.node:
# This has been bound already in a previous iteration.
return
name = lvalue.name
if self.is_alias_for_final_name(name):
if is_final:
self.fail("Cannot redefine an existing name as final", lvalue)
else:
self.msg.cant_assign_to_final(name, self.type is not None, lvalue)
kind = self.current_symbol_kind()
names = self.current_symbol_table(escape_comprehensions=escape_comprehensions)
existing = names.get(name)
outer = self.is_global_or_nonlocal(name)
if (
kind == MDEF
and isinstance(self.type, TypeInfo)
and self.type.is_enum
and not name.startswith("__")
):
# Special case: we need to be sure that `Enum` keys are unique.
if existing is not None and not isinstance(existing.node, PlaceholderNode):
self.fail(
'Attempted to reuse member name "{}" in Enum definition "{}"'.format(
name, self.type.name
),
lvalue,
)
if (not existing or isinstance(existing.node, PlaceholderNode)) and not outer:
# Define new variable.
var = self.make_name_lvalue_var(lvalue, kind, not explicit_type, has_explicit_value)
added = self.add_symbol(name, var, lvalue, escape_comprehensions=escape_comprehensions)
# Only bind expression if we successfully added name to symbol table.
if added:
lvalue.is_new_def = True
lvalue.is_inferred_def = True
lvalue.kind = kind
lvalue.node = var
if kind == GDEF:
lvalue.fullname = var._fullname
else:
lvalue.fullname = lvalue.name
if self.is_func_scope():
if unmangle(name) == "_":
# Special case for assignment to local named '_': always infer 'Any'.
typ = AnyType(TypeOfAny.special_form)
self.store_declared_types(lvalue, typ)
if is_final and self.is_final_redefinition(kind, name):
self.fail("Cannot redefine an existing name as final", lvalue)
else:
self.make_name_lvalue_point_to_existing_def(lvalue, explicit_type, is_final)
def is_final_redefinition(self, kind: int, name: str) -> bool:
if kind == GDEF:
return self.is_mangled_global(name) and not self.is_initial_mangled_global(name)
elif kind == MDEF and self.type:
return unmangle(name) + "'" in self.type.names
return False
def is_alias_for_final_name(self, name: str) -> bool:
if self.is_func_scope():
if not name.endswith("'"):
# Not a mangled name -- can't be an alias
return False
name = unmangle(name)
assert self.locals[-1] is not None, "No locals at function scope"
existing = self.locals[-1].get(name)
return existing is not None and is_final_node(existing.node)
elif self.type is not None:
orig_name = unmangle(name) + "'"
if name == orig_name:
return False
existing = self.type.names.get(orig_name)
return existing is not None and is_final_node(existing.node)
else:
orig_name = unmangle(name) + "'"
if name == orig_name:
return False
existing = self.globals.get(orig_name)
return existing is not None and is_final_node(existing.node)
def make_name_lvalue_var(
self, lvalue: NameExpr, kind: int, inferred: bool, has_explicit_value: bool
) -> Var:
"""Return a Var node for an lvalue that is a name expression."""
name = lvalue.name
v = Var(name)
v.set_line(lvalue)
v.is_inferred = inferred
if kind == MDEF:
assert self.type is not None
v.info = self.type
v.is_initialized_in_class = True
v.allow_incompatible_override = name in ALLOW_INCOMPATIBLE_OVERRIDE
if kind != LDEF:
v._fullname = self.qualified_name(name)
else:
# fullname should never stay None
v._fullname = name
v.is_ready = False # Type not inferred yet
v.has_explicit_value = has_explicit_value
return v
def make_name_lvalue_point_to_existing_def(
self, lval: NameExpr, explicit_type: bool, is_final: bool
) -> None:
"""Update an lvalue to point to existing definition in the same scope.
Arguments are similar to "analyze_lvalue".
Assume that an existing name exists.
"""
if is_final:
# Redefining an existing name with final is always an error.
self.fail("Cannot redefine an existing name as final", lval)
original_def = self.lookup(lval.name, lval, suppress_errors=True)
if original_def is None and self.type and not self.is_func_scope():
# Workaround to allow "x, x = ..." in class body.
original_def = self.type.get(lval.name)
if explicit_type:
# Don't re-bind if there is a type annotation.
self.name_already_defined(lval.name, lval, original_def)
else:
# Bind to an existing name.
if original_def:
self.bind_name_expr(lval, original_def)
else:
self.name_not_defined(lval.name, lval)
self.check_lvalue_validity(lval.node, lval)
def analyze_tuple_or_list_lvalue(self, lval: TupleExpr, explicit_type: bool = False) -> None:
"""Analyze an lvalue or assignment target that is a list or tuple."""
items = lval.items
star_exprs = [item for item in items if isinstance(item, StarExpr)]
if len(star_exprs) > 1:
self.fail("Two starred expressions in assignment", lval)
else:
if len(star_exprs) == 1:
star_exprs[0].valid = True
for i in items:
self.analyze_lvalue(
lval=i,
nested=True,
explicit_type=explicit_type,
# Lists and tuples always have explicit values defined:
# `a, b, c = value`
has_explicit_value=True,
)
def analyze_member_lvalue(
self, lval: MemberExpr, explicit_type: bool, is_final: bool, has_explicit_value: bool
) -> None:
"""Analyze lvalue that is a member expression.
Arguments:
lval: The target lvalue
explicit_type: Assignment has type annotation
is_final: Is the target final
"""
if lval.node:
# This has been bound already in a previous iteration.
return
lval.accept(self)
if self.is_self_member_ref(lval):
assert self.type, "Self member outside a class"
cur_node = self.type.names.get(lval.name)
node = self.type.get(lval.name)
if cur_node and is_final:
# Overrides will be checked in type checker.
self.fail("Cannot redefine an existing name as final", lval)
# On first encounter with this definition, if this attribute was defined before
# with an inferred type and it's marked with an explicit type now, give an error.
if (
not lval.node
and cur_node
and isinstance(cur_node.node, Var)
and cur_node.node.is_inferred
and explicit_type
):
self.attribute_already_defined(lval.name, lval, cur_node)
if self.type.is_protocol and has_explicit_value and cur_node is not None:
# Make this variable non-abstract, it would be safer to do this only if we
# are inside __init__, but we do this always to preserve historical behaviour.
if isinstance(cur_node.node, Var):
cur_node.node.is_abstract_var = False
if (
# If the attribute of self is not defined, create a new Var, ...
node is None
# ... or if it is defined as abstract in a *superclass*.
or (cur_node is None and isinstance(node.node, Var) and node.node.is_abstract_var)
                # ... or if an explicit declaration on self creates a new Var.
# Note that `explicit_type` might have been erased for bare `Final`,
# so we also check if `is_final` is passed.
or (cur_node is None and (explicit_type or is_final))
):
if self.type.is_protocol and node is None:
self.fail("Protocol members cannot be defined via assignment to self", lval)
else:
# Implicit attribute definition in __init__.
lval.is_new_def = True
lval.is_inferred_def = True
v = Var(lval.name)
v.set_line(lval)
v._fullname = self.qualified_name(lval.name)
v.info = self.type
v.is_ready = False
v.explicit_self_type = explicit_type or is_final
lval.def_var = v
lval.node = v
# TODO: should we also set lval.kind = MDEF?
self.type.names[lval.name] = SymbolTableNode(MDEF, v, implicit=True)
self.check_lvalue_validity(lval.node, lval)
def is_self_member_ref(self, memberexpr: MemberExpr) -> bool:
"""Does memberexpr to refer to an attribute of self?"""
if not isinstance(memberexpr.expr, NameExpr):
return False
node = memberexpr.expr.node
return isinstance(node, Var) and node.is_self
def check_lvalue_validity(self, node: Expression | SymbolNode | None, ctx: Context) -> None:
if isinstance(node, TypeVarExpr):
self.fail("Invalid assignment target", ctx)
elif isinstance(node, TypeInfo):
self.fail(message_registry.CANNOT_ASSIGN_TO_TYPE, ctx)
def store_declared_types(self, lvalue: Lvalue, typ: Type) -> None:
if isinstance(lvalue, RefExpr):
lvalue.is_inferred_def = False
if isinstance(lvalue.node, Var):
var = lvalue.node
var.type = typ
var.is_ready = True
typ = get_proper_type(typ)
if (
var.is_final
and isinstance(typ, Instance)
and typ.last_known_value
and (not self.type or not self.type.is_enum)
):
var.final_value = typ.last_known_value.value
# If node is not a variable, we'll catch it elsewhere.
elif isinstance(lvalue, TupleExpr):
typ = get_proper_type(typ)
if isinstance(typ, TupleType):
if len(lvalue.items) != len(typ.items):
self.fail("Incompatible number of tuple items", lvalue)
return
for item, itemtype in zip(lvalue.items, typ.items):
self.store_declared_types(item, itemtype)
else:
self.fail("Tuple type expected for multiple variables", lvalue)
elif isinstance(lvalue, StarExpr):
# Historical behavior for the old parser
self.store_declared_types(lvalue.expr, typ)
else:
# This has been flagged elsewhere as an error, so just ignore here.
pass
def process_typevar_declaration(self, s: AssignmentStmt) -> bool:
"""Check if s declares a TypeVar; it yes, store it in symbol table.
Return True if this looks like a type variable declaration (but maybe
with errors), otherwise return False.
"""
call = self.get_typevarlike_declaration(s, ("typing.TypeVar", "typing_extensions.TypeVar"))
if not call:
return False
name = self.extract_typevarlike_name(s, call)
if name is None:
return False
# Constraining types
n_values = call.arg_kinds[1:].count(ARG_POS)
values = self.analyze_value_types(call.args[1 : 1 + n_values])
res = self.process_typevar_parameters(
call.args[1 + n_values :],
call.arg_names[1 + n_values :],
call.arg_kinds[1 + n_values :],
n_values,
s,
)
if res is None:
return False
variance, upper_bound, default = res
existing = self.current_symbol_table().get(name)
if existing and not (
isinstance(existing.node, PlaceholderNode)
or
# Also give error for another type variable with the same name.
(isinstance(existing.node, TypeVarExpr) and existing.node is call.analyzed)
):
self.fail(f'Cannot redefine "{name}" as a type variable', s)
return False
if self.options.disallow_any_unimported:
for idx, constraint in enumerate(values, start=1):
if has_any_from_unimported_type(constraint):
prefix = f"Constraint {idx}"
self.msg.unimported_type_becomes_any(prefix, constraint, s)
if has_any_from_unimported_type(upper_bound):
prefix = "Upper bound of type variable"
self.msg.unimported_type_becomes_any(prefix, upper_bound, s)
for t in values + [upper_bound, default]:
check_for_explicit_any(
t, self.options, self.is_typeshed_stub_file, self.msg, context=s
)
# mypyc suppresses making copies of a function to check each
# possible type, so set the upper bound to Any to prevent that
# from causing errors.
if values and self.options.mypyc:
upper_bound = AnyType(TypeOfAny.implementation_artifact)
# Yes, it's a valid type variable definition! Add it to the symbol table.
if not call.analyzed:
type_var = TypeVarExpr(
name, self.qualified_name(name), values, upper_bound, default, variance
)
type_var.line = call.line
call.analyzed = type_var
updated = True
else:
assert isinstance(call.analyzed, TypeVarExpr)
updated = (
values != call.analyzed.values
or upper_bound != call.analyzed.upper_bound
or default != call.analyzed.default
)
call.analyzed.upper_bound = upper_bound
call.analyzed.values = values
call.analyzed.default = default
if any(has_placeholder(v) for v in values):
self.process_placeholder(None, "TypeVar values", s, force_progress=updated)
elif has_placeholder(upper_bound):
self.process_placeholder(None, "TypeVar upper bound", s, force_progress=updated)
elif has_placeholder(default):
self.process_placeholder(None, "TypeVar default", s, force_progress=updated)
self.add_symbol(name, call.analyzed, s)
return True
def check_typevarlike_name(self, call: CallExpr, name: str, context: Context) -> bool:
"""Checks that the name of a TypeVar or ParamSpec matches its variable."""
name = unmangle(name)
assert isinstance(call.callee, RefExpr)
typevarlike_type = (
call.callee.name if isinstance(call.callee, NameExpr) else call.callee.fullname
)
if len(call.args) < 1:
self.fail(f"Too few arguments for {typevarlike_type}()", context)
return False
if not isinstance(call.args[0], StrExpr) or call.arg_kinds[0] != ARG_POS:
self.fail(f"{typevarlike_type}() expects a string literal as first argument", context)
return False
elif call.args[0].value != name:
msg = 'String argument 1 "{}" to {}(...) does not match variable name "{}"'
self.fail(msg.format(call.args[0].value, typevarlike_type, name), context)
return False
return True
def get_typevarlike_declaration(
self, s: AssignmentStmt, typevarlike_types: tuple[str, ...]
) -> CallExpr | None:
"""Returns the call expression if `s` is a declaration of `typevarlike_type`
(TypeVar or ParamSpec), or None otherwise.
"""
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
return None
if not isinstance(s.rvalue, CallExpr):
return None
call = s.rvalue
callee = call.callee
if not isinstance(callee, RefExpr):
return None
if callee.fullname not in typevarlike_types:
return None
return call
def process_typevar_parameters(
self,
args: list[Expression],
names: list[str | None],
kinds: list[ArgKind],
num_values: int,
context: Context,
) -> tuple[int, Type, Type] | None:
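        # Parse the keyword arguments of a TypeVar(...) call and return the
        # (variance, upper_bound, default) triple, e.g. covariant=True yields COVARIANT.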
has_values = num_values > 0
covariant = False
contravariant = False
upper_bound: Type = self.object_type()
default: Type = AnyType(TypeOfAny.from_omitted_generics)
for param_value, param_name, param_kind in zip(args, names, kinds):
if not param_kind.is_named():
self.fail(message_registry.TYPEVAR_UNEXPECTED_ARGUMENT, context)
return None
if param_name == "covariant":
if isinstance(param_value, NameExpr) and param_value.name in ("True", "False"):
covariant = param_value.name == "True"
else:
self.fail(message_registry.TYPEVAR_VARIANCE_DEF.format("covariant"), context)
return None
elif param_name == "contravariant":
if isinstance(param_value, NameExpr) and param_value.name in ("True", "False"):
contravariant = param_value.name == "True"
else:
self.fail(
message_registry.TYPEVAR_VARIANCE_DEF.format("contravariant"), context
)
return None
elif param_name == "bound":
if has_values:
self.fail("TypeVar cannot have both values and an upper bound", context)
return None
tv_arg = self.get_typevarlike_argument("TypeVar", param_name, param_value, context)
if tv_arg is None:
return None
upper_bound = tv_arg
elif param_name == "default":
tv_arg = self.get_typevarlike_argument(
"TypeVar", param_name, param_value, context, allow_unbound_tvars=True
)
default = tv_arg or AnyType(TypeOfAny.from_error)
elif param_name == "values":
# Probably using obsolete syntax with values=(...). Explain the current syntax.
self.fail('TypeVar "values" argument not supported', context)
self.fail(
"Use TypeVar('T', t, ...) instead of TypeVar('T', values=(t, ...))", context
)
return None
else:
self.fail(
f'{message_registry.TYPEVAR_UNEXPECTED_ARGUMENT}: "{param_name}"', context
)
return None
if covariant and contravariant:
self.fail("TypeVar cannot be both covariant and contravariant", context)
return None
elif num_values == 1:
self.fail(message_registry.TYPE_VAR_TOO_FEW_CONSTRAINED_TYPES, context)
return None
elif covariant:
variance = COVARIANT
elif contravariant:
variance = CONTRAVARIANT
else:
variance = INVARIANT
return variance, upper_bound, default
def get_typevarlike_argument(
self,
typevarlike_name: str,
param_name: str,
param_value: Expression,
context: Context,
*,
allow_unbound_tvars: bool = False,
allow_param_spec_literals: bool = False,
allow_unpack: bool = False,
report_invalid_typevar_arg: bool = True,
) -> ProperType | None:
try:
# We want to use our custom error message below, so we suppress
# the default error message for invalid types here.
analyzed = self.expr_to_analyzed_type(
param_value,
allow_placeholder=True,
report_invalid_types=False,
allow_unbound_tvars=allow_unbound_tvars,
allow_param_spec_literals=allow_param_spec_literals,
allow_unpack=allow_unpack,
)
if analyzed is None:
# Type variables are special: we need to place them in the symbol table
# soon, even if upper bound is not ready yet. Otherwise avoiding
# a "deadlock" in this common pattern would be tricky:
# T = TypeVar('T', bound=Custom[Any])
# class Custom(Generic[T]):
# ...
analyzed = PlaceholderType(None, [], context.line)
typ = get_proper_type(analyzed)
if report_invalid_typevar_arg and isinstance(typ, AnyType) and typ.is_from_error:
self.fail(
message_registry.TYPEVAR_ARG_MUST_BE_TYPE.format(typevarlike_name, param_name),
param_value,
)
# Note: we do not return 'None' here -- we want to continue
# using the AnyType.
return typ
except TypeTranslationError:
if report_invalid_typevar_arg:
self.fail(
message_registry.TYPEVAR_ARG_MUST_BE_TYPE.format(typevarlike_name, param_name),
param_value,
)
return None
def extract_typevarlike_name(self, s: AssignmentStmt, call: CallExpr) -> str | None:
if not call:
return None
lvalue = s.lvalues[0]
assert isinstance(lvalue, NameExpr)
if s.type:
self.fail("Cannot declare the type of a TypeVar or similar construct", s)
return None
if not self.check_typevarlike_name(call, lvalue.name, s):
return None
return lvalue.name
def process_paramspec_declaration(self, s: AssignmentStmt) -> bool:
"""Checks if s declares a ParamSpec; if yes, store it in symbol table.
Return True if this looks like a ParamSpec (maybe with errors), otherwise return False.
In the future, ParamSpec may accept bounds and variance arguments, in which
case more aggressive sharing of code with process_typevar_declaration should be pursued.
"""
call = self.get_typevarlike_declaration(
s, ("typing_extensions.ParamSpec", "typing.ParamSpec")
)
if not call:
return False
name = self.extract_typevarlike_name(s, call)
if name is None:
return False
n_values = call.arg_kinds[1:].count(ARG_POS)
if n_values != 0:
self.fail('Too many positional arguments for "ParamSpec"', s)
default: Type = AnyType(TypeOfAny.from_omitted_generics)
for param_value, param_name in zip(
call.args[1 + n_values :], call.arg_names[1 + n_values :]
):
if param_name == "default":
tv_arg = self.get_typevarlike_argument(
"ParamSpec",
param_name,
param_value,
s,
allow_unbound_tvars=True,
allow_param_spec_literals=True,
report_invalid_typevar_arg=False,
)
default = tv_arg or AnyType(TypeOfAny.from_error)
if isinstance(tv_arg, Parameters):
for i, arg_type in enumerate(tv_arg.arg_types):
typ = get_proper_type(arg_type)
if isinstance(typ, AnyType) and typ.is_from_error:
self.fail(
f"Argument {i} of ParamSpec default must be a type", param_value
)
elif (
isinstance(default, AnyType)
and default.is_from_error
or not isinstance(default, (AnyType, UnboundType))
):
self.fail(
"The default argument to ParamSpec must be a list expression, ellipsis, or a ParamSpec",
param_value,
)
default = AnyType(TypeOfAny.from_error)
else:
# ParamSpec is different from a regular TypeVar:
                # these arguments are not semantically valid, but they are allowed at runtime,
                # so we need to warn users about possible invalid usage.
self.fail(
"The variance and bound arguments to ParamSpec do not have defined semantics yet",
s,
)
# PEP 612 reserves the right to define bound, covariant and contravariant arguments to
# ParamSpec in a later PEP. If and when that happens, we should do something
# on the lines of process_typevar_parameters
if not call.analyzed:
paramspec_var = ParamSpecExpr(
name, self.qualified_name(name), self.object_type(), default, INVARIANT
)
paramspec_var.line = call.line
call.analyzed = paramspec_var
updated = True
else:
assert isinstance(call.analyzed, ParamSpecExpr)
updated = default != call.analyzed.default
call.analyzed.default = default
if has_placeholder(default):
self.process_placeholder(None, "ParamSpec default", s, force_progress=updated)
self.add_symbol(name, call.analyzed, s)
return True
def process_typevartuple_declaration(self, s: AssignmentStmt) -> bool:
"""Checks if s declares a TypeVarTuple; if yes, store it in symbol table.
Return True if this looks like a TypeVarTuple (maybe with errors), otherwise return False.
"""
call = self.get_typevarlike_declaration(
s, ("typing_extensions.TypeVarTuple", "typing.TypeVarTuple")
)
if not call:
return False
n_values = call.arg_kinds[1:].count(ARG_POS)
if n_values != 0:
self.fail('Too many positional arguments for "TypeVarTuple"', s)
default: Type = AnyType(TypeOfAny.from_omitted_generics)
for param_value, param_name in zip(
call.args[1 + n_values :], call.arg_names[1 + n_values :]
):
if param_name == "default":
tv_arg = self.get_typevarlike_argument(
"TypeVarTuple",
param_name,
param_value,
s,
allow_unbound_tvars=True,
report_invalid_typevar_arg=False,
allow_unpack=True,
)
default = tv_arg or AnyType(TypeOfAny.from_error)
if not isinstance(default, UnpackType):
self.fail(
"The default argument to TypeVarTuple must be an Unpacked tuple",
param_value,
)
default = AnyType(TypeOfAny.from_error)
else:
self.fail(f'Unexpected keyword argument "{param_name}" for "TypeVarTuple"', s)
name = self.extract_typevarlike_name(s, call)
if name is None:
return False
# PEP 646 does not specify the behavior of variance, constraints, or bounds.
if not call.analyzed:
tuple_fallback = self.named_type("builtins.tuple", [self.object_type()])
typevartuple_var = TypeVarTupleExpr(
name,
self.qualified_name(name),
# Upper bound for *Ts is *tuple[object, ...], it can never be object.
tuple_fallback.copy_modified(),
tuple_fallback,
default,
INVARIANT,
)
typevartuple_var.line = call.line
call.analyzed = typevartuple_var
updated = True
else:
assert isinstance(call.analyzed, TypeVarTupleExpr)
updated = default != call.analyzed.default
call.analyzed.default = default
if has_placeholder(default):
self.process_placeholder(None, "TypeVarTuple default", s, force_progress=updated)
self.add_symbol(name, call.analyzed, s)
return True
def basic_new_typeinfo(self, name: str, basetype_or_fallback: Instance, line: int) -> TypeInfo:
if self.is_func_scope() and not self.type and "@" not in name:
name += "@" + str(line)
class_def = ClassDef(name, Block([]))
if self.is_func_scope() and not self.type:
# Full names of generated classes should always be prefixed with the module names
# even if they are nested in a function, since these classes will be (de-)serialized.
# (Note that the caller should append @line to the name to avoid collisions.)
# TODO: clean this up, see #6422.
class_def.fullname = self.cur_mod_id + "." + self.qualified_name(name)
else:
class_def.fullname = self.qualified_name(name)
info = TypeInfo(SymbolTable(), class_def, self.cur_mod_id)
class_def.info = info
mro = basetype_or_fallback.type.mro
if not mro:
            # Probably an error; we should not crash, so generate something meaningful.
mro = [basetype_or_fallback.type, self.object_type().type]
info.mro = [info] + mro
info.bases = [basetype_or_fallback]
return info
def analyze_value_types(self, items: list[Expression]) -> list[Type]:
"""Analyze types from values expressions in type variable definition."""
result: list[Type] = []
for node in items:
try:
analyzed = self.anal_type(
self.expr_to_unanalyzed_type(node), allow_placeholder=True
)
if analyzed is None:
# Type variables are special: we need to place them in the symbol table
# soon, even if some value is not ready yet, see process_typevar_parameters()
# for an example.
analyzed = PlaceholderType(None, [], node.line)
result.append(analyzed)
except TypeTranslationError:
self.fail("Type expected", node)
result.append(AnyType(TypeOfAny.from_error))
return result
def check_classvar(self, s: AssignmentStmt) -> None:
"""Check if assignment defines a class variable."""
lvalue = s.lvalues[0]
if len(s.lvalues) != 1 or not isinstance(lvalue, RefExpr):
return
if not s.type or not self.is_classvar(s.type):
return
if self.is_class_scope() and isinstance(lvalue, NameExpr):
node = lvalue.node
if isinstance(node, Var):
node.is_classvar = True
analyzed = self.anal_type(s.type)
assert self.type is not None
if analyzed is not None and set(get_type_vars(analyzed)) & set(
self.type.defn.type_vars
):
# This means that we have a type var defined inside of a ClassVar.
                    # This is not allowed by PEP 526.
# See https://github.com/python/mypy/issues/11538
self.fail(message_registry.CLASS_VAR_WITH_TYPEVARS, s)
if (
analyzed is not None
and self.type.self_type in get_type_vars(analyzed)
and self.type.defn.type_vars
):
self.fail(message_registry.CLASS_VAR_WITH_GENERIC_SELF, s)
elif not isinstance(lvalue, MemberExpr) or self.is_self_member_ref(lvalue):
# In case of member access, report error only when assigning to self
# Other kinds of member assignments should be already reported
self.fail_invalid_classvar(lvalue)
def is_classvar(self, typ: Type) -> bool:
if not isinstance(typ, UnboundType):
return False
sym = self.lookup_qualified(typ.name, typ)
if not sym or not sym.node:
return False
return sym.node.fullname == "typing.ClassVar"
def is_final_type(self, typ: Type | None) -> bool:
if not isinstance(typ, UnboundType):
return False
sym = self.lookup_qualified(typ.name, typ)
if not sym or not sym.node:
return False
return sym.node.fullname in FINAL_TYPE_NAMES
def fail_invalid_classvar(self, context: Context) -> None:
self.fail(message_registry.CLASS_VAR_OUTSIDE_OF_CLASS, context)
def process_module_assignment(
self, lvals: list[Lvalue], rval: Expression, ctx: AssignmentStmt
) -> None:
"""Propagate module references across assignments.
Recursively handles the simple form of iterable unpacking; doesn't
handle advanced unpacking with *rest, dictionary unpacking, etc.
In an expression like x = y = z, z is the rval and lvals will be [x,
y].
"""
if isinstance(rval, (TupleExpr, ListExpr)) and all(
isinstance(v, TupleExpr) for v in lvals
):
# rval and all lvals are either list or tuple, so we are dealing
# with unpacking assignment like `x, y = a, b`. Mypy didn't
# understand our all(isinstance(...)), so cast them as TupleExpr
# so mypy knows it is safe to access their .items attribute.
seq_lvals = cast(List[TupleExpr], lvals)
# given an assignment like:
# (x, y) = (m, n) = (a, b)
# we now have:
# seq_lvals = [(x, y), (m, n)]
# seq_rval = (a, b)
# We now zip this into:
# elementwise_assignments = [(a, x, m), (b, y, n)]
# where each elementwise assignment includes one element of rval and the
# corresponding element of each lval. Basically we unpack
# (x, y) = (m, n) = (a, b)
# into elementwise assignments
# x = m = a
# y = n = b
# and then we recursively call this method for each of those assignments.
# If the rval and all lvals are not all of the same length, zip will just ignore
# extra elements, so no error will be raised here; mypy will later complain
# about the length mismatch in type-checking.
elementwise_assignments = zip(rval.items, *[v.items for v in seq_lvals])
for rv, *lvs in elementwise_assignments:
self.process_module_assignment(lvs, rv, ctx)
elif isinstance(rval, RefExpr):
rnode = self.lookup_type_node(rval)
if rnode and isinstance(rnode.node, MypyFile):
for lval in lvals:
if not isinstance(lval, RefExpr):
continue
# respect explicitly annotated type
if isinstance(lval.node, Var) and lval.node.type is not None:
continue
# We can handle these assignments to locals and to self
if isinstance(lval, NameExpr):
lnode = self.current_symbol_table().get(lval.name)
elif isinstance(lval, MemberExpr) and self.is_self_member_ref(lval):
assert self.type is not None
lnode = self.type.names.get(lval.name)
else:
continue
if lnode:
if isinstance(lnode.node, MypyFile) and lnode.node is not rnode.node:
assert isinstance(lval, (NameExpr, MemberExpr))
self.fail(
'Cannot assign multiple modules to name "{}" '
'without explicit "types.ModuleType" annotation'.format(lval.name),
ctx,
)
# never create module alias except on initial var definition
elif lval.is_inferred_def:
assert rnode.node is not None
lnode.node = rnode.node
def process__all__(self, s: AssignmentStmt) -> None:
"""Export names if argument is a __all__ assignment."""
if (
len(s.lvalues) == 1
and isinstance(s.lvalues[0], NameExpr)
and s.lvalues[0].name == "__all__"
and s.lvalues[0].kind == GDEF
and isinstance(s.rvalue, (ListExpr, TupleExpr))
):
self.add_exports(s.rvalue.items)
def process__deletable__(self, s: AssignmentStmt) -> None:
if not self.options.mypyc:
return
if (
len(s.lvalues) == 1
and isinstance(s.lvalues[0], NameExpr)
and s.lvalues[0].name == "__deletable__"
and s.lvalues[0].kind == MDEF
):
rvalue = s.rvalue
if not isinstance(rvalue, (ListExpr, TupleExpr)):
self.fail('"__deletable__" must be initialized with a list or tuple expression', s)
return
items = rvalue.items
attrs = []
for item in items:
if not isinstance(item, StrExpr):
self.fail('Invalid "__deletable__" item; string literal expected', item)
else:
attrs.append(item.value)
assert self.type
self.type.deletable_attributes = attrs
def process__slots__(self, s: AssignmentStmt) -> None:
"""
        Process ``__slots__`` if it is defined in the current type.
See: https://docs.python.org/3/reference/datamodel.html#slots
"""
# Later we can support `__slots__` defined as `__slots__ = other = ('a', 'b')`
if (
isinstance(self.type, TypeInfo)
and len(s.lvalues) == 1
and isinstance(s.lvalues[0], NameExpr)
and s.lvalues[0].name == "__slots__"
and s.lvalues[0].kind == MDEF
):
# We understand `__slots__` defined as string, tuple, list, set, and dict:
if not isinstance(s.rvalue, (StrExpr, ListExpr, TupleExpr, SetExpr, DictExpr)):
                # For example, `__slots__` can be defined as a variable;
                # we don't support that for now.
return
if any(p.slots is None for p in self.type.mro[1:-1]):
# At least one type in mro (excluding `self` and `object`)
# does not have concrete `__slots__` defined. Ignoring.
return
concrete_slots = True
rvalue: list[Expression] = []
if isinstance(s.rvalue, StrExpr):
rvalue.append(s.rvalue)
elif isinstance(s.rvalue, (ListExpr, TupleExpr, SetExpr)):
rvalue.extend(s.rvalue.items)
else:
# We have a special treatment of `dict` with possible `{**kwargs}` usage.
# In this case we consider all `__slots__` to be non-concrete.
for key, _ in s.rvalue.items:
if concrete_slots and key is not None:
rvalue.append(key)
else:
concrete_slots = False
slots = []
for item in rvalue:
# Special case for `'__dict__'` value:
# when specified it will still allow any attribute assignment.
if isinstance(item, StrExpr) and item.value != "__dict__":
slots.append(item.value)
else:
concrete_slots = False
if not concrete_slots:
# Some slot items are dynamic, we don't want any false positives,
# so, we just pretend that this type does not have any slots at all.
return
# We need to copy all slots from super types:
for super_type in self.type.mro[1:-1]:
assert super_type.slots is not None
slots.extend(super_type.slots)
self.type.slots = set(slots)
#
# Misc statements
#
def visit_block(self, b: Block) -> None:
if b.is_unreachable:
return
self.block_depth[-1] += 1
for s in b.body:
self.accept(s)
self.block_depth[-1] -= 1
def visit_block_maybe(self, b: Block | None) -> None:
if b:
self.visit_block(b)
def visit_expression_stmt(self, s: ExpressionStmt) -> None:
self.statement = s
s.expr.accept(self)
def visit_return_stmt(self, s: ReturnStmt) -> None:
self.statement = s
if not self.is_func_scope():
self.fail('"return" outside function', s)
if s.expr:
s.expr.accept(self)
def visit_raise_stmt(self, s: RaiseStmt) -> None:
self.statement = s
if s.expr:
s.expr.accept(self)
if s.from_expr:
s.from_expr.accept(self)
def visit_assert_stmt(self, s: AssertStmt) -> None:
self.statement = s
if s.expr:
s.expr.accept(self)
if s.msg:
s.msg.accept(self)
def visit_operator_assignment_stmt(self, s: OperatorAssignmentStmt) -> None:
self.statement = s
s.lvalue.accept(self)
s.rvalue.accept(self)
if (
isinstance(s.lvalue, NameExpr)
and s.lvalue.name == "__all__"
and s.lvalue.kind == GDEF
and isinstance(s.rvalue, (ListExpr, TupleExpr))
):
self.add_exports(s.rvalue.items)
def visit_while_stmt(self, s: WhileStmt) -> None:
self.statement = s
s.expr.accept(self)
self.loop_depth[-1] += 1
s.body.accept(self)
self.loop_depth[-1] -= 1
self.visit_block_maybe(s.else_body)
def visit_for_stmt(self, s: ForStmt) -> None:
if s.is_async:
if not self.is_func_scope() or not self.function_stack[-1].is_coroutine:
self.fail(message_registry.ASYNC_FOR_OUTSIDE_COROUTINE, s, code=codes.SYNTAX)
self.statement = s
s.expr.accept(self)
# Bind index variables and check if they define new names.
self.analyze_lvalue(s.index, explicit_type=s.index_type is not None)
if s.index_type:
if self.is_classvar(s.index_type):
self.fail_invalid_classvar(s.index)
allow_tuple_literal = isinstance(s.index, TupleExpr)
analyzed = self.anal_type(s.index_type, allow_tuple_literal=allow_tuple_literal)
if analyzed is not None:
self.store_declared_types(s.index, analyzed)
s.index_type = analyzed
self.loop_depth[-1] += 1
self.visit_block(s.body)
self.loop_depth[-1] -= 1
self.visit_block_maybe(s.else_body)
def visit_break_stmt(self, s: BreakStmt) -> None:
self.statement = s
if self.loop_depth[-1] == 0:
self.fail('"break" outside loop', s, serious=True, blocker=True)
def visit_continue_stmt(self, s: ContinueStmt) -> None:
self.statement = s
if self.loop_depth[-1] == 0:
self.fail('"continue" outside loop', s, serious=True, blocker=True)
def visit_if_stmt(self, s: IfStmt) -> None:
self.statement = s
infer_reachability_of_if_statement(s, self.options)
for i in range(len(s.expr)):
s.expr[i].accept(self)
self.visit_block(s.body[i])
self.visit_block_maybe(s.else_body)
def visit_try_stmt(self, s: TryStmt) -> None:
self.statement = s
self.analyze_try_stmt(s, self)
def analyze_try_stmt(self, s: TryStmt, visitor: NodeVisitor[None]) -> None:
s.body.accept(visitor)
for type, var, handler in zip(s.types, s.vars, s.handlers):
if type:
type.accept(visitor)
if var:
self.analyze_lvalue(var)
handler.accept(visitor)
if s.else_body:
s.else_body.accept(visitor)
if s.finally_body:
s.finally_body.accept(visitor)
def visit_with_stmt(self, s: WithStmt) -> None:
self.statement = s
types: list[Type] = []
if s.is_async:
if not self.is_func_scope() or not self.function_stack[-1].is_coroutine:
self.fail(message_registry.ASYNC_WITH_OUTSIDE_COROUTINE, s, code=codes.SYNTAX)
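        # A "# type:" comment on a with statement provides types for the targets,
        # e.g. "with open(f) as x, open(g) as y:  # type: IO[str], IO[str]".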
if s.unanalyzed_type:
assert isinstance(s.unanalyzed_type, ProperType)
actual_targets = [t for t in s.target if t is not None]
if len(actual_targets) == 0:
# We have a type for no targets
self.fail('Invalid type comment: "with" statement has no targets', s)
elif len(actual_targets) == 1:
# We have one target and one type
types = [s.unanalyzed_type]
elif isinstance(s.unanalyzed_type, TupleType):
# We have multiple targets and multiple types
if len(actual_targets) == len(s.unanalyzed_type.items):
types = s.unanalyzed_type.items.copy()
else:
# But it's the wrong number of items
self.fail('Incompatible number of types for "with" targets', s)
else:
# We have multiple targets and one type
self.fail('Multiple types expected for multiple "with" targets', s)
new_types: list[Type] = []
for e, n in zip(s.expr, s.target):
e.accept(self)
if n:
self.analyze_lvalue(n, explicit_type=s.unanalyzed_type is not None)
# Since we have a target, pop the next type from types
if types:
t = types.pop(0)
if self.is_classvar(t):
self.fail_invalid_classvar(n)
allow_tuple_literal = isinstance(n, TupleExpr)
analyzed = self.anal_type(t, allow_tuple_literal=allow_tuple_literal)
if analyzed is not None:
# TODO: Deal with this better
new_types.append(analyzed)
self.store_declared_types(n, analyzed)
s.analyzed_types = new_types
self.visit_block(s.body)
def visit_del_stmt(self, s: DelStmt) -> None:
self.statement = s
s.expr.accept(self)
if not self.is_valid_del_target(s.expr):
self.fail("Invalid delete target", s)
def is_valid_del_target(self, s: Expression) -> bool:
if isinstance(s, (IndexExpr, NameExpr, MemberExpr)):
return True
elif isinstance(s, (TupleExpr, ListExpr)):
return all(self.is_valid_del_target(item) for item in s.items)
else:
return False
def visit_global_decl(self, g: GlobalDecl) -> None:
self.statement = g
for name in g.names:
if name in self.nonlocal_decls[-1]:
self.fail(f'Name "{name}" is nonlocal and global', g)
self.global_decls[-1].add(name)
def visit_nonlocal_decl(self, d: NonlocalDecl) -> None:
self.statement = d
if self.is_module_scope():
self.fail("nonlocal declaration not allowed at module level", d)
else:
for name in d.names:
for table, scope_type in zip(
reversed(self.locals[:-1]), reversed(self.scope_stack[:-1])
):
if table is not None and name in table:
if scope_type == SCOPE_ANNOTATION:
self.fail(
f'nonlocal binding not allowed for type parameter "{name}"', d
)
break
else:
self.fail(f'No binding for nonlocal "{name}" found', d)
if self.locals[-1] is not None and name in self.locals[-1]:
self.fail(
'Name "{}" is already defined in local '
"scope before nonlocal declaration".format(name),
d,
)
if name in self.global_decls[-1]:
self.fail(f'Name "{name}" is nonlocal and global', d)
self.nonlocal_decls[-1].add(name)
def visit_match_stmt(self, s: MatchStmt) -> None:
self.statement = s
infer_reachability_of_match_statement(s, self.options)
s.subject.accept(self)
for i in range(len(s.patterns)):
s.patterns[i].accept(self)
guard = s.guards[i]
if guard is not None:
guard.accept(self)
self.visit_block(s.bodies[i])
def visit_type_alias_stmt(self, s: TypeAliasStmt) -> None:
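        # Handles PEP 695 type alias statements, e.g. "type Alias[T] = list[T]" (Python 3.12+).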
if s.invalid_recursive_alias:
return
self.statement = s
type_params = self.push_type_args(s.type_args, s)
if type_params is None:
self.defer(s)
return
all_type_params_names = [p.name for p in s.type_args]
try:
existing = self.current_symbol_table().get(s.name.name)
if existing and not (
isinstance(existing.node, TypeAlias)
or (isinstance(existing.node, PlaceholderNode) and existing.node.line == s.line)
):
self.already_defined(s.name.name, s, existing, "Name")
return
tag = self.track_incomplete_refs()
res, alias_tvars, depends_on, qualified_tvars, empty_tuple_index = self.analyze_alias(
s.name.name,
s.value.expr(),
allow_placeholder=True,
declared_type_vars=type_params,
all_declared_type_params_names=all_type_params_names,
python_3_12_type_alias=True,
)
if not res:
res = AnyType(TypeOfAny.from_error)
if not self.is_func_scope():
# Only marking incomplete for top-level placeholders makes recursive aliases like
# `A = Sequence[str | A]` valid here, similar to how we treat base classes in class
# definitions, allowing `class str(Sequence[str]): ...`
incomplete_target = isinstance(res, ProperType) and isinstance(
res, PlaceholderType
)
else:
incomplete_target = has_placeholder(res)
if self.found_incomplete_ref(tag) or incomplete_target:
# Since we have got here, we know this must be a type alias (incomplete refs
# may appear in nested positions), therefore use becomes_typeinfo=True.
self.mark_incomplete(s.name.name, s.value, becomes_typeinfo=True)
return
self.add_type_alias_deps(depends_on)
# In addition to the aliases used, we add deps on unbound
# type variables, since they are erased from target type.
self.add_type_alias_deps(qualified_tvars)
# The above are only direct deps on other aliases.
# For subscripted aliases, type deps from expansion are added in deps.py
# (because the type is stored).
check_for_explicit_any(
res, self.options, self.is_typeshed_stub_file, self.msg, context=s
)
# When this type alias gets "inlined", the Any is not explicit anymore,
# so we need to replace it with non-explicit Anys.
res = make_any_non_explicit(res)
if self.options.disallow_any_unimported and has_any_from_unimported_type(res):
self.msg.unimported_type_becomes_any("Type alias target", res, s)
res = make_any_non_unimported(res)
eager = self.is_func_scope()
if isinstance(res, ProperType) and isinstance(res, Instance) and not res.args:
fix_instance(res, self.fail, self.note, disallow_any=False, options=self.options)
alias_node = TypeAlias(
res,
self.qualified_name(s.name.name),
s.line,
s.column,
alias_tvars=alias_tvars,
no_args=False,
eager=eager,
python_3_12_type_alias=True,
)
if (
existing
and isinstance(existing.node, (PlaceholderNode, TypeAlias))
and existing.node.line == s.line
):
updated = False
if isinstance(existing.node, TypeAlias):
if existing.node.target != res:
# Copy expansion to the existing alias, this matches how we update base classes
# for a TypeInfo _in place_ if there are nested placeholders.
existing.node.target = res
existing.node.alias_tvars = alias_tvars
updated = True
else:
# Otherwise just replace existing placeholder with type alias.
existing.node = alias_node
updated = True
if updated:
if self.final_iteration:
self.cannot_resolve_name(s.name.name, "name", s)
return
else:
# We need to defer so that this change can get propagated to base classes.
self.defer(s, force_progress=True)
else:
self.add_symbol(s.name.name, alias_node, s)
current_node = existing.node if existing else alias_node
assert isinstance(current_node, TypeAlias)
self.disable_invalid_recursive_aliases(s, current_node, s.value)
s.name.accept(self)
finally:
self.pop_type_args(s.type_args)
#
# Expressions
#
def visit_name_expr(self, expr: NameExpr) -> None:
n = self.lookup(expr.name, expr)
if n:
self.bind_name_expr(expr, n)
def bind_name_expr(self, expr: NameExpr, sym: SymbolTableNode) -> None:
"""Bind name expression to a symbol table node."""
if (
isinstance(sym.node, TypeVarExpr)
and self.tvar_scope.get_binding(sym)
and not self.allow_unbound_tvars
):
self.fail(f'"{expr.name}" is a type variable and only valid in type context', expr)
elif isinstance(sym.node, PlaceholderNode):
self.process_placeholder(expr.name, "name", expr)
else:
expr.kind = sym.kind
expr.node = sym.node
expr.fullname = sym.fullname or ""
def visit_super_expr(self, expr: SuperExpr) -> None:
if not self.type and not expr.call.args:
self.fail('"super" used outside class', expr)
return
expr.info = self.type
for arg in expr.call.args:
arg.accept(self)
def visit_tuple_expr(self, expr: TupleExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_list_expr(self, expr: ListExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_set_expr(self, expr: SetExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_dict_expr(self, expr: DictExpr) -> None:
for key, value in expr.items:
if key is not None:
key.accept(self)
value.accept(self)
def visit_star_expr(self, expr: StarExpr) -> None:
if not expr.valid:
self.fail("can't use starred expression here", expr, blocker=True)
else:
expr.expr.accept(self)
def visit_yield_from_expr(self, e: YieldFromExpr) -> None:
if not self.is_func_scope():
self.fail('"yield from" outside function', e, serious=True, blocker=True)
elif self.scope_stack[-1] == SCOPE_COMPREHENSION:
self.fail(
'"yield from" inside comprehension or generator expression',
e,
serious=True,
blocker=True,
)
elif self.function_stack[-1].is_coroutine:
self.fail('"yield from" in async function', e, serious=True, blocker=True)
else:
self.function_stack[-1].is_generator = True
if e.expr:
e.expr.accept(self)
def visit_call_expr(self, expr: CallExpr) -> None:
"""Analyze a call expression.
Some call expressions are recognized as special forms, including
cast(...).
"""
expr.callee.accept(self)
if refers_to_fullname(expr.callee, "typing.cast"):
# Special form cast(...).
if not self.check_fixed_args(expr, 2, "cast"):
return
# Translate first argument to an unanalyzed type.
try:
target = self.expr_to_unanalyzed_type(expr.args[0])
except TypeTranslationError:
self.fail("Cast target is not a type", expr)
return
# Piggyback CastExpr object to the CallExpr object; it takes
# precedence over the CallExpr semantics.
expr.analyzed = CastExpr(expr.args[1], target)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, ASSERT_TYPE_NAMES):
if not self.check_fixed_args(expr, 2, "assert_type"):
return
# Translate second argument to an unanalyzed type.
try:
target = self.expr_to_unanalyzed_type(expr.args[1])
except TypeTranslationError:
self.fail("assert_type() type is not a type", expr)
return
expr.analyzed = AssertTypeExpr(expr.args[0], target)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, REVEAL_TYPE_NAMES):
if not self.check_fixed_args(expr, 1, "reveal_type"):
return
reveal_imported = False
reveal_type_node = self.lookup("reveal_type", expr, suppress_errors=True)
if (
reveal_type_node
and isinstance(reveal_type_node.node, FuncBase)
and reveal_type_node.fullname in IMPORTED_REVEAL_TYPE_NAMES
):
reveal_imported = True
expr.analyzed = RevealExpr(
kind=REVEAL_TYPE, expr=expr.args[0], is_imported=reveal_imported
)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, "builtins.reveal_locals"):
# Store the local variable names into the RevealExpr for use in the
# type checking pass
local_nodes: list[Var] = []
if self.is_module_scope():
# try to determine just the variable declarations in module scope
# self.globals.values() contains SymbolTableNode's
# Each SymbolTableNode has an attribute node that is nodes.Var
                # look for variable nodes that are marked as is_inferred
# Each symboltable node has a Var node as .node
local_nodes = [
n.node
for name, n in self.globals.items()
if getattr(n.node, "is_inferred", False) and isinstance(n.node, Var)
]
elif self.is_class_scope():
# type = None # type: Optional[TypeInfo]
if self.type is not None:
local_nodes = [
st.node for st in self.type.names.values() if isinstance(st.node, Var)
]
elif self.is_func_scope():
# locals = None # type: List[Optional[SymbolTable]]
if self.locals is not None:
symbol_table = self.locals[-1]
if symbol_table is not None:
local_nodes = [
st.node for st in symbol_table.values() if isinstance(st.node, Var)
]
expr.analyzed = RevealExpr(kind=REVEAL_LOCALS, local_nodes=local_nodes)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, "typing.Any"):
# Special form Any(...) no longer supported.
self.fail("Any(...) is no longer supported. Use cast(Any, ...) instead", expr)
elif refers_to_fullname(expr.callee, "typing._promote"):
# Special form _promote(...).
if not self.check_fixed_args(expr, 1, "_promote"):
return
# Translate first argument to an unanalyzed type.
try:
target = self.expr_to_unanalyzed_type(expr.args[0])
except TypeTranslationError:
self.fail("Argument 1 to _promote is not a type", expr)
return
expr.analyzed = PromoteExpr(target)
expr.analyzed.line = expr.line
expr.analyzed.accept(self)
elif refers_to_fullname(expr.callee, "builtins.dict"):
expr.analyzed = self.translate_dict_call(expr)
elif refers_to_fullname(expr.callee, "builtins.divmod"):
if not self.check_fixed_args(expr, 2, "divmod"):
return
expr.analyzed = OpExpr("divmod", expr.args[0], expr.args[1])
expr.analyzed.line = expr.line
expr.analyzed.accept(self)
elif refers_to_fullname(
expr.callee, ("typing.TypeAliasType", "typing_extensions.TypeAliasType")
):
with self.allow_unbound_tvars_set():
for a in expr.args:
a.accept(self)
else:
# Normal call expression.
for a in expr.args:
a.accept(self)
if (
isinstance(expr.callee, MemberExpr)
and isinstance(expr.callee.expr, NameExpr)
and expr.callee.expr.name == "__all__"
and expr.callee.expr.kind == GDEF
and expr.callee.name in ("append", "extend", "remove")
):
if expr.callee.name == "append" and expr.args:
self.add_exports(expr.args[0])
elif (
expr.callee.name == "extend"
and expr.args
and isinstance(expr.args[0], (ListExpr, TupleExpr))
):
self.add_exports(expr.args[0].items)
elif (
expr.callee.name == "remove"
and expr.args
and isinstance(expr.args[0], StrExpr)
):
self.all_exports = [n for n in self.all_exports if n != expr.args[0].value]
def translate_dict_call(self, call: CallExpr) -> DictExpr | None:
"""Translate 'dict(x=y, ...)' to {'x': y, ...} and 'dict()' to {}.
For other variants of dict(...), return None.
"""
if not all(kind in (ARG_NAMED, ARG_STAR2) for kind in call.arg_kinds):
# Must still accept those args.
for a in call.args:
a.accept(self)
return None
expr = DictExpr(
[
(StrExpr(key) if key is not None else None, value)
for key, value in zip(call.arg_names, call.args)
]
)
expr.set_line(call)
expr.accept(self)
return expr
def check_fixed_args(self, expr: CallExpr, numargs: int, name: str) -> bool:
"""Verify that expr has specified number of positional args.
Return True if the arguments are valid.
"""
s = "s"
if numargs == 1:
s = ""
if len(expr.args) != numargs:
self.fail('"%s" expects %d argument%s' % (name, numargs, s), expr)
return False
if expr.arg_kinds != [ARG_POS] * numargs:
self.fail(f'"{name}" must be called with {numargs} positional argument{s}', expr)
return False
return True
def visit_member_expr(self, expr: MemberExpr) -> None:
base = expr.expr
base.accept(self)
if isinstance(base, RefExpr) and isinstance(base.node, MypyFile):
# Handle module attribute.
sym = self.get_module_symbol(base.node, expr.name)
if sym:
if isinstance(sym.node, PlaceholderNode):
self.process_placeholder(expr.name, "attribute", expr)
return
expr.kind = sym.kind
expr.fullname = sym.fullname or ""
expr.node = sym.node
elif isinstance(base, RefExpr):
# This branch handles the case C.bar (or cls.bar or self.bar inside
# a classmethod/method), where C is a class and bar is a type
# definition or a module resulting from `import bar` (or a module
# assignment) inside class C. We look up bar in the class' TypeInfo
# namespace. This is done only when bar is a module or a type;
# other things (e.g. methods) are handled by other code in
# checkmember.
type_info = None
if isinstance(base.node, TypeInfo):
# C.bar where C is a class
type_info = base.node
elif isinstance(base.node, Var) and self.type and self.function_stack:
# check for self.bar or cls.bar in method/classmethod
func_def = self.function_stack[-1]
if not func_def.is_static and isinstance(func_def.type, CallableType):
formal_arg = func_def.type.argument_by_name(base.node.name)
if formal_arg and formal_arg.pos == 0:
type_info = self.type
elif isinstance(base.node, TypeAlias) and base.node.no_args:
assert isinstance(base.node.target, ProperType)
if isinstance(base.node.target, Instance):
type_info = base.node.target.type
if type_info:
n = type_info.names.get(expr.name)
if n is not None and isinstance(n.node, (MypyFile, TypeInfo, TypeAlias)):
if not n:
return
expr.kind = n.kind
expr.fullname = n.fullname or ""
expr.node = n.node
def visit_op_expr(self, expr: OpExpr) -> None:
expr.left.accept(self)
if expr.op in ("and", "or"):
inferred = infer_condition_value(expr.left, self.options)
if (inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == "and") or (
inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == "or"
):
expr.right_unreachable = True
return
elif (inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == "and") or (
inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == "or"
):
expr.right_always = True
expr.right.accept(self)
def visit_comparison_expr(self, expr: ComparisonExpr) -> None:
for operand in expr.operands:
operand.accept(self)
def visit_unary_expr(self, expr: UnaryExpr) -> None:
expr.expr.accept(self)
def visit_index_expr(self, expr: IndexExpr) -> None:
base = expr.base
base.accept(self)
if (
isinstance(base, RefExpr)
and isinstance(base.node, TypeInfo)
and not base.node.is_generic()
):
expr.index.accept(self)
elif (
isinstance(base, RefExpr) and isinstance(base.node, TypeAlias)
) or refers_to_class_or_function(base):
# We need to do full processing on every iteration, since some type
# arguments may contain placeholder types.
self.analyze_type_application(expr)
else:
expr.index.accept(self)
def analyze_type_application(self, expr: IndexExpr) -> None:
"""Analyze special form -- type application (either direct or via type aliasing)."""
types = self.analyze_type_application_args(expr)
if types is None:
return
base = expr.base
expr.analyzed = TypeApplication(base, types)
expr.analyzed.line = expr.line
expr.analyzed.column = expr.column
# Types list, dict, set are not subscriptable, prohibit this if
# subscripted either via type alias...
if isinstance(base, RefExpr) and isinstance(base.node, TypeAlias):
alias = base.node
target = get_proper_type(alias.target)
if isinstance(target, Instance):
name = target.type.fullname
if (
alias.no_args
and name # this avoids bogus errors for already reported aliases
in get_nongen_builtins(self.options.python_version)
and not self.is_stub_file
and not alias.normalized
):
self.fail(no_subscript_builtin_alias(name, propose_alt=False), expr)
# ...or directly.
else:
n = self.lookup_type_node(base)
if (
n
and n.fullname in get_nongen_builtins(self.options.python_version)
and not self.is_stub_file
):
self.fail(no_subscript_builtin_alias(n.fullname, propose_alt=False), expr)
def analyze_type_application_args(self, expr: IndexExpr) -> list[Type] | None:
"""Analyze type arguments (index) in a type application.
Return None if anything was incomplete.
"""
index = expr.index
tag = self.track_incomplete_refs()
self.analyze_type_expr(index)
if self.found_incomplete_ref(tag):
return None
if self.basic_type_applications:
# Postpone the rest until we have more information (for r.h.s. of an assignment)
return None
types: list[Type] = []
if isinstance(index, TupleExpr):
items = index.items
is_tuple = isinstance(expr.base, RefExpr) and expr.base.fullname == "builtins.tuple"
if is_tuple and len(items) == 2 and isinstance(items[-1], EllipsisExpr):
items = items[:-1]
else:
items = [index]
# TODO: this needs a clean-up.
# Probably always allow Parameters literals, and validate in semanal_typeargs.py
base = expr.base
if isinstance(base, RefExpr) and isinstance(base.node, TypeAlias):
allow_unpack = base.node.tvar_tuple_index is not None
alias = base.node
if any(isinstance(t, ParamSpecType) for t in alias.alias_tvars):
has_param_spec = True
num_args = len(alias.alias_tvars)
else:
has_param_spec = False
num_args = -1
elif isinstance(base, RefExpr) and isinstance(base.node, TypeInfo):
allow_unpack = (
base.node.has_type_var_tuple_type or base.node.fullname == "builtins.tuple"
)
has_param_spec = base.node.has_param_spec_type
num_args = len(base.node.type_vars)
else:
allow_unpack = False
has_param_spec = False
num_args = -1
for item in items:
try:
typearg = self.expr_to_unanalyzed_type(item, allow_unpack=True)
except TypeTranslationError:
self.fail("Type expected within [...]", expr)
return None
analyzed = self.anal_type(
typearg,
# The type application may appear in base class expression,
# where type variables are not bound yet. Or when accepting
# r.h.s. of type alias before we figured out it is a type alias.
allow_unbound_tvars=self.allow_unbound_tvars,
allow_placeholder=True,
allow_param_spec_literals=has_param_spec,
allow_unpack=allow_unpack,
)
if analyzed is None:
return None
types.append(analyzed)
if has_param_spec and num_args == 1 and types:
first_arg = get_proper_type(types[0])
single_any = len(types) == 1 and isinstance(first_arg, AnyType)
if not (single_any or any(isinstance(t, (Parameters, ParamSpecType)) for t in types)):
types = [Parameters(types, [ARG_POS] * len(types), [None] * len(types))]
return types
def visit_slice_expr(self, expr: SliceExpr) -> None:
if expr.begin_index:
expr.begin_index.accept(self)
if expr.end_index:
expr.end_index.accept(self)
if expr.stride:
expr.stride.accept(self)
def visit_cast_expr(self, expr: CastExpr) -> None:
expr.expr.accept(self)
analyzed = self.anal_type(expr.type)
if analyzed is not None:
expr.type = analyzed
def visit_assert_type_expr(self, expr: AssertTypeExpr) -> None:
expr.expr.accept(self)
analyzed = self.anal_type(expr.type)
if analyzed is not None:
expr.type = analyzed
def visit_reveal_expr(self, expr: RevealExpr) -> None:
if expr.kind == REVEAL_TYPE:
if expr.expr is not None:
expr.expr.accept(self)
else:
# Reveal locals doesn't have an inner expression, there's no
# need to traverse inside it
pass
def visit_type_application(self, expr: TypeApplication) -> None:
expr.expr.accept(self)
for i in range(len(expr.types)):
analyzed = self.anal_type(expr.types[i])
if analyzed is not None:
expr.types[i] = analyzed
def visit_list_comprehension(self, expr: ListComprehension) -> None:
if any(expr.generator.is_async):
if not self.is_func_scope() or not self.function_stack[-1].is_coroutine:
self.fail(message_registry.ASYNC_FOR_OUTSIDE_COROUTINE, expr, code=codes.SYNTAX)
expr.generator.accept(self)
def visit_set_comprehension(self, expr: SetComprehension) -> None:
if any(expr.generator.is_async):
if not self.is_func_scope() or not self.function_stack[-1].is_coroutine:
self.fail(message_registry.ASYNC_FOR_OUTSIDE_COROUTINE, expr, code=codes.SYNTAX)
expr.generator.accept(self)
def visit_dictionary_comprehension(self, expr: DictionaryComprehension) -> None:
if any(expr.is_async):
if not self.is_func_scope() or not self.function_stack[-1].is_coroutine:
self.fail(message_registry.ASYNC_FOR_OUTSIDE_COROUTINE, expr, code=codes.SYNTAX)
with self.enter(expr):
self.analyze_comp_for(expr)
expr.key.accept(self)
expr.value.accept(self)
self.analyze_comp_for_2(expr)
def visit_generator_expr(self, expr: GeneratorExpr) -> None:
with self.enter(expr):
self.analyze_comp_for(expr)
expr.left_expr.accept(self)
self.analyze_comp_for_2(expr)
def analyze_comp_for(self, expr: GeneratorExpr | DictionaryComprehension) -> None:
"""Analyses the 'comp_for' part of comprehensions (part 1).
That is the part after 'for' in (x for x in l if p). This analyzes
variables and conditions which are analyzed in a local scope.
"""
for i, (index, sequence, conditions) in enumerate(
zip(expr.indices, expr.sequences, expr.condlists)
):
if i > 0:
sequence.accept(self)
# Bind index variables.
self.analyze_lvalue(index)
for cond in conditions:
cond.accept(self)
def analyze_comp_for_2(self, expr: GeneratorExpr | DictionaryComprehension) -> None:
"""Analyses the 'comp_for' part of comprehensions (part 2).
That is the part after 'for' in (x for x in l if p). This analyzes
the 'l' part which is analyzed in the surrounding scope.
"""
expr.sequences[0].accept(self)
def visit_lambda_expr(self, expr: LambdaExpr) -> None:
self.analyze_arg_initializers(expr)
self.analyze_function_body(expr)
def visit_conditional_expr(self, expr: ConditionalExpr) -> None:
expr.if_expr.accept(self)
expr.cond.accept(self)
expr.else_expr.accept(self)
def visit__promote_expr(self, expr: PromoteExpr) -> None:
analyzed = self.anal_type(expr.type)
if analyzed is not None:
assert isinstance(analyzed, ProperType), "Cannot use type aliases for promotions"
expr.type = analyzed
def visit_yield_expr(self, e: YieldExpr) -> None:
if not self.is_func_scope():
self.fail('"yield" outside function', e, serious=True, blocker=True)
elif self.scope_stack[-1] == SCOPE_COMPREHENSION:
self.fail(
'"yield" inside comprehension or generator expression',
e,
serious=True,
blocker=True,
)
elif self.function_stack[-1].is_coroutine:
self.function_stack[-1].is_generator = True
self.function_stack[-1].is_async_generator = True
else:
self.function_stack[-1].is_generator = True
if e.expr:
e.expr.accept(self)
def visit_await_expr(self, expr: AwaitExpr) -> None:
if not self.is_func_scope() or not self.function_stack:
            # We check both because is_func_scope() returns True inside comprehensions.
            # This is not a blocker, because some environments (like ipython)
# support top level awaits.
self.fail('"await" outside function', expr, serious=True, code=codes.TOP_LEVEL_AWAIT)
elif not self.function_stack[-1].is_coroutine:
self.fail(
'"await" outside coroutine ("async def")',
expr,
serious=True,
code=codes.AWAIT_NOT_ASYNC,
)
expr.expr.accept(self)
#
# Patterns
#
def visit_as_pattern(self, p: AsPattern) -> None:
if p.pattern is not None:
p.pattern.accept(self)
if p.name is not None:
self.analyze_lvalue(p.name)
def visit_or_pattern(self, p: OrPattern) -> None:
for pattern in p.patterns:
pattern.accept(self)
def visit_value_pattern(self, p: ValuePattern) -> None:
p.expr.accept(self)
def visit_sequence_pattern(self, p: SequencePattern) -> None:
for pattern in p.patterns:
pattern.accept(self)
def visit_starred_pattern(self, p: StarredPattern) -> None:
if p.capture is not None:
self.analyze_lvalue(p.capture)
def visit_mapping_pattern(self, p: MappingPattern) -> None:
for key in p.keys:
key.accept(self)
for value in p.values:
value.accept(self)
if p.rest is not None:
self.analyze_lvalue(p.rest)
def visit_class_pattern(self, p: ClassPattern) -> None:
p.class_ref.accept(self)
for pos in p.positionals:
pos.accept(self)
for v in p.keyword_values:
v.accept(self)
#
# Lookup functions
#
def lookup(
self, name: str, ctx: Context, suppress_errors: bool = False
) -> SymbolTableNode | None:
"""Look up an unqualified (no dots) name in all active namespaces.
Note that the result may contain a PlaceholderNode. The caller may
want to defer in that case.
Generate an error if the name is not defined unless suppress_errors
is true or the current namespace is incomplete. In the latter case
defer.
"""
implicit_name = False
# 1a. Name declared using 'global x' takes precedence
if name in self.global_decls[-1]:
if name in self.globals:
return self.globals[name]
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
# 1b. Name declared using 'nonlocal x' takes precedence
if name in self.nonlocal_decls[-1]:
for table in reversed(self.locals[:-1]):
if table is not None and name in table:
return table[name]
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
# 2a. Class attributes (if within class definition)
if self.type and not self.is_func_scope() and name in self.type.names:
node = self.type.names[name]
if not node.implicit:
if self.is_active_symbol_in_class_body(node.node):
return node
else:
# Defined through self.x assignment
implicit_name = True
implicit_node = node
# 2b. Class attributes __qualname__ and __module__
if self.type and not self.is_func_scope() and name in {"__qualname__", "__module__"}:
return SymbolTableNode(MDEF, Var(name, self.str_type()))
# 3. Local (function) scopes
for table in reversed(self.locals):
if table is not None and name in table:
return table[name]
# 4. Current file global scope
if name in self.globals:
return self.globals[name]
# 5. Builtins
b = self.globals.get("__builtins__", None)
if b:
assert isinstance(b.node, MypyFile)
table = b.node.names
if name in table:
if len(name) > 1 and name[0] == "_" and name[1] != "_":
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
node = table[name]
return node
# Give up.
if not implicit_name and not suppress_errors:
self.name_not_defined(name, ctx)
else:
if implicit_name:
return implicit_node
return None
def is_active_symbol_in_class_body(self, node: SymbolNode | None) -> bool:
"""Can a symbol defined in class body accessed at current statement?
Only allow access to class attributes textually after
the definition, so that it's possible to fall back to the
outer scope. Example:
class X: ...
class C:
X = X # Initializer refers to outer scope
Nested classes are an exception, since we want to support
arbitrary forward references in type annotations. Also, we
allow forward references to type aliases to support recursive
types.
"""
# TODO: Forward reference to name imported in class body is not
# caught.
if self.statement is None:
# Assume it's fine -- don't have enough context to check
return True
return (
node is None
or self.is_textually_before_statement(node)
or not self.is_defined_in_current_module(node.fullname)
or isinstance(node, (TypeInfo, TypeAlias))
or (isinstance(node, PlaceholderNode) and node.becomes_typeinfo)
)
def is_textually_before_statement(self, node: SymbolNode) -> bool:
"""Check if a node is defined textually before the current statement
        Note that decorated functions' line numbers are the same as
the top decorator.
"""
assert self.statement
line_diff = self.statement.line - node.line
        # The first branch handles referencing an overloaded function variant inside itself;
        # this is a corner case where mypy technically deviates from runtime name resolution,
# but it is fine because we want an overloaded function to be treated as a single unit.
if self.is_overloaded_item(node, self.statement):
return False
elif isinstance(node, Decorator) and not node.is_overload:
return line_diff > len(node.original_decorators)
else:
return line_diff > 0
def is_overloaded_item(self, node: SymbolNode, statement: Statement) -> bool:
"""Check whether the function belongs to the overloaded variants"""
if isinstance(node, OverloadedFuncDef) and isinstance(statement, FuncDef):
in_items = statement in {
item.func if isinstance(item, Decorator) else item for item in node.items
}
in_impl = node.impl is not None and (
(isinstance(node.impl, Decorator) and statement is node.impl.func)
or statement is node.impl
)
return in_items or in_impl
return False
def is_defined_in_current_module(self, fullname: str | None) -> bool:
if not fullname:
return False
return module_prefix(self.modules, fullname) == self.cur_mod_id
def lookup_qualified(
self, name: str, ctx: Context, suppress_errors: bool = False
) -> SymbolTableNode | None:
"""Lookup a qualified name in all activate namespaces.
Note that the result may contain a PlaceholderNode. The caller may
want to defer in that case.
Generate an error if the name is not defined unless suppress_errors
is true or the current namespace is incomplete. In the latter case
defer.
"""
if "." not in name:
# Simple case: look up a short name.
return self.lookup(name, ctx, suppress_errors=suppress_errors)
parts = name.split(".")
namespace = self.cur_mod_id
sym = self.lookup(parts[0], ctx, suppress_errors=suppress_errors)
if sym:
for i in range(1, len(parts)):
node = sym.node
part = parts[i]
if isinstance(node, TypeInfo):
nextsym = node.get(part)
elif isinstance(node, MypyFile):
nextsym = self.get_module_symbol(node, part)
namespace = node.fullname
elif isinstance(node, PlaceholderNode):
return sym
elif isinstance(node, TypeAlias) and node.no_args:
assert isinstance(node.target, ProperType)
if isinstance(node.target, Instance):
nextsym = node.target.type.get(part)
else:
nextsym = None
else:
if isinstance(node, Var):
typ = get_proper_type(node.type)
if isinstance(typ, AnyType):
# Allow access through Var with Any type without error.
return self.implicit_symbol(sym, name, parts[i:], typ)
# This might be something like valid `P.args` or invalid `P.__bound__` access.
# Important note that `ParamSpecExpr` is also ignored in other places.
# See https://github.com/python/mypy/pull/13468
if isinstance(node, ParamSpecExpr) and part in ("args", "kwargs"):
return None
# Lookup through invalid node, such as variable or function
nextsym = None
if not nextsym or nextsym.module_hidden:
if not suppress_errors:
self.name_not_defined(name, ctx, namespace=namespace)
return None
sym = nextsym
return sym
def lookup_type_node(self, expr: Expression) -> SymbolTableNode | None:
try:
t = self.expr_to_unanalyzed_type(expr)
except TypeTranslationError:
return None
if isinstance(t, UnboundType):
n = self.lookup_qualified(t.name, expr, suppress_errors=True)
return n
return None
def get_module_symbol(self, node: MypyFile, name: str) -> SymbolTableNode | None:
"""Look up a symbol from a module.
Return None if no matching symbol could be bound.
"""
module = node.fullname
names = node.names
sym = names.get(name)
if not sym:
fullname = module + "." + name
if fullname in self.modules:
sym = SymbolTableNode(GDEF, self.modules[fullname])
elif self.is_incomplete_namespace(module):
self.record_incomplete_ref()
elif "__getattr__" in names:
gvar = self.create_getattr_var(names["__getattr__"], name, fullname)
if gvar:
sym = SymbolTableNode(GDEF, gvar)
elif self.is_missing_module(fullname):
# We use the fullname of the original definition so that we can
# detect whether two names refer to the same thing.
var_type = AnyType(TypeOfAny.from_unimported_type)
v = Var(name, type=var_type)
v._fullname = fullname
sym = SymbolTableNode(GDEF, v)
elif sym.module_hidden:
sym = None
return sym
def is_missing_module(self, module: str) -> bool:
return module in self.missing_modules
def implicit_symbol(
self, sym: SymbolTableNode, name: str, parts: list[str], source_type: AnyType
) -> SymbolTableNode:
"""Create symbol for a qualified name reference through Any type."""
if sym.node is None:
basename = None
else:
basename = sym.node.fullname
if basename is None:
fullname = name
else:
fullname = basename + "." + ".".join(parts)
var_type = AnyType(TypeOfAny.from_another_any, source_type)
var = Var(parts[-1], var_type)
var._fullname = fullname
return SymbolTableNode(GDEF, var)
def create_getattr_var(
self, getattr_defn: SymbolTableNode, name: str, fullname: str
) -> Var | None:
"""Create a dummy variable using module-level __getattr__ return type.
If not possible, return None.
Note that multiple Var nodes can be created for a single name. We
can use the from_module_getattr and the fullname attributes to
check if two dummy Var nodes refer to the same thing. Reusing Var
nodes would require non-local mutable state, which we prefer to
avoid.
"""
if isinstance(getattr_defn.node, (FuncDef, Var)):
node_type = get_proper_type(getattr_defn.node.type)
if isinstance(node_type, CallableType):
typ = node_type.ret_type
else:
typ = AnyType(TypeOfAny.from_error)
v = Var(name, type=typ)
v._fullname = fullname
v.from_module_getattr = True
return v
return None
def lookup_fully_qualified(self, fullname: str) -> SymbolTableNode:
ret = self.lookup_fully_qualified_or_none(fullname)
assert ret is not None, fullname
return ret
def lookup_fully_qualified_or_none(self, fullname: str) -> SymbolTableNode | None:
"""Lookup a fully qualified name that refers to a module-level definition.
Don't assume that the name is defined. This happens in the global namespace --
the local module namespace is ignored. This does not dereference indirect
refs.
Note that this can't be used for names nested in class namespaces.
"""
# TODO: unify/clean-up/simplify lookup methods, see #4157.
# TODO: support nested classes (but consider performance impact,
# we might keep the module level only lookup for thing like 'builtins.int').
assert "." in fullname
module, name = fullname.rsplit(".", maxsplit=1)
if module not in self.modules:
return None
filenode = self.modules[module]
result = filenode.names.get(name)
if result is None and self.is_incomplete_namespace(module):
# TODO: More explicit handling of incomplete refs?
self.record_incomplete_ref()
return result
def object_type(self) -> Instance:
return self.named_type("builtins.object")
def str_type(self) -> Instance:
return self.named_type("builtins.str")
def named_type(self, fullname: str, args: list[Type] | None = None) -> Instance:
sym = self.lookup_fully_qualified(fullname)
assert sym, "Internal error: attempted to construct unknown type"
node = sym.node
assert isinstance(node, TypeInfo)
if args:
# TODO: assert len(args) == len(node.defn.type_vars)
return Instance(node, args)
return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
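    # Illustrative usage sketch (not part of the original source): named_type() is
    # how the surrounding helpers construct Instance types. "int_type" below is a
    # hypothetical, already-analyzed Type.
    #
    #     self.named_type("builtins.str")                   # Instance of str
    #     self.named_type("builtins.list", [int_type])      # list[int]
    #
    # When args are omitted, any type parameters are filled with Any placeholders.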
def named_type_or_none(self, fullname: str, args: list[Type] | None = None) -> Instance | None:
sym = self.lookup_fully_qualified_or_none(fullname)
if not sym or isinstance(sym.node, PlaceholderNode):
return None
node = sym.node
if isinstance(node, TypeAlias):
assert isinstance(node.target, Instance) # type: ignore[misc]
node = node.target.type
assert isinstance(node, TypeInfo), node
if args is not None:
# TODO: assert len(args) == len(node.defn.type_vars)
return Instance(node, args)
return Instance(node, [AnyType(TypeOfAny.unannotated)] * len(node.defn.type_vars))
def builtin_type(self, fully_qualified_name: str) -> Instance:
"""Legacy function -- use named_type() instead."""
return self.named_type(fully_qualified_name)
def lookup_current_scope(self, name: str) -> SymbolTableNode | None:
if self.locals[-1] is not None:
return self.locals[-1].get(name)
elif self.type is not None:
return self.type.names.get(name)
else:
return self.globals.get(name)
#
# Adding symbols
#
def add_symbol(
self,
name: str,
node: SymbolNode,
context: Context,
module_public: bool = True,
module_hidden: bool = False,
can_defer: bool = True,
escape_comprehensions: bool = False,
no_progress: bool = False,
type_param: bool = False,
) -> bool:
"""Add symbol to the currently active symbol table.
Generally additions to symbol table should go through this method or
one of the methods below so that kinds, redefinitions, conditional
definitions, and skipped names are handled consistently.
Return True if we actually added the symbol, or False if we refused to do so
(because something is not ready).
If can_defer is True, defer current target if adding a placeholder.
"""
if self.is_func_scope():
kind = LDEF
elif self.type is not None:
kind = MDEF
else:
kind = GDEF
symbol = SymbolTableNode(
kind, node, module_public=module_public, module_hidden=module_hidden
)
return self.add_symbol_table_node(
name, symbol, context, can_defer, escape_comprehensions, no_progress, type_param
)
def add_symbol_skip_local(self, name: str, node: SymbolNode) -> None:
"""Same as above, but skipping the local namespace.
This doesn't check for previous definition and is only used
for serialization of method-level classes.
Classes defined within methods can be exposed through an
attribute type, but method-level symbol tables aren't serialized.
This method can be used to add such classes to an enclosing,
serialized symbol table.
"""
# TODO: currently this is only used by named tuples and typed dicts.
# Use this method also by normal classes, see issue #6422.
if self.type is not None:
names = self.type.names
kind = MDEF
else:
names = self.globals
kind = GDEF
symbol = SymbolTableNode(kind, node)
names[name] = symbol
def add_symbol_table_node(
self,
name: str,
symbol: SymbolTableNode,
context: Context | None = None,
can_defer: bool = True,
escape_comprehensions: bool = False,
no_progress: bool = False,
type_param: bool = False,
) -> bool:
"""Add symbol table node to the currently active symbol table.
Return True if we actually added the symbol, or False if we refused
to do so (because something is not ready or it was a no-op).
Generate an error if there is an invalid redefinition.
If context is None, unconditionally add node, since we can't report
an error. Note that this is used by plugins to forcibly replace nodes!
TODO: Prevent plugins from replacing nodes, as it could cause problems?
Args:
name: short name of symbol
symbol: Node to add
can_defer: if True, defer current target if adding a placeholder
context: error context (see above about None value)
"""
names = self.current_symbol_table(
escape_comprehensions=escape_comprehensions, type_param=type_param
)
existing = names.get(name)
if isinstance(symbol.node, PlaceholderNode) and can_defer:
if context is not None:
self.process_placeholder(name, "name", context)
else:
# see note in docstring describing None contexts
self.defer()
if (
existing is not None
and context is not None
and not is_valid_replacement(existing, symbol)
):
# There is an existing node, so this may be a redefinition.
# If the new node points to the same node as the old one,
# or if both old and new nodes are placeholders, we don't
# need to do anything.
old = existing.node
new = symbol.node
if isinstance(new, PlaceholderNode):
# We don't know whether this is okay. Let's wait until the next iteration.
return False
if not is_same_symbol(old, new):
if isinstance(new, (FuncDef, Decorator, OverloadedFuncDef, TypeInfo)):
self.add_redefinition(names, name, symbol)
if not (isinstance(new, (FuncDef, Decorator)) and self.set_original_def(old, new)):
self.name_already_defined(name, context, existing)
elif name not in self.missing_names[-1] and "*" not in self.missing_names[-1]:
names[name] = symbol
if not no_progress:
self.progress = True
return True
return False
def add_redefinition(self, names: SymbolTable, name: str, symbol: SymbolTableNode) -> None:
"""Add a symbol table node that reflects a redefinition as a function or a class.
Redefinitions need to be added to the symbol table so that they can be found
through AST traversal, but they have dummy names of form 'name-redefinition[N]',
where N ranges over 2, 3, ... (omitted for the first redefinition).
Note: we always store redefinitions independently of whether they are valid or not
        (so they will be semantically analyzed); the caller should give an error for invalid
        redefinitions (e.g. a variable redefined as a class).
"""
i = 1
# Don't serialize redefined nodes. They are likely to have
# busted internal references which can cause problems with
# serialization and they can't have any external references to
# them.
symbol.no_serialize = True
while True:
if i == 1:
new_name = f"{name}-redefinition"
else:
new_name = f"{name}-redefinition{i}"
existing = names.get(new_name)
if existing is None:
names[new_name] = symbol
return
elif existing.node is symbol.node:
# Already there
return
i += 1
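    # Example (illustrative only): if a module first defines a function "f" and then
    # a class "f", the second definition is stored under the dummy key
    # "f-redefinition" (subsequent ones under "f-redefinition2", "f-redefinition3",
    # ...), so it is still semantically analyzed while the caller reports the
    # invalid redefinition itself.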
def add_local(self, node: Var | FuncDef | OverloadedFuncDef, context: Context) -> None:
"""Add local variable or function."""
assert self.is_func_scope()
name = node.name
node._fullname = name
self.add_symbol(name, node, context)
def _get_node_for_class_scoped_import(
self, name: str, symbol_node: SymbolNode | None, context: Context
) -> SymbolNode | None:
if symbol_node is None:
return None
# I promise this type checks; I'm just making mypyc issues go away.
# mypyc is absolutely convinced that `symbol_node` narrows to a Var in the following,
# when it can also be a FuncBase. Once fixed, `f` in the following can be removed.
# See also https://github.com/mypyc/mypyc/issues/892
f: Callable[[object], Any] = lambda x: x
if isinstance(f(symbol_node), (Decorator, FuncBase, Var)):
# For imports in class scope, we construct a new node to represent the symbol and
# set its `info` attribute to `self.type`.
existing = self.current_symbol_table().get(name)
if (
# The redefinition checks in `add_symbol_table_node` don't work for our
# constructed Var / FuncBase, so check for possible redefinitions here.
existing is not None
and isinstance(f(existing.node), (Decorator, FuncBase, Var))
and (
isinstance(f(existing.type), f(AnyType))
or f(existing.type) == f(symbol_node).type
)
):
return existing.node
# Construct the new node
if isinstance(f(symbol_node), (FuncBase, Decorator)):
# In theory we could construct a new node here as well, but in practice
# it doesn't work well, see #12197
typ: Type | None = AnyType(TypeOfAny.from_error)
self.fail("Unsupported class scoped import", context)
else:
typ = f(symbol_node).type
symbol_node = Var(name, typ)
symbol_node._fullname = self.qualified_name(name)
assert self.type is not None # guaranteed by is_class_scope
symbol_node.info = self.type
symbol_node.line = context.line
symbol_node.column = context.column
return symbol_node
def add_imported_symbol(
self,
name: str,
node: SymbolTableNode,
context: ImportBase,
module_public: bool,
module_hidden: bool,
) -> None:
"""Add an alias to an existing symbol through import."""
assert not module_hidden or not module_public
existing_symbol = self.lookup_current_scope(name)
if (
existing_symbol
and not isinstance(existing_symbol.node, PlaceholderNode)
and not isinstance(node.node, PlaceholderNode)
):
# Import can redefine a variable. They get special treatment.
if self.process_import_over_existing_name(name, existing_symbol, node, context):
return
symbol_node: SymbolNode | None = node.node
if self.is_class_scope():
symbol_node = self._get_node_for_class_scoped_import(name, symbol_node, context)
symbol = SymbolTableNode(
node.kind, symbol_node, module_public=module_public, module_hidden=module_hidden
)
self.add_symbol_table_node(name, symbol, context)
def add_unknown_imported_symbol(
self,
name: str,
context: Context,
target_name: str | None,
module_public: bool,
module_hidden: bool,
) -> None:
"""Add symbol that we don't know what it points to because resolving an import failed.
This can happen if a module is missing, or it is present, but doesn't have
the imported attribute. The `target_name` is the name of symbol in the namespace
it is imported from. For example, for 'from mod import x as y' the target_name is
'mod.x'. This is currently used only to track logical dependencies.
"""
existing = self.current_symbol_table().get(name)
if existing and isinstance(existing.node, Var) and existing.node.is_suppressed_import:
# This missing import was already added -- nothing to do here.
return
var = Var(name)
if self.options.logical_deps and target_name is not None:
# This makes it possible to add logical fine-grained dependencies
# from a missing module. We can't use this by default, since in a
# few places we assume that the full name points to a real
# definition, but this name may point to nothing.
var._fullname = target_name
elif self.type:
var._fullname = self.type.fullname + "." + name
var.info = self.type
else:
var._fullname = self.qualified_name(name)
var.is_ready = True
any_type = AnyType(TypeOfAny.from_unimported_type, missing_import_name=var._fullname)
var.type = any_type
var.is_suppressed_import = True
self.add_symbol(
name, var, context, module_public=module_public, module_hidden=module_hidden
)
#
# Other helpers
#
@contextmanager
def tvar_scope_frame(self, frame: TypeVarLikeScope) -> Iterator[None]:
old_scope = self.tvar_scope
self.tvar_scope = frame
yield
self.tvar_scope = old_scope
def defer(self, debug_context: Context | None = None, force_progress: bool = False) -> None:
"""Defer current analysis target to be analyzed again.
This must be called if something in the current target is
incomplete or has a placeholder node. However, this must *not*
be called during the final analysis iteration! Instead, an error
should be generated. Often 'process_placeholder' is a good
way to either defer or generate an error.
NOTE: Some methods, such as 'anal_type', 'mark_incomplete' and
'record_incomplete_ref', call this implicitly, or when needed.
They are usually preferable to a direct defer() call.
"""
assert not self.final_iteration, "Must not defer during final iteration"
if force_progress:
# Usually, we report progress if we have replaced a placeholder node
# with an actual valid node. However, sometimes we need to update an
# existing node *in-place*. For example, this is used by type aliases
# in context of forward references and/or recursive aliases, and in
# similar situations (recursive named tuples etc).
self.progress = True
self.deferred = True
# Store debug info for this deferral.
line = (
debug_context.line if debug_context else self.statement.line if self.statement else -1
)
self.deferral_debug_context.append((self.cur_mod_id, line))
def track_incomplete_refs(self) -> Tag:
"""Return tag that can be used for tracking references to incomplete names."""
return self.num_incomplete_refs
def found_incomplete_ref(self, tag: Tag) -> bool:
"""Have we encountered an incomplete reference since starting tracking?"""
return self.num_incomplete_refs != tag
def record_incomplete_ref(self) -> None:
"""Record the encounter of an incomplete reference and defer current analysis target."""
self.defer()
self.num_incomplete_refs += 1
def mark_incomplete(
self,
name: str,
node: Node,
becomes_typeinfo: bool = False,
module_public: bool = True,
module_hidden: bool = False,
) -> None:
"""Mark a definition as incomplete (and defer current analysis target).
Also potentially mark the current namespace as incomplete.
Args:
name: The name that we weren't able to define (or '*' if the name is unknown)
node: The node that refers to the name (definition or lvalue)
becomes_typeinfo: Pass this to PlaceholderNode (used by special forms like
named tuples that will create TypeInfos).
"""
self.defer(node)
if name == "*":
self.incomplete = True
elif not self.is_global_or_nonlocal(name):
fullname = self.qualified_name(name)
assert self.statement
placeholder = PlaceholderNode(
fullname, node, self.statement.line, becomes_typeinfo=becomes_typeinfo
)
self.add_symbol(
name,
placeholder,
module_public=module_public,
module_hidden=module_hidden,
context=dummy_context(),
)
self.missing_names[-1].add(name)
def is_incomplete_namespace(self, fullname: str) -> bool:
"""Is a module or class namespace potentially missing some definitions?
If a name is missing from an incomplete namespace, we'll need to defer the
current analysis target.
"""
return fullname in self.incomplete_namespaces
def process_placeholder(
self, name: str | None, kind: str, ctx: Context, force_progress: bool = False
) -> None:
"""Process a reference targeting placeholder node.
If this is not a final iteration, defer current node,
otherwise report an error.
        The 'kind' argument indicates if this is a name or attribute expression
(used for better error message).
"""
if self.final_iteration:
self.cannot_resolve_name(name, kind, ctx)
else:
self.defer(ctx, force_progress=force_progress)
def cannot_resolve_name(self, name: str | None, kind: str, ctx: Context) -> None:
name_format = f' "{name}"' if name else ""
self.fail(f"Cannot resolve {kind}{name_format} (possible cyclic definition)", ctx)
if self.is_func_scope():
self.note("Recursive types are not allowed at function scope", ctx)
def qualified_name(self, name: str) -> str:
if self.type is not None:
return self.type._fullname + "." + name
elif self.is_func_scope():
return name
else:
return self.cur_mod_id + "." + name
@contextmanager
def enter(
self, function: FuncItem | GeneratorExpr | DictionaryComprehension
) -> Iterator[None]:
"""Enter a function, generator or comprehension scope."""
names = self.saved_locals.setdefault(function, SymbolTable())
self.locals.append(names)
is_comprehension = isinstance(function, (GeneratorExpr, DictionaryComprehension))
self.scope_stack.append(SCOPE_FUNC if not is_comprehension else SCOPE_COMPREHENSION)
self.global_decls.append(set())
self.nonlocal_decls.append(set())
# -1 since entering block will increment this to 0.
self.block_depth.append(-1)
self.loop_depth.append(0)
self.missing_names.append(set())
try:
yield
finally:
self.locals.pop()
self.scope_stack.pop()
self.global_decls.pop()
self.nonlocal_decls.pop()
self.block_depth.pop()
self.loop_depth.pop()
self.missing_names.pop()
def is_func_scope(self) -> bool:
scope_type = self.scope_stack[-1]
if scope_type == SCOPE_ANNOTATION:
scope_type = self.scope_stack[-2]
return scope_type in (SCOPE_FUNC, SCOPE_COMPREHENSION)
def is_nested_within_func_scope(self) -> bool:
"""Are we underneath a function scope, even if we are in a nested class also?"""
return any(s in (SCOPE_FUNC, SCOPE_COMPREHENSION) for s in self.scope_stack)
def is_class_scope(self) -> bool:
return self.type is not None and not self.is_func_scope()
def is_module_scope(self) -> bool:
return not (self.is_class_scope() or self.is_func_scope())
def current_symbol_kind(self) -> int:
if self.is_class_scope():
kind = MDEF
elif self.is_func_scope():
kind = LDEF
else:
kind = GDEF
return kind
def current_symbol_table(
self, escape_comprehensions: bool = False, type_param: bool = False
) -> SymbolTable:
if type_param and self.scope_stack[-1] == SCOPE_ANNOTATION:
n = self.locals[-1]
assert n is not None
return n
elif self.is_func_scope():
if self.scope_stack[-1] == SCOPE_ANNOTATION:
n = self.locals[-2]
else:
n = self.locals[-1]
assert n is not None
if escape_comprehensions:
assert len(self.locals) == len(self.scope_stack)
# Retrieve the symbol table from the enclosing non-comprehension scope.
for i, scope_type in enumerate(reversed(self.scope_stack)):
if scope_type != SCOPE_COMPREHENSION:
if i == len(self.locals) - 1: # The last iteration.
# The caller of the comprehension is in the global space.
names = self.globals
else:
names_candidate = self.locals[-1 - i]
assert (
names_candidate is not None
), "Escaping comprehension from invalid scope"
names = names_candidate
break
else:
assert False, "Should have at least one non-comprehension scope"
else:
names = n
assert names is not None
elif self.type is not None:
names = self.type.names
else:
names = self.globals
return names
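    # Illustrative note (not part of the original source): escape_comprehensions
    # matters for assignment expressions, which bind in the enclosing scope
    # ("f" and "data" below are placeholders):
    #
    #     values = [y := f(x) for x in data]   # "y" must escape the comprehension
    #
    # In that case the table returned above is the nearest enclosing
    # non-comprehension scope, or the module globals at the top level.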
def is_global_or_nonlocal(self, name: str) -> bool:
return self.is_func_scope() and (
name in self.global_decls[-1] or name in self.nonlocal_decls[-1]
)
def add_exports(self, exp_or_exps: Iterable[Expression] | Expression) -> None:
exps = [exp_or_exps] if isinstance(exp_or_exps, Expression) else exp_or_exps
for exp in exps:
if isinstance(exp, StrExpr):
self.all_exports.append(exp.value)
def name_not_defined(self, name: str, ctx: Context, namespace: str | None = None) -> None:
incomplete = self.is_incomplete_namespace(namespace or self.cur_mod_id)
if (
namespace is None
and self.type
and not self.is_func_scope()
and self.incomplete_type_stack
and self.incomplete_type_stack[-1]
and not self.final_iteration
):
# We are processing a class body for the first time, so it is incomplete.
incomplete = True
if incomplete:
# Target namespace is incomplete, so it's possible that the name will be defined
# later on. Defer current target.
self.record_incomplete_ref()
return
message = f'Name "{name}" is not defined'
self.fail(message, ctx, code=codes.NAME_DEFINED)
if f"builtins.{name}" in SUGGESTED_TEST_FIXTURES:
# The user probably has a missing definition in a test fixture. Let's verify.
fullname = f"builtins.{name}"
if self.lookup_fully_qualified_or_none(fullname) is None:
# Yes. Generate a helpful note.
self.msg.add_fixture_note(fullname, ctx)
modules_with_unimported_hints = {
name.split(".", 1)[0] for name in TYPES_FOR_UNIMPORTED_HINTS
}
lowercased = {name.lower(): name for name in TYPES_FOR_UNIMPORTED_HINTS}
for module in modules_with_unimported_hints:
fullname = f"{module}.{name}".lower()
if fullname not in lowercased:
continue
# User probably forgot to import these types.
hint = (
'Did you forget to import it from "{module}"?'
' (Suggestion: "from {module} import {name}")'
).format(module=module, name=lowercased[fullname].rsplit(".", 1)[-1])
self.note(hint, ctx, code=codes.NAME_DEFINED)
def already_defined(
self, name: str, ctx: Context, original_ctx: SymbolTableNode | SymbolNode | None, noun: str
) -> None:
if isinstance(original_ctx, SymbolTableNode):
node: SymbolNode | None = original_ctx.node
elif isinstance(original_ctx, SymbolNode):
node = original_ctx
else:
node = None
if isinstance(original_ctx, SymbolTableNode) and isinstance(original_ctx.node, MypyFile):
# Since this is an import, original_ctx.node points to the module definition.
# Therefore its line number is always 1, which is not useful for this
# error message.
extra_msg = " (by an import)"
elif node and node.line != -1 and self.is_local_name(node.fullname):
# TODO: Using previous symbol node may give wrong line. We should use
# the line number where the binding was established instead.
extra_msg = f" on line {node.line}"
else:
extra_msg = " (possibly by an import)"
self.fail(
f'{noun} "{unmangle(name)}" already defined{extra_msg}', ctx, code=codes.NO_REDEF
)
def name_already_defined(
self, name: str, ctx: Context, original_ctx: SymbolTableNode | SymbolNode | None = None
) -> None:
self.already_defined(name, ctx, original_ctx, noun="Name")
def attribute_already_defined(
self, name: str, ctx: Context, original_ctx: SymbolTableNode | SymbolNode | None = None
) -> None:
self.already_defined(name, ctx, original_ctx, noun="Attribute")
def is_local_name(self, name: str) -> bool:
"""Does name look like reference to a definition in the current module?"""
return self.is_defined_in_current_module(name) or "." not in name
def in_checked_function(self) -> bool:
"""Should we type-check the current function?
- Yes if --check-untyped-defs is set.
- Yes outside functions.
- Yes in annotated functions.
- No otherwise.
"""
if self.options.check_untyped_defs or not self.function_stack:
return True
current_index = len(self.function_stack) - 1
while current_index >= 0:
current_func = self.function_stack[current_index]
if not isinstance(current_func, LambdaExpr):
return not current_func.is_dynamic()
# Special case, `lambda` inherits the "checked" state from its parent.
# Because `lambda` itself cannot be annotated.
# `lambdas` can be deeply nested, so we try to find at least one other parent.
current_index -= 1
# This means that we only have a stack of `lambda` functions,
# no regular functions.
return True
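    # Example (illustrative, assuming default options): errors inside "f" below are
    # suppressed because it is unannotated, while "g" is checked:
    #
    #     def f(x):                 # dynamic -> not checked
    #         return x + 1
    #
    #     def g(x: int) -> int:
    #         return x + ""         # error reported here
    #
    # A lambda cannot carry annotations, so it inherits the checked/unchecked state
    # of its nearest non-lambda enclosing function.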
def fail(
self,
msg: str | ErrorMessage,
ctx: Context,
serious: bool = False,
*,
code: ErrorCode | None = None,
blocker: bool = False,
) -> None:
if not serious and not self.in_checked_function():
return
# In case it's a bug and we don't really have context
assert ctx is not None, msg
if isinstance(msg, ErrorMessage):
if code is None:
code = msg.code
msg = msg.value
self.errors.report(ctx.line, ctx.column, msg, blocker=blocker, code=code)
def note(self, msg: str, ctx: Context, code: ErrorCode | None = None) -> None:
if not self.in_checked_function():
return
self.errors.report(ctx.line, ctx.column, msg, severity="note", code=code)
def incomplete_feature_enabled(self, feature: str, ctx: Context) -> bool:
if feature not in self.options.enable_incomplete_feature:
self.fail(
f'"{feature}" support is experimental,'
f" use --enable-incomplete-feature={feature} to enable",
ctx,
)
return False
return True
def accept(self, node: Node) -> None:
try:
node.accept(self)
except Exception as err:
report_internal_error(err, self.errors.file, node.line, self.errors, self.options)
def expr_to_analyzed_type(
self,
expr: Expression,
report_invalid_types: bool = True,
allow_placeholder: bool = False,
allow_type_any: bool = False,
allow_unbound_tvars: bool = False,
allow_param_spec_literals: bool = False,
allow_unpack: bool = False,
) -> Type | None:
if isinstance(expr, CallExpr):
# This is a legacy syntax intended mostly for Python 2, we keep it for
# backwards compatibility, but new features like generic named tuples
# and recursive named tuples will be not supported.
expr.accept(self)
internal_name, info, tvar_defs = self.named_tuple_analyzer.check_namedtuple(
expr, None, self.is_func_scope()
)
if tvar_defs:
self.fail("Generic named tuples are not supported for legacy class syntax", expr)
self.note("Use either Python 3 class syntax, or the assignment syntax", expr)
if internal_name is None:
# Some form of namedtuple is the only valid type that looks like a call
# expression. This isn't a valid type.
raise TypeTranslationError()
elif not info:
self.defer(expr)
return None
assert info.tuple_type, "NamedTuple without tuple type"
fallback = Instance(info, [])
return TupleType(info.tuple_type.items, fallback=fallback)
typ = self.expr_to_unanalyzed_type(expr)
return self.anal_type(
typ,
report_invalid_types=report_invalid_types,
allow_placeholder=allow_placeholder,
allow_type_any=allow_type_any,
allow_unbound_tvars=allow_unbound_tvars,
allow_param_spec_literals=allow_param_spec_literals,
allow_unpack=allow_unpack,
)
def analyze_type_expr(self, expr: Expression) -> None:
# There are certain expressions that mypy does not need to semantically analyze,
# since they analyzed solely as type. (For example, indexes in type alias definitions
# and base classes in class defs). External consumers of the mypy AST may need
# them semantically analyzed, however, if they need to treat it as an expression
# and not a type. (Which is to say, mypyc needs to do this.) Do the analysis
# in a fresh tvar scope in order to suppress any errors about using type variables.
with self.tvar_scope_frame(TypeVarLikeScope()), self.allow_unbound_tvars_set():
expr.accept(self)
def type_analyzer(
self,
*,
tvar_scope: TypeVarLikeScope | None = None,
allow_tuple_literal: bool = False,
allow_unbound_tvars: bool = False,
allow_placeholder: bool = False,
allow_typed_dict_special_forms: bool = False,
allow_param_spec_literals: bool = False,
allow_unpack: bool = False,
report_invalid_types: bool = True,
prohibit_self_type: str | None = None,
allow_type_any: bool = False,
) -> TypeAnalyser:
if tvar_scope is None:
tvar_scope = self.tvar_scope
tpan = TypeAnalyser(
self,
tvar_scope,
self.plugin,
self.options,
self.is_typeshed_stub_file,
allow_unbound_tvars=allow_unbound_tvars,
allow_tuple_literal=allow_tuple_literal,
report_invalid_types=report_invalid_types,
allow_placeholder=allow_placeholder,
allow_typed_dict_special_forms=allow_typed_dict_special_forms,
allow_param_spec_literals=allow_param_spec_literals,
allow_unpack=allow_unpack,
prohibit_self_type=prohibit_self_type,
allow_type_any=allow_type_any,
)
tpan.in_dynamic_func = bool(self.function_stack and self.function_stack[-1].is_dynamic())
tpan.global_scope = not self.type and not self.function_stack
return tpan
def expr_to_unanalyzed_type(self, node: Expression, allow_unpack: bool = False) -> ProperType:
return expr_to_unanalyzed_type(
node, self.options, self.is_stub_file, allow_unpack=allow_unpack
)
def anal_type(
self,
typ: Type,
*,
tvar_scope: TypeVarLikeScope | None = None,
allow_tuple_literal: bool = False,
allow_unbound_tvars: bool = False,
allow_placeholder: bool = False,
allow_typed_dict_special_forms: bool = False,
allow_param_spec_literals: bool = False,
allow_unpack: bool = False,
report_invalid_types: bool = True,
prohibit_self_type: str | None = None,
allow_type_any: bool = False,
) -> Type | None:
"""Semantically analyze a type.
Args:
typ: Type to analyze (if already analyzed, this is a no-op)
allow_placeholder: If True, may return PlaceholderType if
encountering an incomplete definition
Return None only if some part of the type couldn't be bound *and* it
referred to an incomplete namespace or definition. In this case also
defer as needed. During a final iteration this won't return None;
instead report an error if the type can't be analyzed and return
AnyType.
In case of other errors, report an error message and return AnyType.
NOTE: The caller shouldn't defer even if this returns None or a
placeholder type.
"""
has_self_type = find_self_type(
typ, lambda name: self.lookup_qualified(name, typ, suppress_errors=True)
)
if has_self_type and self.type and prohibit_self_type is None:
self.setup_self_type()
a = self.type_analyzer(
tvar_scope=tvar_scope,
allow_unbound_tvars=allow_unbound_tvars,
allow_tuple_literal=allow_tuple_literal,
allow_placeholder=allow_placeholder,
allow_typed_dict_special_forms=allow_typed_dict_special_forms,
allow_param_spec_literals=allow_param_spec_literals,
allow_unpack=allow_unpack,
report_invalid_types=report_invalid_types,
prohibit_self_type=prohibit_self_type,
allow_type_any=allow_type_any,
)
tag = self.track_incomplete_refs()
typ = typ.accept(a)
if self.found_incomplete_ref(tag):
# Something could not be bound yet.
return None
self.add_type_alias_deps(a.aliases_used)
return typ
def class_type(self, self_type: Type) -> Type:
return TypeType.make_normalized(self_type)
def schedule_patch(self, priority: int, patch: Callable[[], None]) -> None:
self.patches.append((priority, patch))
def report_hang(self) -> None:
print("Deferral trace:")
for mod, line in self.deferral_debug_context:
print(f" {mod}:{line}")
self.errors.report(
-1,
-1,
"INTERNAL ERROR: maximum semantic analysis iteration count reached",
blocker=True,
)
def add_plugin_dependency(self, trigger: str, target: str | None = None) -> None:
"""Add dependency from trigger to a target.
If the target is not given explicitly, use the current target.
"""
if target is None:
target = self.scope.current_target()
self.cur_mod_node.plugin_deps.setdefault(trigger, set()).add(target)
def add_type_alias_deps(
self, aliases_used: Collection[str], target: str | None = None
) -> None:
"""Add full names of type aliases on which the current node depends.
This is used by fine-grained incremental mode to re-check the corresponding nodes.
If `target` is None, then the target node used will be the current scope.
"""
if not aliases_used:
# A basic optimization to avoid adding targets with no dependencies to
# the `alias_deps` dict.
return
if target is None:
target = self.scope.current_target()
self.cur_mod_node.alias_deps[target].update(aliases_used)
def is_mangled_global(self, name: str) -> bool:
# A global is mangled if there exists at least one renamed variant.
return unmangle(name) + "'" in self.globals
def is_initial_mangled_global(self, name: str) -> bool:
# If there are renamed definitions for a global, the first one has exactly one prime.
return name == unmangle(name) + "'"
def parse_bool(self, expr: Expression) -> bool | None:
# This wrapper is preserved for plugins.
return parse_bool(expr)
def parse_str_literal(self, expr: Expression) -> str | None:
"""Attempt to find the string literal value of the given expression. Returns `None` if no
literal value can be found."""
if isinstance(expr, StrExpr):
return expr.value
if isinstance(expr, RefExpr) and isinstance(expr.node, Var) and expr.node.type is not None:
values = try_getting_str_literals_from_type(expr.node.type)
if values is not None and len(values) == 1:
return values[0]
return None
def set_future_import_flags(self, module_name: str) -> None:
if module_name in FUTURE_IMPORTS:
self.modules[self.cur_mod_id].future_import_flags.add(FUTURE_IMPORTS[module_name])
def is_future_flag_set(self, flag: str) -> bool:
return self.modules[self.cur_mod_id].is_future_flag_set(flag)
def parse_dataclass_transform_spec(self, call: CallExpr) -> DataclassTransformSpec:
"""Build a DataclassTransformSpec from the arguments passed to the given call to
typing.dataclass_transform."""
parameters = DataclassTransformSpec()
for name, value in zip(call.arg_names, call.args):
# Skip any positional args. Note that any such args are invalid, but we can rely on
# typeshed to enforce this and don't need an additional error here.
if name is None:
continue
            # field_specifiers is currently the only non-boolean argument; check for it first
            # so the rest of the block can fall through to handling booleans
if name == "field_specifiers":
parameters.field_specifiers = self.parse_dataclass_transform_field_specifiers(
value
)
continue
boolean = require_bool_literal_argument(self, value, name)
if boolean is None:
continue
if name == "eq_default":
parameters.eq_default = boolean
elif name == "order_default":
parameters.order_default = boolean
elif name == "kw_only_default":
parameters.kw_only_default = boolean
elif name == "frozen_default":
parameters.frozen_default = boolean
else:
self.fail(f'Unrecognized dataclass_transform parameter "{name}"', call)
return parameters
def parse_dataclass_transform_field_specifiers(self, arg: Expression) -> tuple[str, ...]:
if not isinstance(arg, TupleExpr):
self.fail('"field_specifiers" argument must be a tuple literal', arg)
return ()
names = []
for specifier in arg.items:
if not isinstance(specifier, RefExpr):
self.fail('"field_specifiers" must only contain identifiers', specifier)
return ()
names.append(specifier.fullname)
return tuple(names)
def replace_implicit_first_type(sig: FunctionLike, new: Type) -> FunctionLike:
if isinstance(sig, CallableType):
if len(sig.arg_types) == 0:
return sig
return sig.copy_modified(arg_types=[new] + sig.arg_types[1:])
elif isinstance(sig, Overloaded):
return Overloaded(
[cast(CallableType, replace_implicit_first_type(i, new)) for i in sig.items]
)
else:
assert False
def refers_to_fullname(node: Expression, fullnames: str | tuple[str, ...]) -> bool:
"""Is node a name or member expression with the given full name?"""
if not isinstance(fullnames, tuple):
fullnames = (fullnames,)
if not isinstance(node, RefExpr):
return False
if node.fullname in fullnames:
return True
if isinstance(node.node, TypeAlias):
return is_named_instance(node.node.target, fullnames)
return False
def refers_to_class_or_function(node: Expression) -> bool:
"""Does semantically analyzed node refer to a class?"""
return isinstance(node, RefExpr) and isinstance(
node.node, (TypeInfo, FuncDef, OverloadedFuncDef)
)
def find_duplicate(list: list[T]) -> T | None:
"""If the list has duplicates, return one of the duplicates.
Otherwise, return None.
"""
for i in range(1, len(list)):
if list[i] in list[:i]:
return list[i]
return None
def remove_imported_names_from_symtable(names: SymbolTable, module: str) -> None:
"""Remove all imported names from the symbol table of a module."""
removed: list[str] = []
for name, node in names.items():
if node.node is None:
continue
fullname = node.node.fullname
prefix = fullname[: fullname.rfind(".")]
if prefix != module:
removed.append(name)
for name in removed:
del names[name]
def make_any_non_explicit(t: Type) -> Type:
"""Replace all Any types within in with Any that has attribute 'explicit' set to False"""
return t.accept(MakeAnyNonExplicit())
class MakeAnyNonExplicit(TrivialSyntheticTypeTranslator):
def visit_any(self, t: AnyType) -> Type:
if t.type_of_any == TypeOfAny.explicit:
return t.copy_modified(TypeOfAny.special_form)
return t
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
return t.copy_modified(args=[a.accept(self) for a in t.args])
def make_any_non_unimported(t: Type) -> Type:
"""Replace all Any types that come from unimported types with special form Any."""
return t.accept(MakeAnyNonUnimported())
class MakeAnyNonUnimported(TrivialSyntheticTypeTranslator):
def visit_any(self, t: AnyType) -> Type:
if t.type_of_any == TypeOfAny.from_unimported_type:
return t.copy_modified(TypeOfAny.special_form, missing_import_name=None)
return t
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
return t.copy_modified(args=[a.accept(self) for a in t.args])
def apply_semantic_analyzer_patches(patches: list[tuple[int, Callable[[], None]]]) -> None:
"""Call patch callbacks in the right order.
This should happen after semantic analyzer pass 3.
"""
patches_by_priority = sorted(patches, key=lambda x: x[0])
for priority, patch_func in patches_by_priority:
patch_func()
def names_modified_by_assignment(s: AssignmentStmt) -> list[NameExpr]:
"""Return all unqualified (short) names assigned to in an assignment statement."""
result: list[NameExpr] = []
for lvalue in s.lvalues:
result += names_modified_in_lvalue(lvalue)
return result
def names_modified_in_lvalue(lvalue: Lvalue) -> list[NameExpr]:
"""Return all NameExpr assignment targets in an Lvalue."""
if isinstance(lvalue, NameExpr):
return [lvalue]
elif isinstance(lvalue, StarExpr):
return names_modified_in_lvalue(lvalue.expr)
elif isinstance(lvalue, (ListExpr, TupleExpr)):
result: list[NameExpr] = []
for item in lvalue.items:
result += names_modified_in_lvalue(item)
return result
return []
def is_same_var_from_getattr(n1: SymbolNode | None, n2: SymbolNode | None) -> bool:
"""Do n1 and n2 refer to the same Var derived from module-level __getattr__?"""
return (
isinstance(n1, Var)
and n1.from_module_getattr
and isinstance(n2, Var)
and n2.from_module_getattr
and n1.fullname == n2.fullname
)
def dummy_context() -> Context:
return TempNode(AnyType(TypeOfAny.special_form))
def is_valid_replacement(old: SymbolTableNode, new: SymbolTableNode) -> bool:
"""Can symbol table node replace an existing one?
These are the only valid cases:
1. Placeholder gets replaced with a non-placeholder
2. Placeholder that isn't known to become type replaced with a
placeholder that can become a type
"""
if isinstance(old.node, PlaceholderNode):
if isinstance(new.node, PlaceholderNode):
return not old.node.becomes_typeinfo and new.node.becomes_typeinfo
else:
return True
return False
def is_same_symbol(a: SymbolNode | None, b: SymbolNode | None) -> bool:
return (
a == b
or (isinstance(a, PlaceholderNode) and isinstance(b, PlaceholderNode))
or is_same_var_from_getattr(a, b)
)
def is_trivial_body(block: Block) -> bool:
"""Returns 'true' if the given body is "trivial" -- if it contains just a "pass",
"..." (ellipsis), or "raise NotImplementedError()". A trivial body may also
start with a statement containing just a string (e.g. a docstring).
Note: Functions that raise other kinds of exceptions do not count as
"trivial". We use this function to help us determine when it's ok to
relax certain checks on body, but functions that raise arbitrary exceptions
are more likely to do non-trivial work. For example:
def halt(self, reason: str = ...) -> NoReturn:
raise MyCustomError("Fatal error: " + reason, self.line, self.context)
A function that raises just NotImplementedError is much less likely to be
this complex.
Note: If you update this, you may also need to update
mypy.fastparse.is_possible_trivial_body!
"""
body = block.body
if not body:
# Functions have empty bodies only if the body is stripped or the function is
# generated or deserialized. In these cases the body is unknown.
return False
# Skip a docstring
if isinstance(body[0], ExpressionStmt) and isinstance(body[0].expr, StrExpr):
body = block.body[1:]
if len(body) == 0:
# There's only a docstring (or no body at all).
return True
elif len(body) > 1:
return False
stmt = body[0]
if isinstance(stmt, RaiseStmt):
expr = stmt.expr
if expr is None:
return False
if isinstance(expr, CallExpr):
expr = expr.callee
return isinstance(expr, NameExpr) and expr.fullname == "builtins.NotImplementedError"
return isinstance(stmt, PassStmt) or (
isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, EllipsisExpr)
)
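# Illustrative examples (not part of the original source) of what is_trivial_body()
# above accepts and rejects:
#
#     def a() -> int: ...                      # trivial (ellipsis)
#     def b() -> int:
#         """Docstring only."""                # trivial
#     def c() -> int:
#         raise NotImplementedError            # trivial
#     def d() -> int:
#         raise ValueError("boom")             # NOT trivial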
| algorandfoundation/puya | src/puyapy/_vendor/mypy/semanal.py | Python | NOASSERTION | 326,393 |
"""Calculate some properties of classes.
These happen after semantic analysis and before type checking.
"""
from __future__ import annotations
from typing import Final
from mypy.errors import Errors
from mypy.nodes import (
IMPLICITLY_ABSTRACT,
IS_ABSTRACT,
CallExpr,
Decorator,
FuncDef,
Node,
OverloadedFuncDef,
PromoteExpr,
SymbolTable,
TypeInfo,
Var,
)
from mypy.options import Options
from mypy.types import MYPYC_NATIVE_INT_NAMES, Instance, ProperType
# Hard coded type promotions (shared between all Python versions).
# These add extra ad-hoc edges to the subtyping relation. For example,
# int is considered a subtype of float, even though there is no
# subclass relationship.
# Note that the bytearray -> bytes promotion is a little unsafe
# as some functions only accept bytes objects. Here convenience
# trumps safety.
TYPE_PROMOTIONS: Final = {
"builtins.int": "float",
"builtins.float": "complex",
"builtins.bytearray": "bytes",
"builtins.memoryview": "bytes",
}
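# Illustrative example (not part of the original source): because of the promotions
# above, assignments like the following type check even though the classes are not
# related by inheritance at runtime:
#
#     f: float = 1                  # int promoted to float
#     c: complex = 1.5              # float promoted to complex
#     b: bytes = bytearray(b"x")    # bytearray promoted to bytes (convenience over safety)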
def calculate_class_abstract_status(typ: TypeInfo, is_stub_file: bool, errors: Errors) -> None:
"""Calculate abstract status of a class.
Set is_abstract of the type to True if the type has an unimplemented
abstract attribute. Also compute a list of abstract attributes.
    Report an error if the required ABCMeta metaclass is missing.
"""
typ.is_abstract = False
typ.abstract_attributes = []
if typ.typeddict_type:
return # TypedDict can't be abstract
concrete: set[str] = set()
# List of abstract attributes together with their abstract status
abstract: list[tuple[str, int]] = []
abstract_in_this_class: list[str] = []
if typ.is_newtype:
# Special case: NewTypes are considered as always non-abstract, so they can be used as:
# Config = NewType('Config', Mapping[str, str])
# default = Config({'cannot': 'modify'}) # OK
return
for base in typ.mro:
for name, symnode in base.names.items():
node = symnode.node
if isinstance(node, OverloadedFuncDef):
# Unwrap an overloaded function definition. We can just
# check arbitrarily the first overload item. If the
# different items have a different abstract status, there
# should be an error reported elsewhere.
if node.items: # can be empty for invalid overloads
func: Node | None = node.items[0]
else:
func = None
else:
func = node
if isinstance(func, Decorator):
func = func.func
if isinstance(func, FuncDef):
if (
func.abstract_status in (IS_ABSTRACT, IMPLICITLY_ABSTRACT)
and name not in concrete
):
typ.is_abstract = True
abstract.append((name, func.abstract_status))
if base is typ:
abstract_in_this_class.append(name)
elif isinstance(node, Var):
if node.is_abstract_var and name not in concrete:
typ.is_abstract = True
abstract.append((name, IS_ABSTRACT))
if base is typ:
abstract_in_this_class.append(name)
concrete.add(name)
# In stubs, abstract classes need to be explicitly marked because it is too
# easy to accidentally leave a concrete class abstract by forgetting to
# implement some methods.
typ.abstract_attributes = sorted(abstract)
if is_stub_file:
if typ.declared_metaclass and typ.declared_metaclass.type.has_base("abc.ABCMeta"):
return
if typ.is_protocol:
return
if abstract and not abstract_in_this_class:
def report(message: str, severity: str) -> None:
errors.report(typ.line, typ.column, message, severity=severity)
attrs = ", ".join(f'"{attr}"' for attr, _ in sorted(abstract))
report(f"Class {typ.fullname} has abstract attributes {attrs}", "error")
report(
"If it is meant to be abstract, add 'abc.ABCMeta' as an explicit metaclass", "note"
)
if typ.is_final and abstract:
attrs = ", ".join(f'"{attr}"' for attr, _ in sorted(abstract))
errors.report(
typ.line, typ.column, f"Final class {typ.fullname} has abstract attributes {attrs}"
)
def check_protocol_status(info: TypeInfo, errors: Errors) -> None:
"""Check that all classes in MRO of a protocol are protocols"""
if info.is_protocol:
for type in info.bases:
if not type.type.is_protocol and type.type.fullname != "builtins.object":
errors.report(
info.line,
info.column,
"All bases of a protocol must be protocols",
severity="error",
)
def calculate_class_vars(info: TypeInfo) -> None:
"""Try to infer additional class variables.
Subclass attribute assignments with no type annotation are assumed
to be classvar if overriding a declared classvar from the base
class.
This must happen after the main semantic analysis pass, since
this depends on base class bodies having been fully analyzed.
"""
for name, sym in info.names.items():
node = sym.node
if isinstance(node, Var) and node.info and node.is_inferred and not node.is_classvar:
for base in info.mro[1:]:
member = base.names.get(name)
if member is not None and isinstance(member.node, Var) and member.node.is_classvar:
node.is_classvar = True
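# Illustrative example (not part of the original source) of the inference above:
#
#     class Base:
#         flag: ClassVar[bool] = False
#
#     class Child(Base):
#         flag = True   # unannotated, but inferred as a ClassVar because it
#                       # overrides a declared ClassVar in the base class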
def add_type_promotion(
info: TypeInfo, module_names: SymbolTable, options: Options, builtin_names: SymbolTable
) -> None:
"""Setup extra, ad-hoc subtyping relationships between classes (promotion).
This includes things like 'int' being compatible with 'float'.
"""
defn = info.defn
promote_targets: list[ProperType] = []
for decorator in defn.decorators:
if isinstance(decorator, CallExpr):
analyzed = decorator.analyzed
if isinstance(analyzed, PromoteExpr):
# _promote class decorator (undocumented feature).
promote_targets.append(analyzed.type)
if not promote_targets:
if defn.fullname in TYPE_PROMOTIONS:
target_sym = module_names.get(TYPE_PROMOTIONS[defn.fullname])
if defn.fullname == "builtins.bytearray" and options.disable_bytearray_promotion:
target_sym = None
elif defn.fullname == "builtins.memoryview" and options.disable_memoryview_promotion:
target_sym = None
# With test stubs, the target may not exist.
if target_sym:
target_info = target_sym.node
assert isinstance(target_info, TypeInfo)
promote_targets.append(Instance(target_info, []))
# Special case the promotions between 'int' and native integer types.
# These have promotions going both ways, such as from 'int' to 'i64'
# and 'i64' to 'int', for convenience.
if defn.fullname in MYPYC_NATIVE_INT_NAMES:
int_sym = builtin_names["int"]
assert isinstance(int_sym.node, TypeInfo)
int_sym.node._promote.append(Instance(defn.info, []))
defn.info.alt_promote = Instance(int_sym.node, [])
if promote_targets:
defn.info._promote.extend(promote_targets)
| algorandfoundation/puya | src/puyapy/_vendor/mypy/semanal_classprop.py | Python | NOASSERTION | 7,673 |
"""Semantic analysis of call-based Enum definitions.
This is conceptually part of mypy.semanal (semantic analyzer pass 2).
"""
from __future__ import annotations
from typing import Final, cast
from mypy.nodes import (
ARG_NAMED,
ARG_POS,
MDEF,
AssignmentStmt,
CallExpr,
Context,
DictExpr,
EnumCallExpr,
Expression,
ListExpr,
MemberExpr,
NameExpr,
RefExpr,
StrExpr,
SymbolTableNode,
TupleExpr,
TypeInfo,
Var,
is_StrExpr_list,
)
from mypy.options import Options
from mypy.semanal_shared import SemanticAnalyzerInterface
from mypy.types import ENUM_REMOVED_PROPS, LiteralType, get_proper_type
# Note: 'enum.EnumMeta' is deliberately excluded from this list. Classes that directly use
# enum.EnumMeta do not necessarily automatically have the 'name' and 'value' attributes.
ENUM_BASES: Final = frozenset(
("enum.Enum", "enum.IntEnum", "enum.Flag", "enum.IntFlag", "enum.StrEnum")
)
ENUM_SPECIAL_PROPS: Final = frozenset(
(
"name",
"value",
"_name_",
"_value_",
*ENUM_REMOVED_PROPS,
# Also attributes from `object`:
"__module__",
"__annotations__",
"__doc__",
"__slots__",
"__dict__",
)
)
class EnumCallAnalyzer:
def __init__(self, options: Options, api: SemanticAnalyzerInterface) -> None:
self.options = options
self.api = api
def process_enum_call(self, s: AssignmentStmt, is_func_scope: bool) -> bool:
"""Check if s defines an Enum; if yes, store the definition in symbol table.
Return True if this looks like an Enum definition (but maybe with errors),
otherwise return False.
"""
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], (NameExpr, MemberExpr)):
return False
lvalue = s.lvalues[0]
name = lvalue.name
enum_call = self.check_enum_call(s.rvalue, name, is_func_scope)
if enum_call is None:
return False
if isinstance(lvalue, MemberExpr):
self.fail("Enum type as attribute is not supported", lvalue)
return False
# Yes, it's a valid Enum definition. Add it to the symbol table.
self.api.add_symbol(name, enum_call, s)
return True
def check_enum_call(
self, node: Expression, var_name: str, is_func_scope: bool
) -> TypeInfo | None:
"""Check if a call defines an Enum.
Example:
A = enum.Enum('A', 'foo bar')
is equivalent to:
class A(enum.Enum):
foo = 1
bar = 2
"""
if not isinstance(node, CallExpr):
return None
call = node
callee = call.callee
if not isinstance(callee, RefExpr):
return None
fullname = callee.fullname
if fullname not in ENUM_BASES:
return None
new_class_name, items, values, ok = self.parse_enum_call_args(
call, fullname.split(".")[-1]
)
if not ok:
# Error. Construct dummy return value.
name = var_name
if is_func_scope:
name += "@" + str(call.line)
info = self.build_enum_call_typeinfo(name, [], fullname, node.line)
else:
if new_class_name != var_name:
msg = f'String argument 1 "{new_class_name}" to {fullname}(...) does not match variable name "{var_name}"'
self.fail(msg, call)
name = cast(StrExpr, call.args[0]).value
if name != var_name or is_func_scope:
# Give it a unique name derived from the line number.
name += "@" + str(call.line)
info = self.build_enum_call_typeinfo(name, items, fullname, call.line)
# Store generated TypeInfo under both names, see semanal_namedtuple for more details.
if name != var_name or is_func_scope:
self.api.add_symbol_skip_local(name, info)
call.analyzed = EnumCallExpr(info, items, values)
call.analyzed.set_line(call)
info.line = node.line
return info
def build_enum_call_typeinfo(
self, name: str, items: list[str], fullname: str, line: int
) -> TypeInfo:
base = self.api.named_type_or_none(fullname)
assert base is not None
info = self.api.basic_new_typeinfo(name, base, line)
info.metaclass_type = info.calculate_metaclass_type()
info.is_enum = True
for item in items:
var = Var(item)
var.info = info
var.is_property = True
var._fullname = f"{info.fullname}.{item}"
info.names[item] = SymbolTableNode(MDEF, var)
return info
def parse_enum_call_args(
self, call: CallExpr, class_name: str
) -> tuple[str, list[str], list[Expression | None], bool]:
"""Parse arguments of an Enum call.
        Return a tuple (new class name, field names, field values, ok), where ok is False if there was an error.
"""
args = call.args
if not all(arg_kind in [ARG_POS, ARG_NAMED] for arg_kind in call.arg_kinds):
return self.fail_enum_call_arg(f"Unexpected arguments to {class_name}()", call)
if len(args) < 2:
return self.fail_enum_call_arg(f"Too few arguments for {class_name}()", call)
if len(args) > 6:
return self.fail_enum_call_arg(f"Too many arguments for {class_name}()", call)
valid_name = [None, "value", "names", "module", "qualname", "type", "start"]
for arg_name in call.arg_names:
if arg_name not in valid_name:
self.fail_enum_call_arg(f'Unexpected keyword argument "{arg_name}"', call)
value, names = None, None
for arg_name, arg in zip(call.arg_names, args):
if arg_name == "value":
value = arg
if arg_name == "names":
names = arg
if value is None:
value = args[0]
if names is None:
names = args[1]
if not isinstance(value, StrExpr):
return self.fail_enum_call_arg(
f"{class_name}() expects a string literal as the first argument", call
)
new_class_name = value.value
items = []
values: list[Expression | None] = []
if isinstance(names, StrExpr):
fields = names.value
for field in fields.replace(",", " ").split():
items.append(field)
elif isinstance(names, (TupleExpr, ListExpr)):
seq_items = names.items
if is_StrExpr_list(seq_items):
items = [seq_item.value for seq_item in seq_items]
elif all(
isinstance(seq_item, (TupleExpr, ListExpr))
and len(seq_item.items) == 2
and isinstance(seq_item.items[0], StrExpr)
for seq_item in seq_items
):
for seq_item in seq_items:
assert isinstance(seq_item, (TupleExpr, ListExpr))
name, value = seq_item.items
assert isinstance(name, StrExpr)
items.append(name.value)
values.append(value)
else:
return self.fail_enum_call_arg(
"%s() with tuple or list expects strings or (name, value) pairs" % class_name,
call,
)
elif isinstance(names, DictExpr):
for key, value in names.items:
if not isinstance(key, StrExpr):
return self.fail_enum_call_arg(
f"{class_name}() with dict literal requires string literals", call
)
items.append(key.value)
values.append(value)
elif isinstance(args[1], RefExpr) and isinstance(args[1].node, Var):
proper_type = get_proper_type(args[1].node.type)
if (
proper_type is not None
and isinstance(proper_type, LiteralType)
and isinstance(proper_type.value, str)
):
fields = proper_type.value
for field in fields.replace(",", " ").split():
items.append(field)
elif args[1].node.is_final and isinstance(args[1].node.final_value, str):
fields = args[1].node.final_value
for field in fields.replace(",", " ").split():
items.append(field)
else:
return self.fail_enum_call_arg(
"Second argument of %s() must be string, tuple, list or dict literal for mypy to determine Enum members"
% class_name,
call,
)
else:
# TODO: Allow dict(x=1, y=2) as a substitute for {'x': 1, 'y': 2}?
return self.fail_enum_call_arg(
"Second argument of %s() must be string, tuple, list or dict literal for mypy to determine Enum members"
% class_name,
call,
)
if not items:
return self.fail_enum_call_arg(f"{class_name}() needs at least one item", call)
if not values:
values = [None] * len(items)
assert len(items) == len(values)
return new_class_name, items, values, True
def fail_enum_call_arg(
self, message: str, context: Context
) -> tuple[str, list[str], list[Expression | None], bool]:
self.fail(message, context)
return "", [], [], False
# Helpers
def fail(self, msg: str, ctx: Context) -> None:
self.api.fail(msg, ctx)
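
# Illustrative sketch (not part of mypy's source): the functional Enum forms
# that parse_enum_call_args() above recognizes. All of these run with the
# standard library alone and produce equivalent members.
import enum

# Member names as a whitespace/comma separated string.
Color1 = enum.Enum("Color1", "RED GREEN BLUE")
# Member names as a sequence (values are auto-assigned starting from 1).
Color2 = enum.Enum("Color2", ["RED", "GREEN", "BLUE"])
# Members as (name, value) pairs.
Color3 = enum.Enum("Color3", [("RED", 1), ("GREEN", 2), ("BLUE", 3)])
# Members as a dict literal mapping names to values.
Color4 = enum.Enum("Color4", {"RED": 1, "GREEN": 2, "BLUE": 3})

assert Color1.RED.value == Color3.RED.value == 1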
| algorandfoundation/puya | src/puyapy/_vendor/mypy/semanal_enum.py | Python | NOASSERTION | 9,706 |
"""Simple type inference for decorated functions during semantic analysis."""
from __future__ import annotations
from mypy.nodes import ARG_POS, CallExpr, Decorator, Expression, FuncDef, RefExpr, Var
from mypy.semanal_shared import SemanticAnalyzerInterface
from mypy.typeops import function_type
from mypy.types import (
AnyType,
CallableType,
ProperType,
Type,
TypeOfAny,
TypeVarType,
get_proper_type,
)
from mypy.typevars import has_no_typevars
def infer_decorator_signature_if_simple(
dec: Decorator, analyzer: SemanticAnalyzerInterface
) -> None:
"""Try to infer the type of the decorated function.
This lets us resolve additional references to decorated functions
during type checking. Otherwise the type might not be available
when we need it, since module top levels can't be deferred.
This basically uses a simple special-purpose type inference
engine just for decorators.
"""
if dec.var.is_property:
# Decorators are expected to have a callable type (it's a little odd).
if dec.func.type is None:
dec.var.type = CallableType(
[AnyType(TypeOfAny.special_form)],
[ARG_POS],
[None],
AnyType(TypeOfAny.special_form),
analyzer.named_type("builtins.function"),
name=dec.var.name,
)
elif isinstance(dec.func.type, CallableType):
dec.var.type = dec.func.type
return
decorator_preserves_type = True
for expr in dec.decorators:
preserve_type = False
if isinstance(expr, RefExpr) and isinstance(expr.node, FuncDef):
if expr.node.type and is_identity_signature(expr.node.type):
preserve_type = True
if not preserve_type:
decorator_preserves_type = False
break
if decorator_preserves_type:
# No non-identity decorators left. We can trivially infer the type
# of the function here.
dec.var.type = function_type(dec.func, analyzer.named_type("builtins.function"))
if dec.decorators:
return_type = calculate_return_type(dec.decorators[0])
if return_type and isinstance(return_type, AnyType):
# The outermost decorator will return Any so we know the type of the
# decorated function.
dec.var.type = AnyType(TypeOfAny.from_another_any, source_any=return_type)
sig = find_fixed_callable_return(dec.decorators[0])
if sig:
# The outermost decorator always returns the same kind of function,
# so we know that this is the type of the decorated function.
orig_sig = function_type(dec.func, analyzer.named_type("builtins.function"))
sig.name = orig_sig.items[0].name
dec.var.type = sig
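
# Illustrative sketch (not part of mypy's source): a decorator with an identity
# signature (T -> T), the case infer_decorator_signature_if_simple() above can
# handle directly -- the decorated function keeps its original type. The names
# `trace` and `add` are made up for this example.
from typing import Callable, TypeVar

_F = TypeVar("_F", bound=Callable[..., object])

def trace(func: _F) -> _F:
    # Identity signature: the argument is returned unchanged, so mypy can
    # reuse the decorated function's signature as-is.
    return func

@trace
def add(x: int, y: int) -> int:
    return x + y

assert add(2, 3) == 5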
def is_identity_signature(sig: Type) -> bool:
"""Is type a callable of form T -> T (where T is a type variable)?"""
sig = get_proper_type(sig)
if isinstance(sig, CallableType) and sig.arg_kinds == [ARG_POS]:
if isinstance(sig.arg_types[0], TypeVarType) and isinstance(sig.ret_type, TypeVarType):
return sig.arg_types[0].id == sig.ret_type.id
return False
def calculate_return_type(expr: Expression) -> ProperType | None:
"""Return the return type if we can calculate it.
This only uses information available during semantic analysis so this
will sometimes return None because of insufficient information (as
type inference hasn't run yet).
"""
if isinstance(expr, RefExpr):
if isinstance(expr.node, FuncDef):
typ = expr.node.type
if typ is None:
# No signature -> default to Any.
return AnyType(TypeOfAny.unannotated)
# Explicit Any return?
if isinstance(typ, CallableType):
return get_proper_type(typ.ret_type)
return None
elif isinstance(expr.node, Var):
return get_proper_type(expr.node.type)
elif isinstance(expr, CallExpr):
return calculate_return_type(expr.callee)
return None
def find_fixed_callable_return(expr: Expression) -> CallableType | None:
"""Return the return type, if expression refers to a callable that returns a callable.
But only do this if the return type has no type variables. Return None otherwise.
This approximates things a lot as this is supposed to be called before type checking
when full type information is not available yet.
"""
if isinstance(expr, RefExpr):
if isinstance(expr.node, FuncDef):
typ = expr.node.type
if typ:
if isinstance(typ, CallableType) and has_no_typevars(typ.ret_type):
ret_type = get_proper_type(typ.ret_type)
if isinstance(ret_type, CallableType):
return ret_type
elif isinstance(expr, CallExpr):
t = find_fixed_callable_return(expr.callee)
if t:
ret_type = get_proper_type(t.ret_type)
if isinstance(ret_type, CallableType):
return ret_type
return None
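
# Illustrative sketch (not part of mypy's source): a decorator whose annotated
# return type is a fixed callable with no type variables, the case handled by
# find_fixed_callable_return() above -- mypy can use that callable as the type
# of the decorated function. The names `to_int_handler` and `parse` are made up.
from typing import Any, Callable

def to_int_handler(func: Callable[..., Any]) -> Callable[[str], int]:
    def wrapper(raw: str) -> int:
        return int(func(raw))
    return wrapper

@to_int_handler
def parse(raw: str) -> str:
    return raw.strip()

assert parse(" 42 ") == 42  # mypy now sees `parse` as Callable[[str], int]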
| algorandfoundation/puya | src/puyapy/_vendor/mypy/semanal_infer.py | Python | NOASSERTION | 5,180 |
"""Top-level logic for the semantic analyzer.
The semantic analyzer binds names, resolves imports, detects various
special constructs that don't have dedicated AST nodes after parse
(such as 'cast' which looks like a call), populates symbol tables, and
performs various simple consistency checks.
Semantic analysis of each SCC (strongly connected component; import
cycle) is performed in one unit. Each module is analyzed as multiple
separate *targets*; the module top level is one target and each function
is a target. Nested functions are not separate targets, however. This is
mostly identical to targets used by mypy daemon (but classes aren't
targets in semantic analysis).
We first analyze each module top level in an SCC. If we encounter some
names that we can't bind because the target of the name may not have
been processed yet, we *defer* the current target for further
processing. Deferred targets will be analyzed additional times until
everything can be bound, or we reach a maximum number of iterations.
We keep track of a set of incomplete namespaces, i.e. namespaces that we
haven't finished populating yet. References to these namespaces cause a
deferral if they can't be satisfied. Initially every module in the SCC
will be incomplete.
"""
from __future__ import annotations
from contextlib import nullcontext
from typing import TYPE_CHECKING, Callable, Final, List, Optional, Tuple, Union
from typing_extensions import TypeAlias as _TypeAlias
import mypy.build
import mypy.state
from mypy.checker import FineGrainedDeferredNode
from mypy.errors import Errors
from mypy.nodes import Decorator, FuncDef, MypyFile, OverloadedFuncDef, TypeInfo, Var
from mypy.options import Options
from mypy.plugin import ClassDefContext
from mypy.plugins import dataclasses as dataclasses_plugin
from mypy.semanal import (
SemanticAnalyzer,
apply_semantic_analyzer_patches,
remove_imported_names_from_symtable,
)
from mypy.semanal_classprop import (
add_type_promotion,
calculate_class_abstract_status,
calculate_class_vars,
check_protocol_status,
)
from mypy.semanal_infer import infer_decorator_signature_if_simple
from mypy.semanal_shared import find_dataclass_transform_spec
from mypy.semanal_typeargs import TypeArgumentAnalyzer
from mypy.server.aststrip import SavedAttributes
from mypy.util import is_typeshed_file
if TYPE_CHECKING:
from mypy.build import Graph, State
Patches: _TypeAlias = List[Tuple[int, Callable[[], None]]]
# If we perform this many iterations, raise an exception since we are likely stuck.
MAX_ITERATIONS: Final = 20
# Number of passes over core modules before going on to the rest of the builtin SCC.
CORE_WARMUP: Final = 2
core_modules: Final = [
"typing",
"_collections_abc",
"builtins",
"abc",
"collections",
"collections.abc",
]
def semantic_analysis_for_scc(graph: Graph, scc: list[str], errors: Errors) -> None:
"""Perform semantic analysis for all modules in a SCC (import cycle).
Assume that reachability analysis has already been performed.
The scc will be processed roughly in the order the modules are included
in the list.
"""
patches: Patches = []
# Note that functions can't define new module-level attributes
# using 'global x', since module top levels are fully processed
# before functions. This limitation is unlikely to go away soon.
process_top_levels(graph, scc, patches)
process_functions(graph, scc, patches)
# We use patch callbacks to fix up things when we expect relatively few
# callbacks to be required.
apply_semantic_analyzer_patches(patches)
    # Run class decorator hooks (they require complete MROs and no placeholders).
apply_class_plugin_hooks(graph, scc, errors)
# This pass might need fallbacks calculated above and the results of hooks.
check_type_arguments(graph, scc, errors)
calculate_class_properties(graph, scc, errors)
check_blockers(graph, scc)
# Clean-up builtins, so that TypeVar etc. are not accessible without importing.
if "builtins" in scc:
cleanup_builtin_scc(graph["builtins"])
def cleanup_builtin_scc(state: State) -> None:
"""Remove imported names from builtins namespace.
This way names imported from typing in builtins.pyi aren't available
by default (without importing them). We can only do this after processing
the whole SCC is finished, when the imported names aren't needed for
processing builtins.pyi itself.
"""
assert state.tree is not None
remove_imported_names_from_symtable(state.tree.names, "builtins")
def semantic_analysis_for_targets(
state: State, nodes: list[FineGrainedDeferredNode], graph: Graph, saved_attrs: SavedAttributes
) -> None:
"""Semantically analyze only selected nodes in a given module.
This essentially mirrors the logic of semantic_analysis_for_scc()
except that we process only some targets. This is used in fine grained
incremental mode, when propagating an update.
The saved_attrs are implicitly declared instance attributes (attributes
defined on self) removed by AST stripper that may need to be reintroduced
here. They must be added before any methods are analyzed.
"""
patches: Patches = []
if any(isinstance(n.node, MypyFile) for n in nodes):
# Process module top level first (if needed).
process_top_levels(graph, [state.id], patches)
restore_saved_attrs(saved_attrs)
analyzer = state.manager.semantic_analyzer
for n in nodes:
if isinstance(n.node, MypyFile):
# Already done above.
continue
process_top_level_function(
analyzer, state, state.id, n.node.fullname, n.node, n.active_typeinfo, patches
)
apply_semantic_analyzer_patches(patches)
apply_class_plugin_hooks(graph, [state.id], state.manager.errors)
check_type_arguments_in_targets(nodes, state, state.manager.errors)
calculate_class_properties(graph, [state.id], state.manager.errors)
def restore_saved_attrs(saved_attrs: SavedAttributes) -> None:
"""Restore instance variables removed during AST strip that haven't been added yet."""
for (cdef, name), sym in saved_attrs.items():
info = cdef.info
existing = info.get(name)
defined_in_this_class = name in info.names
assert isinstance(sym.node, Var)
# This needs to mimic the logic in SemanticAnalyzer.analyze_member_lvalue()
# regarding the existing variable in class body or in a superclass:
# If the attribute of self is not defined in superclasses, create a new Var.
if (
existing is None
or
# (An abstract Var is considered as not defined.)
(isinstance(existing.node, Var) and existing.node.is_abstract_var)
or
# Also an explicit declaration on self creates a new Var unless
# there is already one defined in the class body.
sym.node.explicit_self_type
and not defined_in_this_class
):
info.names[name] = sym
def process_top_levels(graph: Graph, scc: list[str], patches: Patches) -> None:
# Process top levels until everything has been bound.
    # Reverse the order of the SCC so the first modules in the original list
    # will be processed first. This helps with performance.
scc = list(reversed(scc))
# Initialize ASTs and symbol tables.
for id in scc:
state = graph[id]
assert state.tree is not None
state.manager.semantic_analyzer.prepare_file(state.tree)
# Initially all namespaces in the SCC are incomplete (well they are empty).
state.manager.incomplete_namespaces.update(scc)
worklist = scc.copy()
# HACK: process core stuff first. This is mostly needed to support defining
# named tuples in builtin SCC.
if all(m in worklist for m in core_modules):
worklist += list(reversed(core_modules)) * CORE_WARMUP
final_iteration = False
iteration = 0
analyzer = state.manager.semantic_analyzer
analyzer.deferral_debug_context.clear()
while worklist:
iteration += 1
if iteration > MAX_ITERATIONS:
# Just pick some module inside the current SCC for error context.
assert state.tree is not None
with analyzer.file_context(state.tree, state.options):
analyzer.report_hang()
break
if final_iteration:
# Give up. It's impossible to bind all names.
state.manager.incomplete_namespaces.clear()
all_deferred: list[str] = []
any_progress = False
while worklist:
next_id = worklist.pop()
state = graph[next_id]
assert state.tree is not None
deferred, incomplete, progress = semantic_analyze_target(
next_id, next_id, state, state.tree, None, final_iteration, patches
)
all_deferred += deferred
any_progress = any_progress or progress
if not incomplete:
state.manager.incomplete_namespaces.discard(next_id)
if final_iteration:
assert not all_deferred, "Must not defer during final iteration"
# Reverse to process the targets in the same order on every iteration. This avoids
# processing the same target twice in a row, which is inefficient.
worklist = list(reversed(all_deferred))
final_iteration = not any_progress
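
# Illustrative sketch (not part of mypy's source): a much-simplified analogue of
# the worklist/deferral loop in process_top_levels() above. Targets whose
# dependencies are not bound yet are deferred and retried; once an iteration
# makes no progress, one final pass runs (this is where real mypy would report
# the names that could not be resolved).
def resolve_all(targets: dict[str, list[str]]) -> list[str]:
    bound: list[str] = []
    worklist = list(reversed(list(targets)))
    final_iteration = False
    while worklist:
        deferred: list[str] = []
        progress = False
        while worklist:
            name = worklist.pop()
            if final_iteration or all(dep in bound for dep in targets[name]):
                bound.append(name)
                progress = True
            else:
                deferred.append(name)
        worklist = list(reversed(deferred))
        if final_iteration:
            break
        final_iteration = not progress
    return bound

# "b" depends on "a", so it is deferred on the first pass and bound on the second.
assert resolve_all({"b": ["a"], "a": []}) == ["a", "b"]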
def process_functions(graph: Graph, scc: list[str], patches: Patches) -> None:
# Process functions.
for module in scc:
tree = graph[module].tree
assert tree is not None
analyzer = graph[module].manager.semantic_analyzer
# In principle, functions can be processed in arbitrary order,
# but _methods_ must be processed in the order they are defined,
# because some features (most notably partial types) depend on
# order of definitions on self.
#
# There can be multiple generated methods per line. Use target
# name as the second sort key to get a repeatable sort order on
# Python 3.5, which doesn't preserve dictionary order.
targets = sorted(get_all_leaf_targets(tree), key=lambda x: (x[1].line, x[0]))
for target, node, active_type in targets:
assert isinstance(node, (FuncDef, OverloadedFuncDef, Decorator))
process_top_level_function(
analyzer, graph[module], module, target, node, active_type, patches
)
def process_top_level_function(
analyzer: SemanticAnalyzer,
state: State,
module: str,
target: str,
node: FuncDef | OverloadedFuncDef | Decorator,
active_type: TypeInfo | None,
patches: Patches,
) -> None:
"""Analyze single top-level function or method.
Process the body of the function (including nested functions) again and again,
until all names have been resolved (or iteration limit reached).
"""
# We need one more iteration after incomplete is False (e.g. to report errors, if any).
final_iteration = False
incomplete = True
# Start in the incomplete state (no missing names will be reported on first pass).
# Note that we use module name, since functions don't create qualified names.
deferred = [module]
analyzer.deferral_debug_context.clear()
analyzer.incomplete_namespaces.add(module)
iteration = 0
while deferred:
iteration += 1
if iteration == MAX_ITERATIONS:
# Just pick some module inside the current SCC for error context.
assert state.tree is not None
with analyzer.file_context(state.tree, state.options):
analyzer.report_hang()
break
if not (deferred or incomplete) or final_iteration:
# OK, this is one last pass, now missing names will be reported.
analyzer.incomplete_namespaces.discard(module)
deferred, incomplete, progress = semantic_analyze_target(
target, module, state, node, active_type, final_iteration, patches
)
if not incomplete:
state.manager.incomplete_namespaces.discard(module)
if final_iteration:
assert not deferred, "Must not defer during final iteration"
if not progress:
final_iteration = True
analyzer.incomplete_namespaces.discard(module)
# After semantic analysis is done, discard local namespaces
# to avoid memory hoarding.
analyzer.saved_locals.clear()
TargetInfo: _TypeAlias = Tuple[
str, Union[MypyFile, FuncDef, OverloadedFuncDef, Decorator], Optional[TypeInfo]
]
def get_all_leaf_targets(file: MypyFile) -> list[TargetInfo]:
"""Return all leaf targets in a symbol table (module-level and methods)."""
result: list[TargetInfo] = []
for fullname, node, active_type in file.local_definitions():
if isinstance(node.node, (FuncDef, OverloadedFuncDef, Decorator)):
result.append((fullname, node.node, active_type))
return result
def semantic_analyze_target(
target: str,
module: str,
state: State,
node: MypyFile | FuncDef | OverloadedFuncDef | Decorator,
active_type: TypeInfo | None,
final_iteration: bool,
patches: Patches,
) -> tuple[list[str], bool, bool]:
"""Semantically analyze a single target.
Return tuple with these items:
- list of deferred targets
- was some definition incomplete (need to run another pass)
- were any new names defined (or placeholders replaced)
"""
state.manager.processed_targets.append((module, target))
tree = state.tree
assert tree is not None
analyzer = state.manager.semantic_analyzer
# TODO: Move initialization to somewhere else
analyzer.global_decls = [set()]
analyzer.nonlocal_decls = [set()]
analyzer.globals = tree.names
analyzer.progress = False
with state.wrap_context(check_blockers=False):
refresh_node = node
if isinstance(refresh_node, Decorator):
# Decorator expressions will be processed as part of the module top level.
refresh_node = refresh_node.func
analyzer.refresh_partial(
refresh_node,
patches,
final_iteration,
file_node=tree,
options=state.options,
active_type=active_type,
)
if isinstance(node, Decorator):
infer_decorator_signature_if_simple(node, analyzer)
for dep in analyzer.imports:
state.add_dependency(dep)
priority = mypy.build.PRI_LOW
if priority <= state.priorities.get(dep, priority):
state.priorities[dep] = priority
# Clear out some stale data to avoid memory leaks and astmerge
# validity check confusion
analyzer.statement = None
del analyzer.cur_mod_node
if analyzer.deferred:
return [target], analyzer.incomplete, analyzer.progress
else:
return [], analyzer.incomplete, analyzer.progress
def check_type_arguments(graph: Graph, scc: list[str], errors: Errors) -> None:
for module in scc:
state = graph[module]
assert state.tree
analyzer = TypeArgumentAnalyzer(
errors,
state.options,
state.tree.is_typeshed_file(state.options),
state.manager.semantic_analyzer.named_type,
)
with state.wrap_context():
with mypy.state.state.strict_optional_set(state.options.strict_optional):
state.tree.accept(analyzer)
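
# Illustrative sketch (not part of mypy's source): the kind of problem
# TypeArgumentAnalyzer flags in check_type_arguments() above -- a type argument
# that violates its type variable's bound. This snippet runs fine, but mypy
# rejects `bad` with a "type-var" error. The names `Box`, `good` and `bad` are
# made up for this example.
from typing import Generic, TypeVar
_T = TypeVar("_T", bound=int)

class Box(Generic[_T]):
    pass

good: "Box[bool]"  # bool is a subtype of int, so this type argument is accepted
bad: "Box[str]"  # str is not a subtype of int, so mypy reports an error here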
def check_type_arguments_in_targets(
targets: list[FineGrainedDeferredNode], state: State, errors: Errors
) -> None:
"""Check type arguments against type variable bounds and restrictions.
This mirrors the logic in check_type_arguments() except that we process only
some targets. This is used in fine grained incremental mode.
"""
analyzer = TypeArgumentAnalyzer(
errors,
state.options,
is_typeshed_file(state.options.abs_custom_typeshed_dir, state.path or ""),
state.manager.semantic_analyzer.named_type,
)
with state.wrap_context():
with mypy.state.state.strict_optional_set(state.options.strict_optional):
for target in targets:
func: FuncDef | OverloadedFuncDef | None = None
if isinstance(target.node, (FuncDef, OverloadedFuncDef)):
func = target.node
saved = (state.id, target.active_typeinfo, func) # module, class, function
with errors.scope.saved_scope(saved) if errors.scope else nullcontext():
analyzer.recurse_into_functions = func is not None
target.node.accept(analyzer)
def apply_class_plugin_hooks(graph: Graph, scc: list[str], errors: Errors) -> None:
"""Apply class plugin hooks within a SCC.
    We run these after the main semantic analysis so that the hooks
don't need to deal with incomplete definitions such as placeholder
types.
Note that some hooks incorrectly run during the main semantic
analysis pass, for historical reasons.
"""
num_passes = 0
incomplete = True
# If we encounter a base class that has not been processed, we'll run another
# pass. This should eventually reach a fixed point.
while incomplete:
assert num_passes < 10, "Internal error: too many class plugin hook passes"
num_passes += 1
incomplete = False
for module in scc:
state = graph[module]
tree = state.tree
assert tree
for _, node, _ in tree.local_definitions():
if isinstance(node.node, TypeInfo):
if not apply_hooks_to_class(
state.manager.semantic_analyzer,
module,
node.node,
state.options,
tree,
errors,
):
incomplete = True
def apply_hooks_to_class(
self: SemanticAnalyzer,
module: str,
info: TypeInfo,
options: Options,
file_node: MypyFile,
errors: Errors,
) -> bool:
# TODO: Move more class-related hooks here?
defn = info.defn
ok = True
for decorator in defn.decorators:
with self.file_context(file_node, options, info):
hook = None
decorator_name = self.get_fullname_for_hook(decorator)
if decorator_name:
hook = self.plugin.get_class_decorator_hook_2(decorator_name)
# Special case: if the decorator is itself decorated with
# typing.dataclass_transform, apply the hook for the dataclasses plugin
# TODO: remove special casing here
if hook is None and find_dataclass_transform_spec(decorator):
hook = dataclasses_plugin.dataclass_class_maker_callback
if hook:
ok = ok and hook(ClassDefContext(defn, decorator, self))
# Check if the class definition itself triggers a dataclass transform (via a parent class/
# metaclass)
spec = find_dataclass_transform_spec(info)
if spec is not None:
with self.file_context(file_node, options, info):
# We can't use the normal hook because reason = defn, and ClassDefContext only accepts
# an Expression for reason
ok = ok and dataclasses_plugin.DataclassTransformer(defn, defn, spec, self).transform()
return ok
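
# Illustrative sketch (not part of mypy's source): a class decorator marked
# with dataclass_transform(), which apply_hooks_to_class() above special-cases
# by routing it through the dataclasses plugin hook. typing_extensions is
# assumed to be installed (it provides dataclass_transform on Python < 3.11).
from typing_extensions import dataclass_transform

@dataclass_transform()
def model(cls: type) -> type:
    # A real implementation would synthesize __init__ and friends from the
    # class annotations; returning the class unchanged keeps the sketch short.
    return cls

@model
class Point:
    x: int
    y: int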
def calculate_class_properties(graph: Graph, scc: list[str], errors: Errors) -> None:
builtins = graph["builtins"].tree
assert builtins
for module in scc:
state = graph[module]
tree = state.tree
assert tree
for _, node, _ in tree.local_definitions():
if isinstance(node.node, TypeInfo):
with state.manager.semantic_analyzer.file_context(tree, state.options, node.node):
calculate_class_abstract_status(node.node, tree.is_stub, errors)
check_protocol_status(node.node, errors)
calculate_class_vars(node.node)
add_type_promotion(
node.node, tree.names, graph[module].options, builtins.names
)
def check_blockers(graph: Graph, scc: list[str]) -> None:
for module in scc:
graph[module].check_blockers()
| algorandfoundation/puya | src/puyapy/_vendor/mypy/semanal_main.py | Python | NOASSERTION | 20,521 |
"""Semantic analysis of named tuple definitions.
This is conceptually part of mypy.semanal.
"""
from __future__ import annotations
import keyword
from contextlib import contextmanager
from typing import Container, Final, Iterator, List, Mapping, cast
from mypy.errorcodes import ARG_TYPE, ErrorCode
from mypy.exprtotype import TypeTranslationError, expr_to_unanalyzed_type
from mypy.messages import MessageBuilder
from mypy.nodes import (
ARG_NAMED_OPT,
ARG_OPT,
ARG_POS,
MDEF,
Argument,
AssignmentStmt,
Block,
CallExpr,
ClassDef,
Context,
Decorator,
EllipsisExpr,
Expression,
ExpressionStmt,
FuncBase,
FuncDef,
ListExpr,
NamedTupleExpr,
NameExpr,
PassStmt,
RefExpr,
Statement,
StrExpr,
SymbolTable,
SymbolTableNode,
TempNode,
TupleExpr,
TypeInfo,
TypeVarExpr,
Var,
is_StrExpr_list,
)
from mypy.options import Options
from mypy.semanal_shared import (
PRIORITY_FALLBACKS,
SemanticAnalyzerInterface,
calculate_tuple_fallback,
has_placeholder,
set_callable_name,
)
from mypy.types import (
TYPED_NAMEDTUPLE_NAMES,
AnyType,
CallableType,
LiteralType,
TupleType,
Type,
TypeOfAny,
TypeType,
TypeVarId,
TypeVarLikeType,
TypeVarType,
UnboundType,
has_type_vars,
)
from mypy.util import get_unique_redefinition_name
# Matches "_prohibited" in typing.py, but adds __annotations__, which works at runtime but can't
# easily be supported in a static checker.
NAMEDTUPLE_PROHIBITED_NAMES: Final = (
"__new__",
"__init__",
"__slots__",
"__getnewargs__",
"_fields",
"_field_defaults",
"_field_types",
"_make",
"_replace",
"_asdict",
"_source",
"__annotations__",
)
NAMEDTUP_CLASS_ERROR: Final = (
'Invalid statement in NamedTuple definition; expected "field_name: field_type [= default]"'
)
SELF_TVAR_NAME: Final = "_NT"
class NamedTupleAnalyzer:
def __init__(
self, options: Options, api: SemanticAnalyzerInterface, msg: MessageBuilder
) -> None:
self.options = options
self.api = api
self.msg = msg
def analyze_namedtuple_classdef(
self, defn: ClassDef, is_stub_file: bool, is_func_scope: bool
) -> tuple[bool, TypeInfo | None]:
"""Analyze if given class definition can be a named tuple definition.
Return a tuple where first item indicates whether this can possibly be a named tuple,
and the second item is the corresponding TypeInfo (may be None if not ready and should be
deferred).
"""
for base_expr in defn.base_type_exprs:
if isinstance(base_expr, RefExpr):
self.api.accept(base_expr)
if base_expr.fullname in TYPED_NAMEDTUPLE_NAMES:
result = self.check_namedtuple_classdef(defn, is_stub_file)
if result is None:
# This is a valid named tuple, but some types are incomplete.
return True, None
items, types, default_items, statements = result
if is_func_scope and "@" not in defn.name:
defn.name += "@" + str(defn.line)
existing_info = None
if isinstance(defn.analyzed, NamedTupleExpr):
existing_info = defn.analyzed.info
info = self.build_namedtuple_typeinfo(
defn.name, items, types, default_items, defn.line, existing_info
)
defn.analyzed = NamedTupleExpr(info, is_typed=True)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
defn.defs.body = statements
# All done: this is a valid named tuple with all types known.
return True, info
# This can't be a valid named tuple.
return False, None
def check_namedtuple_classdef(
self, defn: ClassDef, is_stub_file: bool
) -> tuple[list[str], list[Type], dict[str, Expression], list[Statement]] | None:
"""Parse and validate fields in named tuple class definition.
Return a four tuple:
* field names
* field types
* field default values
* valid statements
or None, if any of the types are not ready.
"""
if len(defn.base_type_exprs) > 1:
self.fail("NamedTuple should be a single base", defn)
items: list[str] = []
types: list[Type] = []
default_items: dict[str, Expression] = {}
statements: list[Statement] = []
for stmt in defn.defs.body:
statements.append(stmt)
if not isinstance(stmt, AssignmentStmt):
# Still allow pass or ... (for empty namedtuples).
if isinstance(stmt, PassStmt) or (
isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, EllipsisExpr)
):
continue
# Also allow methods, including decorated ones.
if isinstance(stmt, (Decorator, FuncBase)):
continue
# And docstrings.
if isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, StrExpr):
continue
statements.pop()
defn.removed_statements.append(stmt)
self.fail(NAMEDTUP_CLASS_ERROR, stmt)
elif len(stmt.lvalues) > 1 or not isinstance(stmt.lvalues[0], NameExpr):
# An assignment, but an invalid one.
statements.pop()
defn.removed_statements.append(stmt)
self.fail(NAMEDTUP_CLASS_ERROR, stmt)
else:
# Append name and type in this case...
name = stmt.lvalues[0].name
items.append(name)
if stmt.type is None:
types.append(AnyType(TypeOfAny.unannotated))
else:
# We never allow recursive types at function scope. Although it is
# possible to support this for named tuples, it is still tricky, and
# it would be inconsistent with type aliases.
analyzed = self.api.anal_type(
stmt.type,
allow_placeholder=not self.api.is_func_scope(),
prohibit_self_type="NamedTuple item type",
)
if analyzed is None:
# Something is incomplete. We need to defer this named tuple.
return None
types.append(analyzed)
                # ...despite possible minor failures that allow further analysis.
if name.startswith("_"):
self.fail(
f"NamedTuple field name cannot start with an underscore: {name}", stmt
)
if stmt.type is None or hasattr(stmt, "new_syntax") and not stmt.new_syntax:
self.fail(NAMEDTUP_CLASS_ERROR, stmt)
elif isinstance(stmt.rvalue, TempNode):
# x: int assigns rvalue to TempNode(AnyType())
if default_items:
self.fail(
"Non-default NamedTuple fields cannot follow default fields", stmt
)
else:
default_items[name] = stmt.rvalue
if defn.keywords:
for_function = ' for "__init_subclass__" of "NamedTuple"'
for key in defn.keywords:
self.msg.unexpected_keyword_argument_for_function(for_function, key, defn)
return items, types, default_items, statements
def check_namedtuple(
self, node: Expression, var_name: str | None, is_func_scope: bool
) -> tuple[str | None, TypeInfo | None, list[TypeVarLikeType]]:
"""Check if a call defines a namedtuple.
The optional var_name argument is the name of the variable to
which this is assigned, if any.
        Return a tuple of three items:
        * Internal name of the named tuple (e.g. the name passed as an argument to namedtuple)
          or None if it is not a valid named tuple
        * Corresponding TypeInfo, or None if not ready.
        * Type variable definitions bound in the field types (may be empty).
If the definition is invalid but looks like a namedtuple,
report errors but return (some) TypeInfo.
"""
if not isinstance(node, CallExpr):
return None, None, []
call = node
callee = call.callee
if not isinstance(callee, RefExpr):
return None, None, []
fullname = callee.fullname
if fullname == "collections.namedtuple":
is_typed = False
elif fullname in TYPED_NAMEDTUPLE_NAMES:
is_typed = True
else:
return None, None, []
result = self.parse_namedtuple_args(call, fullname)
if result:
items, types, defaults, typename, tvar_defs, ok = result
else:
# Error. Construct dummy return value.
if var_name:
name = var_name
if is_func_scope:
name += "@" + str(call.line)
else:
name = var_name = "namedtuple@" + str(call.line)
info = self.build_namedtuple_typeinfo(name, [], [], {}, node.line, None)
self.store_namedtuple_info(info, var_name, call, is_typed)
if name != var_name or is_func_scope:
# NOTE: we skip local namespaces since they are not serialized.
self.api.add_symbol_skip_local(name, info)
return var_name, info, []
if not ok:
# This is a valid named tuple but some types are not ready.
return typename, None, []
# We use the variable name as the class name if it exists. If
# it doesn't, we use the name passed as an argument. We prefer
# the variable name because it should be unique inside a
# module, and so we don't need to disambiguate it with a line
# number.
if var_name:
name = var_name
else:
name = typename
if var_name is None or is_func_scope:
            # There are two special cases where we need to give it a unique name derived
# from the line number:
# * This is a base class expression, since it often matches the class name:
# class NT(NamedTuple('NT', [...])):
# ...
# * This is a local (function or method level) named tuple, since
# two methods of a class can define a named tuple with the same name,
# and they will be stored in the same namespace (see below).
name += "@" + str(call.line)
if defaults:
default_items = {
arg_name: default for arg_name, default in zip(items[-len(defaults) :], defaults)
}
else:
default_items = {}
existing_info = None
if isinstance(node.analyzed, NamedTupleExpr):
existing_info = node.analyzed.info
info = self.build_namedtuple_typeinfo(
name, items, types, default_items, node.line, existing_info
)
# If var_name is not None (i.e. this is not a base class expression), we always
# store the generated TypeInfo under var_name in the current scope, so that
# other definitions can use it.
if var_name:
self.store_namedtuple_info(info, var_name, call, is_typed)
else:
call.analyzed = NamedTupleExpr(info, is_typed=is_typed)
call.analyzed.set_line(call)
# There are three cases where we need to store the generated TypeInfo
# second time (for the purpose of serialization):
# * If there is a name mismatch like One = NamedTuple('Other', [...])
# we also store the info under name 'Other@lineno', this is needed
# because classes are (de)serialized using their actual fullname, not
# the name of l.h.s.
# * If this is a method level named tuple. It can leak from the method
# via assignment to self attribute and therefore needs to be serialized
# (local namespaces are not serialized).
# * If it is a base class expression. It was not stored above, since
# there is no var_name (but it still needs to be serialized
# since it is in MRO of some class).
if name != var_name or is_func_scope:
# NOTE: we skip local namespaces since they are not serialized.
self.api.add_symbol_skip_local(name, info)
return typename, info, tvar_defs
def store_namedtuple_info(
self, info: TypeInfo, name: str, call: CallExpr, is_typed: bool
) -> None:
self.api.add_symbol(name, info, call)
call.analyzed = NamedTupleExpr(info, is_typed=is_typed)
call.analyzed.set_line(call)
def parse_namedtuple_args(
self, call: CallExpr, fullname: str
) -> None | (tuple[list[str], list[Type], list[Expression], str, list[TypeVarLikeType], bool]):
"""Parse a namedtuple() call into data needed to construct a type.
Returns a 6-tuple:
- List of argument names
- List of argument types
- List of default values
- First argument of namedtuple
- All typevars found in the field definition
- Whether all types are ready.
Return None if the definition didn't typecheck.
"""
type_name = "NamedTuple" if fullname in TYPED_NAMEDTUPLE_NAMES else "namedtuple"
# TODO: Share code with check_argument_count in checkexpr.py?
args = call.args
if len(args) < 2:
self.fail(f'Too few arguments for "{type_name}()"', call)
return None
defaults: list[Expression] = []
rename = False
if len(args) > 2:
# Typed namedtuple doesn't support additional arguments.
if fullname in TYPED_NAMEDTUPLE_NAMES:
self.fail('Too many arguments for "NamedTuple()"', call)
return None
for i, arg_name in enumerate(call.arg_names[2:], 2):
if arg_name == "defaults":
arg = args[i]
# We don't care what the values are, as long as the argument is an iterable
# and we can count how many defaults there are.
if isinstance(arg, (ListExpr, TupleExpr)):
defaults = list(arg.items)
else:
self.fail(
"List or tuple literal expected as the defaults argument to "
"{}()".format(type_name),
arg,
)
elif arg_name == "rename":
arg = args[i]
if isinstance(arg, NameExpr) and arg.name in ("True", "False"):
rename = arg.name == "True"
else:
self.fail(
'Boolean literal expected as the "rename" argument to '
f"{type_name}()",
arg,
code=ARG_TYPE,
)
if call.arg_kinds[:2] != [ARG_POS, ARG_POS]:
self.fail(f'Unexpected arguments to "{type_name}()"', call)
return None
if not isinstance(args[0], StrExpr):
self.fail(f'"{type_name}()" expects a string literal as the first argument', call)
return None
typename = args[0].value
types: list[Type] = []
tvar_defs = []
if not isinstance(args[1], (ListExpr, TupleExpr)):
if fullname == "collections.namedtuple" and isinstance(args[1], StrExpr):
str_expr = args[1]
items = str_expr.value.replace(",", " ").split()
else:
self.fail(
'List or tuple literal expected as the second argument to "{}()"'.format(
type_name
),
call,
)
return None
else:
listexpr = args[1]
if fullname == "collections.namedtuple":
# The fields argument contains just names, with implicit Any types.
if not is_StrExpr_list(listexpr.items):
self.fail('String literal expected as "namedtuple()" item', call)
return None
items = [item.value for item in listexpr.items]
else:
type_exprs = [
t.items[1]
for t in listexpr.items
if isinstance(t, TupleExpr) and len(t.items) == 2
]
tvar_defs = self.api.get_and_bind_all_tvars(type_exprs)
# The fields argument contains (name, type) tuples.
result = self.parse_namedtuple_fields_with_types(listexpr.items, call)
if result is None:
# One of the types is not ready, defer.
return None
items, types, _, ok = result
if not ok:
return [], [], [], typename, [], False
if not types:
types = [AnyType(TypeOfAny.unannotated) for _ in items]
processed_items = []
seen_names: set[str] = set()
for i, item in enumerate(items):
problem = self.check_namedtuple_field_name(item, seen_names)
if problem is None:
processed_items.append(item)
seen_names.add(item)
else:
if not rename:
self.fail(f'"{type_name}()" {problem}', call)
# Even if rename=False, we pretend that it is True.
# At runtime namedtuple creation would throw an error;
# applying the rename logic means we create a more sensible
# namedtuple.
new_name = f"_{i}"
processed_items.append(new_name)
seen_names.add(new_name)
if len(defaults) > len(items):
self.fail(f'Too many defaults given in call to "{type_name}()"', call)
defaults = defaults[: len(items)]
return processed_items, types, defaults, typename, tvar_defs, True
def parse_namedtuple_fields_with_types(
self, nodes: list[Expression], context: Context
) -> tuple[list[str], list[Type], list[Expression], bool] | None:
"""Parse typed named tuple fields.
Return (names, types, defaults, whether types are all ready), or None if error occurred.
"""
items: list[str] = []
types: list[Type] = []
for item in nodes:
if isinstance(item, TupleExpr):
if len(item.items) != 2:
self.fail('Invalid "NamedTuple()" field definition', item)
return None
name, type_node = item.items
if isinstance(name, StrExpr):
items.append(name.value)
else:
self.fail('Invalid "NamedTuple()" field name', item)
return None
try:
type = expr_to_unanalyzed_type(type_node, self.options, self.api.is_stub_file)
except TypeTranslationError:
self.fail("Invalid field type", type_node)
return None
# We never allow recursive types at function scope.
analyzed = self.api.anal_type(
type,
allow_placeholder=not self.api.is_func_scope(),
prohibit_self_type="NamedTuple item type",
)
# Workaround #4987 and avoid introducing a bogus UnboundType
if isinstance(analyzed, UnboundType):
analyzed = AnyType(TypeOfAny.from_error)
# These should be all known, otherwise we would defer in visit_assignment_stmt().
if analyzed is None:
return [], [], [], False
types.append(analyzed)
else:
self.fail('Tuple expected as "NamedTuple()" field', item)
return None
return items, types, [], True
def build_namedtuple_typeinfo(
self,
name: str,
items: list[str],
types: list[Type],
default_items: Mapping[str, Expression],
line: int,
existing_info: TypeInfo | None,
) -> TypeInfo:
strtype = self.api.named_type("builtins.str")
implicit_any = AnyType(TypeOfAny.special_form)
basetuple_type = self.api.named_type("builtins.tuple", [implicit_any])
dictype = self.api.named_type("builtins.dict", [strtype, implicit_any])
# Actual signature should return OrderedDict[str, Union[types]]
ordereddictype = self.api.named_type("builtins.dict", [strtype, implicit_any])
fallback = self.api.named_type("builtins.tuple", [implicit_any])
# Note: actual signature should accept an invariant version of Iterable[UnionType[types]].
# but it can't be expressed. 'new' and 'len' should be callable types.
iterable_type = self.api.named_type_or_none("typing.Iterable", [implicit_any])
function_type = self.api.named_type("builtins.function")
literals: list[Type] = [LiteralType(item, strtype) for item in items]
match_args_type = TupleType(literals, basetuple_type)
info = existing_info or self.api.basic_new_typeinfo(name, fallback, line)
info.is_named_tuple = True
tuple_base = TupleType(types, fallback)
if info.special_alias and has_placeholder(info.special_alias.target):
self.api.process_placeholder(
None, "NamedTuple item", info, force_progress=tuple_base != info.tuple_type
)
info.update_tuple_type(tuple_base)
info.line = line
# For use by mypyc.
info.metadata["namedtuple"] = {"fields": items.copy()}
# We can't calculate the complete fallback type until after semantic
# analysis, since otherwise base classes might be incomplete. Postpone a
# callback function that patches the fallback.
if not has_placeholder(tuple_base) and not has_type_vars(tuple_base):
self.api.schedule_patch(
PRIORITY_FALLBACKS, lambda: calculate_tuple_fallback(tuple_base)
)
def add_field(
var: Var, is_initialized_in_class: bool = False, is_property: bool = False
) -> None:
var.info = info
var.is_initialized_in_class = is_initialized_in_class
var.is_property = is_property
var._fullname = f"{info.fullname}.{var.name}"
info.names[var.name] = SymbolTableNode(MDEF, var)
fields = [Var(item, typ) for item, typ in zip(items, types)]
for var in fields:
add_field(var, is_property=True)
# We can't share Vars between fields and method arguments, since they
# have different full names (the latter are normally used as local variables
# in functions, so their full names are set to short names when generated methods
# are analyzed).
vars = [Var(item, typ) for item, typ in zip(items, types)]
tuple_of_strings = TupleType([strtype for _ in items], basetuple_type)
add_field(Var("_fields", tuple_of_strings), is_initialized_in_class=True)
add_field(Var("_field_types", dictype), is_initialized_in_class=True)
add_field(Var("_field_defaults", dictype), is_initialized_in_class=True)
add_field(Var("_source", strtype), is_initialized_in_class=True)
add_field(Var("__annotations__", ordereddictype), is_initialized_in_class=True)
add_field(Var("__doc__", strtype), is_initialized_in_class=True)
if self.options.python_version >= (3, 10):
add_field(Var("__match_args__", match_args_type), is_initialized_in_class=True)
assert info.tuple_type is not None # Set by update_tuple_type() above.
shared_self_type = TypeVarType(
name=SELF_TVAR_NAME,
fullname=f"{info.fullname}.{SELF_TVAR_NAME}",
# Namespace is patched per-method below.
id=self.api.tvar_scope.new_unique_func_id(),
values=[],
upper_bound=info.tuple_type,
default=AnyType(TypeOfAny.from_omitted_generics),
)
def add_method(
funcname: str,
ret: Type | None, # None means use (patched) self-type
args: list[Argument],
is_classmethod: bool = False,
is_new: bool = False,
) -> None:
fullname = f"{info.fullname}.{funcname}"
self_type = shared_self_type.copy_modified(
id=TypeVarId(shared_self_type.id.raw_id, namespace=fullname)
)
if ret is None:
ret = self_type
if is_classmethod or is_new:
first = [Argument(Var("_cls"), TypeType.make_normalized(self_type), None, ARG_POS)]
else:
first = [Argument(Var("_self"), self_type, None, ARG_POS)]
args = first + args
types = [arg.type_annotation for arg in args]
items = [arg.variable.name for arg in args]
arg_kinds = [arg.kind for arg in args]
assert None not in types
signature = CallableType(cast(List[Type], types), arg_kinds, items, ret, function_type)
signature.variables = [self_type]
func = FuncDef(funcname, args, Block([]))
func.info = info
func.is_class = is_classmethod
func.type = set_callable_name(signature, func)
func._fullname = fullname
func.line = line
if is_classmethod:
v = Var(funcname, func.type)
v.is_classmethod = True
v.info = info
v._fullname = func._fullname
func.is_decorated = True
dec = Decorator(func, [NameExpr("classmethod")], v)
dec.line = line
sym = SymbolTableNode(MDEF, dec)
else:
sym = SymbolTableNode(MDEF, func)
sym.plugin_generated = True
info.names[funcname] = sym
add_method(
"_replace",
ret=None,
args=[Argument(var, var.type, EllipsisExpr(), ARG_NAMED_OPT) for var in vars],
)
if self.options.python_version >= (3, 13):
add_method(
"__replace__",
ret=None,
args=[Argument(var, var.type, EllipsisExpr(), ARG_NAMED_OPT) for var in vars],
)
def make_init_arg(var: Var) -> Argument:
default = default_items.get(var.name, None)
kind = ARG_POS if default is None else ARG_OPT
return Argument(var, var.type, default, kind)
add_method("__new__", ret=None, args=[make_init_arg(var) for var in vars], is_new=True)
add_method("_asdict", args=[], ret=ordereddictype)
add_method(
"_make",
ret=None,
is_classmethod=True,
args=[Argument(Var("iterable", iterable_type), iterable_type, None, ARG_POS)],
)
self_tvar_expr = TypeVarExpr(
SELF_TVAR_NAME,
info.fullname + "." + SELF_TVAR_NAME,
[],
info.tuple_type,
AnyType(TypeOfAny.from_omitted_generics),
)
info.names[SELF_TVAR_NAME] = SymbolTableNode(MDEF, self_tvar_expr)
return info
@contextmanager
def save_namedtuple_body(self, named_tuple_info: TypeInfo) -> Iterator[None]:
"""Preserve the generated body of class-based named tuple and then restore it.
Temporarily clear the names dict so we don't get errors about duplicate names
that were already set in build_namedtuple_typeinfo (we already added the tuple
field names while generating the TypeInfo, and actual duplicates are
already reported).
"""
nt_names = named_tuple_info.names
named_tuple_info.names = SymbolTable()
yield
# Make sure we didn't use illegal names, then reset the names in the typeinfo.
for prohibited in NAMEDTUPLE_PROHIBITED_NAMES:
if prohibited in named_tuple_info.names:
if nt_names.get(prohibited) is named_tuple_info.names[prohibited]:
continue
ctx = named_tuple_info.names[prohibited].node
assert ctx is not None
self.fail(f'Cannot overwrite NamedTuple attribute "{prohibited}"', ctx)
# Restore the names in the original symbol table. This ensures that the symbol
# table contains the field objects created by build_namedtuple_typeinfo. Exclude
# __doc__, which can legally be overwritten by the class.
for key, value in nt_names.items():
if key in named_tuple_info.names:
if key == "__doc__":
continue
sym = named_tuple_info.names[key]
if isinstance(sym.node, (FuncBase, Decorator)) and not sym.plugin_generated:
# Keep user-defined methods as is.
continue
# Keep existing (user-provided) definitions under mangled names, so they
# get semantically analyzed.
r_key = get_unique_redefinition_name(key, named_tuple_info.names)
named_tuple_info.names[r_key] = sym
named_tuple_info.names[key] = value
# Helpers
def check_namedtuple_field_name(self, field: str, seen_names: Container[str]) -> str | None:
"""Return None for valid fields, a string description for invalid ones."""
if field in seen_names:
return f'has duplicate field name "{field}"'
elif not field.isidentifier():
return f'field name "{field}" is not a valid identifier'
elif field.startswith("_"):
return f'field name "{field}" starts with an underscore'
elif keyword.iskeyword(field):
return f'field name "{field}" is a keyword'
return None
def fail(self, msg: str, ctx: Context, code: ErrorCode | None = None) -> None:
self.api.fail(msg, ctx, code=code)
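
# Illustrative sketch (not part of mypy's source): the named tuple definition
# forms that NamedTupleAnalyzer above understands. All of them run with the
# standard library alone.
from collections import namedtuple
from typing import NamedTuple

# collections.namedtuple: fields as a string or list, with optional defaults.
Point1 = namedtuple("Point1", "x y", defaults=(0,))
# typing.NamedTuple functional form: (name, type) pairs.
Point2 = NamedTuple("Point2", [("x", int), ("y", int)])

# Class-based form handled by analyze_namedtuple_classdef(): annotated fields,
# optional defaults, and ordinary methods are all allowed.
class Point3(NamedTuple):
    x: int
    y: int = 0

    def magnitude_squared(self) -> int:
        return self.x * self.x + self.y * self.y

assert Point1(1).y == 0 and Point3(3, 4).magnitude_squared() == 25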
| algorandfoundation/puya | src/puyapy/_vendor/mypy/semanal_namedtuple.py | Python | NOASSERTION | 30,934 |
"""Semantic analysis of NewType definitions.
This is conceptually part of mypy.semanal (semantic analyzer pass 2).
"""
from __future__ import annotations
from mypy import errorcodes as codes
from mypy.errorcodes import ErrorCode
from mypy.exprtotype import TypeTranslationError, expr_to_unanalyzed_type
from mypy.messages import MessageBuilder, format_type
from mypy.nodes import (
ARG_POS,
MDEF,
Argument,
AssignmentStmt,
Block,
CallExpr,
Context,
FuncDef,
NameExpr,
NewTypeExpr,
PlaceholderNode,
RefExpr,
StrExpr,
SymbolTableNode,
TypeInfo,
Var,
)
from mypy.options import Options
from mypy.semanal_shared import SemanticAnalyzerInterface, has_placeholder
from mypy.typeanal import check_for_explicit_any, has_any_from_unimported_type
from mypy.types import (
AnyType,
CallableType,
Instance,
NoneType,
PlaceholderType,
TupleType,
Type,
TypeOfAny,
get_proper_type,
)
class NewTypeAnalyzer:
def __init__(
self, options: Options, api: SemanticAnalyzerInterface, msg: MessageBuilder
) -> None:
self.options = options
self.api = api
self.msg = msg
def process_newtype_declaration(self, s: AssignmentStmt) -> bool:
"""Check if s declares a NewType; if yes, store it in symbol table.
Return True if it's a NewType declaration. The current target may be
deferred as a side effect if the base type is not ready, even if
the return value is True.
The logic in this function mostly copies the logic for visit_class_def()
with a single (non-Generic) base.
"""
var_name, call = self.analyze_newtype_declaration(s)
if var_name is None or call is None:
return False
name = var_name
# OK, now we know this is a NewType. But the base type may be not ready yet,
# add placeholder as we do for ClassDef.
if self.api.is_func_scope():
name += "@" + str(s.line)
fullname = self.api.qualified_name(name)
if not call.analyzed or isinstance(call.analyzed, NewTypeExpr) and not call.analyzed.info:
# Start from labeling this as a future class, as we do for normal ClassDefs.
placeholder = PlaceholderNode(fullname, s, s.line, becomes_typeinfo=True)
self.api.add_symbol(var_name, placeholder, s, can_defer=False)
old_type, should_defer = self.check_newtype_args(var_name, call, s)
old_type = get_proper_type(old_type)
if not isinstance(call.analyzed, NewTypeExpr):
call.analyzed = NewTypeExpr(var_name, old_type, line=call.line, column=call.column)
else:
call.analyzed.old_type = old_type
if old_type is None:
if should_defer:
# Base type is not ready.
self.api.defer()
return True
# Create the corresponding class definition if the aliased type is subtypeable
assert isinstance(call.analyzed, NewTypeExpr)
if isinstance(old_type, TupleType):
newtype_class_info = self.build_newtype_typeinfo(
name, old_type, old_type.partial_fallback, s.line, call.analyzed.info
)
newtype_class_info.update_tuple_type(old_type)
elif isinstance(old_type, Instance):
if old_type.type.is_protocol:
self.fail("NewType cannot be used with protocol classes", s)
newtype_class_info = self.build_newtype_typeinfo(
name, old_type, old_type, s.line, call.analyzed.info
)
else:
if old_type is not None:
message = "Argument 2 to NewType(...) must be subclassable (got {})"
self.fail(
message.format(format_type(old_type, self.options)),
s,
code=codes.VALID_NEWTYPE,
)
# Otherwise the error was already reported.
old_type = AnyType(TypeOfAny.from_error)
object_type = self.api.named_type("builtins.object")
newtype_class_info = self.build_newtype_typeinfo(
name, old_type, object_type, s.line, call.analyzed.info
)
newtype_class_info.fallback_to_any = True
check_for_explicit_any(
old_type, self.options, self.api.is_typeshed_stub_file, self.msg, context=s
)
if self.options.disallow_any_unimported and has_any_from_unimported_type(old_type):
self.msg.unimported_type_becomes_any("Argument 2 to NewType(...)", old_type, s)
# If so, add it to the symbol table.
assert isinstance(call.analyzed, NewTypeExpr)
# As we do for normal classes, create the TypeInfo only once, then just
# update base classes on next iterations (to get rid of placeholders there).
if not call.analyzed.info:
call.analyzed.info = newtype_class_info
else:
call.analyzed.info.bases = newtype_class_info.bases
self.api.add_symbol(var_name, call.analyzed.info, s)
if self.api.is_func_scope():
self.api.add_symbol_skip_local(name, call.analyzed.info)
newtype_class_info.line = s.line
return True
def analyze_newtype_declaration(self, s: AssignmentStmt) -> tuple[str | None, CallExpr | None]:
"""Return the NewType call expression if `s` is a newtype declaration or None otherwise."""
name, call = None, None
if (
len(s.lvalues) == 1
and isinstance(s.lvalues[0], NameExpr)
and isinstance(s.rvalue, CallExpr)
and isinstance(s.rvalue.callee, RefExpr)
and (s.rvalue.callee.fullname in ("typing.NewType", "typing_extensions.NewType"))
):
name = s.lvalues[0].name
if s.type:
self.fail("Cannot declare the type of a NewType declaration", s)
names = self.api.current_symbol_table()
existing = names.get(name)
# Give a better error message than generic "Name already defined".
if (
existing
and not isinstance(existing.node, PlaceholderNode)
and not s.rvalue.analyzed
):
self.fail(f'Cannot redefine "{name}" as a NewType', s)
# This dummy NewTypeExpr marks the call as sufficiently analyzed; it will be
# overwritten later with a fully complete NewTypeExpr if there are no other
# errors with the NewType() call.
call = s.rvalue
return name, call
def check_newtype_args(
self, name: str, call: CallExpr, context: Context
) -> tuple[Type | None, bool]:
"""Ananlyze base type in NewType call.
Return a tuple (type, should defer).
"""
has_failed = False
args, arg_kinds = call.args, call.arg_kinds
if len(args) != 2 or arg_kinds[0] != ARG_POS or arg_kinds[1] != ARG_POS:
self.fail("NewType(...) expects exactly two positional arguments", context)
return None, False
# Check first argument
if not isinstance(args[0], StrExpr):
self.fail("Argument 1 to NewType(...) must be a string literal", context)
has_failed = True
elif args[0].value != name:
msg = 'String argument 1 "{}" to NewType(...) does not match variable name "{}"'
self.fail(msg.format(args[0].value, name), context)
has_failed = True
# Check second argument
msg = "Argument 2 to NewType(...) must be a valid type"
try:
unanalyzed_type = expr_to_unanalyzed_type(args[1], self.options, self.api.is_stub_file)
except TypeTranslationError:
self.fail(msg, context)
return None, False
# We want to use our custom error message (see above), so we suppress
# the default error message for invalid types here.
old_type = get_proper_type(
self.api.anal_type(
unanalyzed_type,
report_invalid_types=False,
allow_placeholder=not self.api.is_func_scope(),
)
)
should_defer = False
if isinstance(old_type, PlaceholderType):
old_type = None
if old_type is None:
should_defer = True
# The caller of this function assumes that if we return a Type, it's always
# a valid one. So, we translate AnyTypes created from errors into None.
if isinstance(old_type, AnyType) and old_type.is_from_error:
self.fail(msg, context)
return None, False
return None if has_failed else old_type, should_defer
def build_newtype_typeinfo(
self,
name: str,
old_type: Type,
base_type: Instance,
line: int,
existing_info: TypeInfo | None,
) -> TypeInfo:
info = existing_info or self.api.basic_new_typeinfo(name, base_type, line)
info.bases = [base_type] # Update in case there were nested placeholders.
info.is_newtype = True
# Add __init__ method
args = [
Argument(Var("self"), NoneType(), None, ARG_POS),
self.make_argument("item", old_type),
]
signature = CallableType(
arg_types=[Instance(info, []), old_type],
arg_kinds=[arg.kind for arg in args],
arg_names=["self", "item"],
ret_type=NoneType(),
fallback=self.api.named_type("builtins.function"),
name=name,
)
init_func = FuncDef("__init__", args, Block([]), typ=signature)
init_func.info = info
init_func._fullname = info.fullname + ".__init__"
if not existing_info:
updated = True
else:
previous_sym = info.names["__init__"].node
assert isinstance(previous_sym, FuncDef)
updated = old_type != previous_sym.arguments[1].variable.type
info.names["__init__"] = SymbolTableNode(MDEF, init_func)
if has_placeholder(old_type):
self.api.process_placeholder(None, "NewType base", info, force_progress=updated)
return info
# Helpers
def make_argument(self, name: str, type: Type) -> Argument:
return Argument(Var(name), type, None, ARG_POS)
def fail(self, msg: str, ctx: Context, *, code: ErrorCode | None = None) -> None:
self.api.fail(msg, ctx, code=code)
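
# Illustrative sketch (not part of mypy's source): a NewType declaration as
# handled by NewTypeAnalyzer above. The string argument must match the variable
# name, and the base type must be subclassable (a protocol class is rejected).
from typing import NewType

UserId = NewType("UserId", int)

def lookup(user_id: UserId) -> str:
    return f"user-{user_id}"

# At runtime UserId(42) is just an int; mypy treats UserId as a distinct
# subtype of int, so a plain int must be wrapped explicitly.
assert lookup(UserId(42)) == "user-42"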
| algorandfoundation/puya | src/puyapy/_vendor/mypy/semanal_newtype.py | Python | NOASSERTION | 10,577 |
"""Block/import reachability analysis."""
from __future__ import annotations
from mypy.nodes import (
AssertStmt,
AssignmentStmt,
Block,
ClassDef,
ExpressionStmt,
ForStmt,
FuncDef,
IfStmt,
Import,
ImportAll,
ImportFrom,
MatchStmt,
MypyFile,
ReturnStmt,
)
from mypy.options import Options
from mypy.reachability import (
assert_will_always_fail,
infer_reachability_of_if_statement,
infer_reachability_of_match_statement,
)
from mypy.traverser import TraverserVisitor
class SemanticAnalyzerPreAnalysis(TraverserVisitor):
"""Analyze reachability of blocks and imports and other local things.
This runs before semantic analysis, so names have not been bound. Imports are
also not resolved yet, so we can only access the current module.
This determines static reachability of blocks and imports due to version and
platform checks, among others.
The main entry point is 'visit_file'.
Reachability of imports needs to be determined very early in the build since
this affects which modules will ultimately be processed.
Consider this example:
import sys
def do_stuff() -> None:
if sys.version_info >= (3, 10):
import xyz # Only available in Python 3.10+
xyz.whatever()
...
    The block containing 'import xyz' is unreachable when type checking targets
    Python 3.9 or earlier, so the import shouldn't be processed for those
    versions, even if the module happens to exist.
"""
def visit_file(self, file: MypyFile, fnam: str, mod_id: str, options: Options) -> None:
self.platform = options.platform
self.cur_mod_id = mod_id
self.cur_mod_node = file
self.options = options
self.is_global_scope = True
self.skipped_lines: set[int] = set()
for i, defn in enumerate(file.defs):
defn.accept(self)
if isinstance(defn, AssertStmt) and assert_will_always_fail(defn, options):
# We've encountered an assert that's always false,
# e.g. assert sys.platform == 'lol'. Truncate the
# list of statements. This mutates file.defs too.
if i < len(file.defs) - 1:
next_def, last = file.defs[i + 1], file.defs[-1]
if last.end_line is not None:
# We are on a Python version recent enough to support end lines.
self.skipped_lines |= set(range(next_def.line, last.end_line + 1))
del file.defs[i + 1 :]
break
file.skipped_lines = self.skipped_lines
def visit_func_def(self, node: FuncDef) -> None:
old_global_scope = self.is_global_scope
self.is_global_scope = False
super().visit_func_def(node)
self.is_global_scope = old_global_scope
file_node = self.cur_mod_node
if (
self.is_global_scope
and file_node.is_stub
and node.name == "__getattr__"
and file_node.is_package_init_file()
):
# __init__.pyi with __getattr__ means that any submodules are assumed
# to exist, even if there is no stub. Note that we can't verify that the
# return type is compatible, since we haven't bound types yet.
file_node.is_partial_stub_package = True
def visit_class_def(self, node: ClassDef) -> None:
old_global_scope = self.is_global_scope
self.is_global_scope = False
super().visit_class_def(node)
self.is_global_scope = old_global_scope
def visit_import_from(self, node: ImportFrom) -> None:
node.is_top_level = self.is_global_scope
super().visit_import_from(node)
def visit_import_all(self, node: ImportAll) -> None:
node.is_top_level = self.is_global_scope
super().visit_import_all(node)
def visit_import(self, node: Import) -> None:
node.is_top_level = self.is_global_scope
super().visit_import(node)
def visit_if_stmt(self, s: IfStmt) -> None:
infer_reachability_of_if_statement(s, self.options)
for expr in s.expr:
expr.accept(self)
for node in s.body:
node.accept(self)
if s.else_body:
s.else_body.accept(self)
def visit_block(self, b: Block) -> None:
if b.is_unreachable:
if b.end_line is not None:
# We are on a Python version recent enough to support end lines.
self.skipped_lines |= set(range(b.line, b.end_line + 1))
return
super().visit_block(b)
def visit_match_stmt(self, s: MatchStmt) -> None:
infer_reachability_of_match_statement(s, self.options)
for guard in s.guards:
if guard is not None:
guard.accept(self)
for body in s.bodies:
body.accept(self)
# The remaining methods are an optimization: don't visit nested expressions
# of common statements, since they can have no effect.
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
pass
def visit_expression_stmt(self, s: ExpressionStmt) -> None:
pass
def visit_return_stmt(self, s: ReturnStmt) -> None:
pass
def visit_for_stmt(self, s: ForStmt) -> None:
s.body.accept(self)
if s.else_body is not None:
s.else_body.accept(self)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/semanal_pass1.py
|
Python
|
NOASSERTION
| 5,439 |
"""Shared definitions used by different parts of semantic analysis."""
from __future__ import annotations
from abc import abstractmethod
from typing import Callable, Final, overload
from typing_extensions import Literal, Protocol
from mypy_extensions import trait
from mypy.errorcodes import LITERAL_REQ, ErrorCode
from mypy.nodes import (
CallExpr,
ClassDef,
Context,
DataclassTransformSpec,
Decorator,
Expression,
FuncDef,
NameExpr,
Node,
OverloadedFuncDef,
RefExpr,
SymbolNode,
SymbolTable,
SymbolTableNode,
TypeInfo,
)
from mypy.plugin import SemanticAnalyzerPluginInterface
from mypy.tvar_scope import TypeVarLikeScope
from mypy.type_visitor import ANY_STRATEGY, BoolTypeQuery
from mypy.typeops import make_simplified_union
from mypy.types import (
TPDICT_FB_NAMES,
AnyType,
FunctionLike,
Instance,
Parameters,
ParamSpecFlavor,
ParamSpecType,
PlaceholderType,
ProperType,
TupleType,
Type,
TypeOfAny,
TypeVarId,
TypeVarLikeType,
TypeVarTupleType,
UnpackType,
get_proper_type,
)
# Subclasses can override these Var attributes with incompatible types. This can also be
# set for individual attributes using 'allow_incompatible_override' of Var.
ALLOW_INCOMPATIBLE_OVERRIDE: Final = ("__slots__", "__deletable__", "__match_args__")
# Priorities for ordering of patches within the "patch" phase of semantic analysis
# (after the main pass):
# Fix fallbacks (does subtype checks).
PRIORITY_FALLBACKS: Final = 1
@trait
class SemanticAnalyzerCoreInterface:
"""A core abstract interface to generic semantic analyzer functionality.
This is implemented by both semantic analyzer passes 2 and 3.
"""
@abstractmethod
def lookup_qualified(
self, name: str, ctx: Context, suppress_errors: bool = False
) -> SymbolTableNode | None:
raise NotImplementedError
@abstractmethod
def lookup_fully_qualified(self, name: str) -> SymbolTableNode:
raise NotImplementedError
@abstractmethod
def lookup_fully_qualified_or_none(self, name: str) -> SymbolTableNode | None:
raise NotImplementedError
@abstractmethod
def fail(
self,
msg: str,
ctx: Context,
serious: bool = False,
*,
blocker: bool = False,
code: ErrorCode | None = None,
) -> None:
raise NotImplementedError
@abstractmethod
def note(self, msg: str, ctx: Context, *, code: ErrorCode | None = None) -> None:
raise NotImplementedError
@abstractmethod
def incomplete_feature_enabled(self, feature: str, ctx: Context) -> bool:
raise NotImplementedError
@abstractmethod
def record_incomplete_ref(self) -> None:
raise NotImplementedError
@abstractmethod
def defer(self, debug_context: Context | None = None, force_progress: bool = False) -> None:
raise NotImplementedError
@abstractmethod
def is_incomplete_namespace(self, fullname: str) -> bool:
"""Is a module or class namespace potentially missing some definitions?"""
raise NotImplementedError
@property
@abstractmethod
def final_iteration(self) -> bool:
"""Is this the final iteration of semantic analysis?"""
raise NotImplementedError
@abstractmethod
def is_future_flag_set(self, flag: str) -> bool:
"""Is the specific __future__ feature imported"""
raise NotImplementedError
@property
@abstractmethod
def is_stub_file(self) -> bool:
raise NotImplementedError
@abstractmethod
def is_func_scope(self) -> bool:
raise NotImplementedError
@property
@abstractmethod
def type(self) -> TypeInfo | None:
raise NotImplementedError
@trait
class SemanticAnalyzerInterface(SemanticAnalyzerCoreInterface):
"""A limited abstract interface to some generic semantic analyzer pass 2 functionality.
We use this interface for various reasons:
* Looser coupling
* Cleaner import graph
* Less need to pass around callback functions
"""
tvar_scope: TypeVarLikeScope
@abstractmethod
def lookup(
self, name: str, ctx: Context, suppress_errors: bool = False
) -> SymbolTableNode | None:
raise NotImplementedError
@abstractmethod
def named_type(self, fullname: str, args: list[Type] | None = None) -> Instance:
raise NotImplementedError
@abstractmethod
def named_type_or_none(self, fullname: str, args: list[Type] | None = None) -> Instance | None:
raise NotImplementedError
@abstractmethod
def accept(self, node: Node) -> None:
raise NotImplementedError
@abstractmethod
def anal_type(
self,
t: Type,
*,
tvar_scope: TypeVarLikeScope | None = None,
allow_tuple_literal: bool = False,
allow_unbound_tvars: bool = False,
allow_typed_dict_special_forms: bool = False,
allow_placeholder: bool = False,
report_invalid_types: bool = True,
prohibit_self_type: str | None = None,
) -> Type | None:
raise NotImplementedError
@abstractmethod
def get_and_bind_all_tvars(self, type_exprs: list[Expression]) -> list[TypeVarLikeType]:
raise NotImplementedError
@abstractmethod
def basic_new_typeinfo(self, name: str, basetype_or_fallback: Instance, line: int) -> TypeInfo:
raise NotImplementedError
@abstractmethod
def schedule_patch(self, priority: int, fn: Callable[[], None]) -> None:
raise NotImplementedError
@abstractmethod
def add_symbol_table_node(self, name: str, stnode: SymbolTableNode) -> bool:
"""Add node to the current symbol table."""
raise NotImplementedError
@abstractmethod
def current_symbol_table(self) -> SymbolTable:
"""Get currently active symbol table.
May be module, class, or local namespace.
"""
raise NotImplementedError
@abstractmethod
def add_symbol(
self,
name: str,
node: SymbolNode,
context: Context,
module_public: bool = True,
module_hidden: bool = False,
can_defer: bool = True,
) -> bool:
"""Add symbol to the current symbol table."""
raise NotImplementedError
@abstractmethod
def add_symbol_skip_local(self, name: str, node: SymbolNode) -> None:
"""Add symbol to the current symbol table, skipping locals.
This is used to store symbol nodes in a symbol table that
is going to be serialized (local namespaces are not serialized).
See implementation docstring for more details.
"""
raise NotImplementedError
@abstractmethod
def parse_bool(self, expr: Expression) -> bool | None:
raise NotImplementedError
@abstractmethod
def qualified_name(self, n: str) -> str:
raise NotImplementedError
@property
@abstractmethod
def is_typeshed_stub_file(self) -> bool:
raise NotImplementedError
@abstractmethod
def process_placeholder(
self, name: str | None, kind: str, ctx: Context, force_progress: bool = False
) -> None:
raise NotImplementedError
def set_callable_name(sig: Type, fdef: FuncDef) -> ProperType:
sig = get_proper_type(sig)
if isinstance(sig, FunctionLike):
if fdef.info:
if fdef.info.fullname in TPDICT_FB_NAMES:
# Avoid exposing the internal _TypedDict name.
class_name = "TypedDict"
else:
class_name = fdef.info.name
return sig.with_name(f"{fdef.name} of {class_name}")
else:
return sig.with_name(fdef.name)
else:
return sig
def calculate_tuple_fallback(typ: TupleType) -> None:
"""Calculate a precise item type for the fallback of a tuple type.
This must be called only after the main semantic analysis pass, since joins
aren't available before that.
Note that there is an apparent chicken and egg problem with respect
to verifying type arguments against bounds. Verifying bounds might
require fallbacks, but we might use the bounds to calculate the
fallbacks. In practice this is not a problem, since the worst that
can happen is that we have invalid type argument values, and these
can happen in later stages as well (they will generate errors, but
we don't prevent their existence).
"""
fallback = typ.partial_fallback
assert fallback.type.fullname == "builtins.tuple"
items = []
for item in typ.items:
# TODO: this duplicates some logic in typeops.tuple_fallback().
if isinstance(item, UnpackType):
unpacked_type = get_proper_type(item.type)
if isinstance(unpacked_type, TypeVarTupleType):
unpacked_type = get_proper_type(unpacked_type.upper_bound)
if (
isinstance(unpacked_type, Instance)
and unpacked_type.type.fullname == "builtins.tuple"
):
items.append(unpacked_type.args[0])
else:
raise NotImplementedError
else:
items.append(item)
fallback.args = (make_simplified_union(items),)
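# Minimal illustration (assumed, not taken from mypy's docs): for Tuple[int, str]
# the partial fallback builtins.tuple[...] gets its single argument patched here
# to the simplified union of the item types, i.e. builtins.tuple[int | str].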
class _NamedTypeCallback(Protocol):
def __call__(self, fully_qualified_name: str, args: list[Type] | None = None) -> Instance: ...
def paramspec_args(
name: str,
fullname: str,
id: TypeVarId,
*,
named_type_func: _NamedTypeCallback,
line: int = -1,
column: int = -1,
prefix: Parameters | None = None,
) -> ParamSpecType:
return ParamSpecType(
name,
fullname,
id,
flavor=ParamSpecFlavor.ARGS,
upper_bound=named_type_func("builtins.tuple", [named_type_func("builtins.object")]),
default=AnyType(TypeOfAny.from_omitted_generics),
line=line,
column=column,
prefix=prefix,
)
def paramspec_kwargs(
name: str,
fullname: str,
id: TypeVarId,
*,
named_type_func: _NamedTypeCallback,
line: int = -1,
column: int = -1,
prefix: Parameters | None = None,
) -> ParamSpecType:
return ParamSpecType(
name,
fullname,
id,
flavor=ParamSpecFlavor.KWARGS,
upper_bound=named_type_func(
"builtins.dict", [named_type_func("builtins.str"), named_type_func("builtins.object")]
),
default=AnyType(TypeOfAny.from_omitted_generics),
line=line,
column=column,
prefix=prefix,
)
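# Informal summary of the two helpers above: for a ParamSpec P, paramspec_args()
# models the *args component with upper bound tuple[object, ...], and
# paramspec_kwargs() models the **kwargs component with upper bound
# dict[str, object], matching the named types constructed above.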
class HasPlaceholders(BoolTypeQuery):
def __init__(self) -> None:
super().__init__(ANY_STRATEGY)
def visit_placeholder_type(self, t: PlaceholderType) -> bool:
return True
def has_placeholder(typ: Type) -> bool:
"""Check if a type contains any placeholder types (recursively)."""
return typ.accept(HasPlaceholders())
def find_dataclass_transform_spec(node: Node | None) -> DataclassTransformSpec | None:
"""
Find the dataclass transform spec for the given node, if any exists.
Per PEP 681 (https://peps.python.org/pep-0681/#the-dataclass-transform-decorator), dataclass
transforms can be specified in multiple ways, including decorator functions and
metaclasses/base classes. This function resolves the spec from any of these variants.
"""
# The spec only lives on the function/class definition itself, so we need to unwrap down to that
# point
if isinstance(node, CallExpr):
# Like dataclasses.dataclass, transform-based decorators can be applied either with or
# without parameters; ie, both of these forms are accepted:
#
# @typing.dataclass_transform
# class Foo: ...
# @typing.dataclass_transform(eq=True, order=True, ...)
# class Bar: ...
#
# We need to unwrap the call for the second variant.
node = node.callee
if isinstance(node, RefExpr):
node = node.node
if isinstance(node, Decorator):
# typing.dataclass_transform usage must always result in a Decorator; it always uses the
# `@dataclass_transform(...)` syntax and never `@dataclass_transform`
node = node.func
if isinstance(node, OverloadedFuncDef):
# The dataclass_transform decorator may be attached to any single overload, so we must
# search them all.
# Note that using more than one decorator is undefined behavior, so we can just take the
# first that we find.
for candidate in node.items:
spec = find_dataclass_transform_spec(candidate)
if spec is not None:
return spec
return find_dataclass_transform_spec(node.impl)
# For functions, we can directly consult the AST field for the spec
if isinstance(node, FuncDef):
return node.dataclass_transform_spec
if isinstance(node, ClassDef):
node = node.info
if isinstance(node, TypeInfo):
# Search all parent classes to see if any are decorated with `typing.dataclass_transform`
for base in node.mro[1:]:
if base.dataclass_transform_spec is not None:
return base.dataclass_transform_spec
# Check if there is a metaclass that is decorated with `typing.dataclass_transform`
#
# Note that PEP 681 only discusses using a metaclass that is directly decorated with
# `typing.dataclass_transform`; subclasses thereof should be treated with dataclass
# semantics rather than as transforms:
#
# > If dataclass_transform is applied to a class, dataclass-like semantics will be assumed
# > for any class that directly or indirectly derives from the decorated class or uses the
# > decorated class as a metaclass.
#
# The wording doesn't make this entirely explicit, but Pyright (the reference
# implementation for this PEP) only handles directly-decorated metaclasses.
metaclass_type = node.metaclass_type
if metaclass_type is not None and metaclass_type.type.dataclass_transform_spec is not None:
return metaclass_type.type.dataclass_transform_spec
return None
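# Hypothetical example of the resolution above (names are illustrative only):
#
#     @typing.dataclass_transform()
#     def create_model(cls): ...
#
#     @create_model
#     class Config: ...
#
# Looking up the `create_model` reference on Config walks RefExpr -> Decorator
# -> FuncDef and returns create_model's dataclass_transform_spec; metaclasses
# and decorated base classes are resolved through the TypeInfo branch instead.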
# Never returns `None` if a default is given
@overload
def require_bool_literal_argument(
api: SemanticAnalyzerInterface | SemanticAnalyzerPluginInterface,
expression: Expression,
name: str,
default: Literal[True] | Literal[False],
) -> bool: ...
@overload
def require_bool_literal_argument(
api: SemanticAnalyzerInterface | SemanticAnalyzerPluginInterface,
expression: Expression,
name: str,
default: None = None,
) -> bool | None: ...
def require_bool_literal_argument(
api: SemanticAnalyzerInterface | SemanticAnalyzerPluginInterface,
expression: Expression,
name: str,
default: bool | None = None,
) -> bool | None:
"""Attempt to interpret an expression as a boolean literal, and fail analysis if we can't."""
value = parse_bool(expression)
if value is None:
api.fail(
f'"{name}" argument must be a True or False literal', expression, code=LITERAL_REQ
)
return default
return value
def parse_bool(expr: Expression) -> bool | None:
if isinstance(expr, NameExpr):
if expr.fullname == "builtins.True":
return True
if expr.fullname == "builtins.False":
return False
return None
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/semanal_shared.py
|
Python
|
NOASSERTION
| 15,513 |
"""Verify properties of type arguments, like 'int' in C[int] being valid.
This must happen after semantic analysis since there can be placeholder
types until the end of semantic analysis, and these break various type
operations, including subtype checks.
"""
from __future__ import annotations
from typing import Callable
from mypy import errorcodes as codes, message_registry
from mypy.errorcodes import ErrorCode
from mypy.errors import Errors
from mypy.message_registry import INVALID_PARAM_SPEC_LOCATION, INVALID_PARAM_SPEC_LOCATION_NOTE
from mypy.messages import format_type
from mypy.mixedtraverser import MixedTraverserVisitor
from mypy.nodes import Block, ClassDef, Context, FakeInfo, FuncItem, MypyFile
from mypy.options import Options
from mypy.scope import Scope
from mypy.subtypes import is_same_type, is_subtype
from mypy.types import (
AnyType,
CallableType,
Instance,
Parameters,
ParamSpecType,
TupleType,
Type,
TypeAliasType,
TypeOfAny,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
UnboundType,
UnpackType,
flatten_nested_tuples,
get_proper_type,
get_proper_types,
split_with_prefix_and_suffix,
)
from mypy.typevartuples import erased_vars
class TypeArgumentAnalyzer(MixedTraverserVisitor):
def __init__(
self,
errors: Errors,
options: Options,
is_typeshed_file: bool,
named_type: Callable[[str, list[Type]], Instance],
) -> None:
super().__init__()
self.errors = errors
self.options = options
self.is_typeshed_file = is_typeshed_file
self.named_type = named_type
self.scope = Scope()
# Should we also analyze function definitions, or only module top-levels?
self.recurse_into_functions = True
# Keep track of the type aliases already visited. This is needed to avoid
# infinite recursion on types like A = Union[int, List[A]].
self.seen_aliases: set[TypeAliasType] = set()
def visit_mypy_file(self, o: MypyFile) -> None:
self.errors.set_file(o.path, o.fullname, scope=self.scope, options=self.options)
with self.scope.module_scope(o.fullname):
super().visit_mypy_file(o)
def visit_func(self, defn: FuncItem) -> None:
if not self.recurse_into_functions:
return
with self.scope.function_scope(defn):
super().visit_func(defn)
def visit_class_def(self, defn: ClassDef) -> None:
with self.scope.class_scope(defn.info):
super().visit_class_def(defn)
def visit_block(self, o: Block) -> None:
if not o.is_unreachable:
super().visit_block(o)
def visit_type_alias_type(self, t: TypeAliasType) -> None:
super().visit_type_alias_type(t)
if t in self.seen_aliases:
# Avoid infinite recursion on recursive type aliases.
# Note: it is fine to skip the aliases we have already seen in non-recursive
# types, since errors there have already been reported.
return
self.seen_aliases.add(t)
assert t.alias is not None, f"Unfixed type alias {t.type_ref}"
is_error, is_invalid = self.validate_args(
t.alias.name, tuple(t.args), t.alias.alias_tvars, t
)
if is_invalid:
# If there is an arity error (e.g. non-Parameters used for ParamSpec etc.),
# then it is safer to erase the arguments completely, to avoid crashes later.
# TODO: can we move this logic to typeanal.py?
t.args = erased_vars(t.alias.alias_tvars, TypeOfAny.from_error)
if not is_error:
# If there was already an error for the alias itself, there is no point in checking
# the expansion, most likely it will result in the same kind of error.
get_proper_type(t).accept(self)
def visit_tuple_type(self, t: TupleType) -> None:
t.items = flatten_nested_tuples(t.items)
# We could also normalize Tuple[*tuple[X, ...]] -> tuple[X, ...] like in
# expand_type() but we can't do this here since it is not a translator visitor,
# and we need to return an Instance instead of TupleType.
super().visit_tuple_type(t)
def visit_callable_type(self, t: CallableType) -> None:
super().visit_callable_type(t)
t.normalize_trivial_unpack()
def visit_instance(self, t: Instance) -> None:
super().visit_instance(t)
# Type argument counts were checked in the main semantic analyzer pass. We assume
# that the counts are correct here.
info = t.type
if isinstance(info, FakeInfo):
return # https://github.com/python/mypy/issues/11079
_, is_invalid = self.validate_args(info.name, t.args, info.defn.type_vars, t)
if is_invalid:
t.args = tuple(erased_vars(info.defn.type_vars, TypeOfAny.from_error))
if t.type.fullname == "builtins.tuple" and len(t.args) == 1:
# Normalize Tuple[*Tuple[X, ...], ...] -> Tuple[X, ...]
arg = t.args[0]
if isinstance(arg, UnpackType):
unpacked = get_proper_type(arg.type)
if isinstance(unpacked, Instance):
assert unpacked.type.fullname == "builtins.tuple"
t.args = unpacked.args
def validate_args(
self, name: str, args: tuple[Type, ...], type_vars: list[TypeVarLikeType], ctx: Context
) -> tuple[bool, bool]:
if any(isinstance(v, TypeVarTupleType) for v in type_vars):
prefix = next(i for (i, v) in enumerate(type_vars) if isinstance(v, TypeVarTupleType))
tvt = type_vars[prefix]
assert isinstance(tvt, TypeVarTupleType)
start, middle, end = split_with_prefix_and_suffix(
tuple(args), prefix, len(type_vars) - prefix - 1
)
args = start + (TupleType(list(middle), tvt.tuple_fallback),) + end
is_error = False
is_invalid = False
for (i, arg), tvar in zip(enumerate(args), type_vars):
if isinstance(tvar, TypeVarType):
if isinstance(arg, ParamSpecType):
is_invalid = True
self.fail(
INVALID_PARAM_SPEC_LOCATION.format(format_type(arg, self.options)),
ctx,
code=codes.VALID_TYPE,
)
self.note(
INVALID_PARAM_SPEC_LOCATION_NOTE.format(arg.name),
ctx,
code=codes.VALID_TYPE,
)
continue
if isinstance(arg, Parameters):
is_invalid = True
self.fail(
f"Cannot use {format_type(arg, self.options)} for regular type variable,"
" only for ParamSpec",
ctx,
code=codes.VALID_TYPE,
)
continue
if tvar.values:
if isinstance(arg, TypeVarType):
if self.in_type_alias_expr:
# Type aliases are allowed to use unconstrained type variables
# error will be checked at substitution point.
continue
arg_values = arg.values
if not arg_values:
is_error = True
self.fail(
message_registry.INVALID_TYPEVAR_AS_TYPEARG.format(arg.name, name),
ctx,
code=codes.TYPE_VAR,
)
continue
else:
arg_values = [arg]
if self.check_type_var_values(name, arg_values, tvar.name, tvar.values, ctx):
is_error = True
# Check against upper bound. Since it's object the vast majority of the time,
# add fast path to avoid a potentially slow subtype check.
upper_bound = tvar.upper_bound
object_upper_bound = (
type(upper_bound) is Instance
and upper_bound.type.fullname == "builtins.object"
)
if not object_upper_bound and not is_subtype(arg, upper_bound):
if self.in_type_alias_expr and isinstance(arg, TypeVarType):
# Type aliases are allowed to use unconstrained type variables
# error will be checked at substitution point.
continue
is_error = True
self.fail(
message_registry.INVALID_TYPEVAR_ARG_BOUND.format(
format_type(arg, self.options),
name,
format_type(upper_bound, self.options),
),
ctx,
code=codes.TYPE_VAR,
)
elif isinstance(tvar, ParamSpecType):
if not isinstance(
get_proper_type(arg), (ParamSpecType, Parameters, AnyType, UnboundType)
):
is_invalid = True
self.fail(
"Can only replace ParamSpec with a parameter types list or"
f" another ParamSpec, got {format_type(arg, self.options)}",
ctx,
code=codes.VALID_TYPE,
)
if is_invalid:
is_error = True
return is_error, is_invalid
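    # Hypothetical inputs rejected by validate_args() (illustrative only):
    #
    #     AnyStr = TypeVar("AnyStr", str, bytes)
    #     P = ParamSpec("P")
    #     class C(Generic[AnyStr]): ...
    #
    #     C[int]   # fails the value-restriction check (is_error)
    #     C[P]     # ParamSpec in a regular type variable slot (is_invalid); args erased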
def visit_unpack_type(self, typ: UnpackType) -> None:
super().visit_unpack_type(typ)
proper_type = get_proper_type(typ.type)
if isinstance(proper_type, TupleType):
return
if isinstance(proper_type, TypeVarTupleType):
return
# TODO: this should probably be .has_base("builtins.tuple"), also elsewhere. This is
# tricky however, since this needs map_instance_to_supertype() available in many places.
if isinstance(proper_type, Instance) and proper_type.type.fullname == "builtins.tuple":
return
if not isinstance(proper_type, (UnboundType, AnyType)):
# Avoid extra errors if there were some errors already. Also interpret plain Any
# as tuple[Any, ...] (this is better for the code in type checker).
self.fail(
message_registry.INVALID_UNPACK.format(format_type(proper_type, self.options)),
typ.type,
code=codes.VALID_TYPE,
)
typ.type = self.named_type("builtins.tuple", [AnyType(TypeOfAny.from_error)])
def check_type_var_values(
self, name: str, actuals: list[Type], arg_name: str, valids: list[Type], context: Context
) -> bool:
is_error = False
for actual in get_proper_types(actuals):
# We skip UnboundType here, since they may appear in defn.bases,
# the error will be caught when visiting info.bases, that have bound type
# variables.
if not isinstance(actual, (AnyType, UnboundType)) and not any(
is_same_type(actual, value) for value in valids
):
is_error = True
if len(actuals) > 1 or not isinstance(actual, Instance):
self.fail(
message_registry.INVALID_TYPEVAR_ARG_VALUE.format(name),
context,
code=codes.TYPE_VAR,
)
else:
class_name = f'"{name}"'
actual_type_name = f'"{actual.type.name}"'
self.fail(
message_registry.INCOMPATIBLE_TYPEVAR_VALUE.format(
arg_name, class_name, actual_type_name
),
context,
code=codes.TYPE_VAR,
)
return is_error
def fail(self, msg: str, context: Context, *, code: ErrorCode | None = None) -> None:
self.errors.report(context.line, context.column, msg, code=code)
def note(self, msg: str, context: Context, *, code: ErrorCode | None = None) -> None:
self.errors.report(context.line, context.column, msg, severity="note", code=code)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/semanal_typeargs.py
|
Python
|
NOASSERTION
| 12,646 |
"""Semantic analysis of TypedDict definitions."""
from __future__ import annotations
from typing import Final
from mypy import errorcodes as codes, message_registry
from mypy.errorcodes import ErrorCode
from mypy.expandtype import expand_type
from mypy.exprtotype import TypeTranslationError, expr_to_unanalyzed_type
from mypy.message_registry import TYPEDDICT_OVERRIDE_MERGE
from mypy.messages import MessageBuilder
from mypy.nodes import (
ARG_NAMED,
ARG_POS,
AssignmentStmt,
CallExpr,
ClassDef,
Context,
DictExpr,
EllipsisExpr,
Expression,
ExpressionStmt,
IndexExpr,
NameExpr,
PassStmt,
RefExpr,
Statement,
StrExpr,
TempNode,
TupleExpr,
TypedDictExpr,
TypeInfo,
)
from mypy.options import Options
from mypy.semanal_shared import (
SemanticAnalyzerInterface,
has_placeholder,
require_bool_literal_argument,
)
from mypy.state import state
from mypy.typeanal import check_for_explicit_any, has_any_from_unimported_type
from mypy.types import (
TPDICT_NAMES,
AnyType,
ReadOnlyType,
RequiredType,
Type,
TypedDictType,
TypeOfAny,
TypeVarLikeType,
)
TPDICT_CLASS_ERROR: Final = (
'Invalid statement in TypedDict definition; expected "field_name: field_type"'
)
class TypedDictAnalyzer:
def __init__(
self, options: Options, api: SemanticAnalyzerInterface, msg: MessageBuilder
) -> None:
self.options = options
self.api = api
self.msg = msg
def analyze_typeddict_classdef(self, defn: ClassDef) -> tuple[bool, TypeInfo | None]:
"""Analyze a class that may define a TypedDict.
Assume that base classes have been analyzed already.
Note: Unlike normal classes, we won't create a TypeInfo until
        the whole definition of the TypedDict (including the body and all
key names and types) is complete. This is mostly because we
store the corresponding TypedDictType in the TypeInfo.
Return (is this a TypedDict, new TypeInfo). Specifics:
* If we couldn't finish due to incomplete reference anywhere in
the definition, return (True, None).
* If this is not a TypedDict, return (False, None).
"""
possible = False
for base_expr in defn.base_type_exprs:
if isinstance(base_expr, CallExpr):
base_expr = base_expr.callee
if isinstance(base_expr, IndexExpr):
base_expr = base_expr.base
if isinstance(base_expr, RefExpr):
self.api.accept(base_expr)
if base_expr.fullname in TPDICT_NAMES or self.is_typeddict(base_expr):
possible = True
if isinstance(base_expr.node, TypeInfo) and base_expr.node.is_final:
err = message_registry.CANNOT_INHERIT_FROM_FINAL
self.fail(err.format(base_expr.node.name).value, defn, code=err.code)
if not possible:
return False, None
existing_info = None
if isinstance(defn.analyzed, TypedDictExpr):
existing_info = defn.analyzed.info
if (
len(defn.base_type_exprs) == 1
and isinstance(defn.base_type_exprs[0], RefExpr)
and defn.base_type_exprs[0].fullname in TPDICT_NAMES
):
# Building a new TypedDict
fields, types, statements, required_keys, readonly_keys = (
self.analyze_typeddict_classdef_fields(defn)
)
if fields is None:
return True, None # Defer
if self.api.is_func_scope() and "@" not in defn.name:
defn.name += "@" + str(defn.line)
info = self.build_typeddict_typeinfo(
defn.name, fields, types, required_keys, readonly_keys, defn.line, existing_info
)
defn.analyzed = TypedDictExpr(info)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
defn.defs.body = statements
return True, info
# Extending/merging existing TypedDicts
typeddict_bases: list[Expression] = []
typeddict_bases_set = set()
for expr in defn.base_type_exprs:
ok, maybe_type_info, _ = self.check_typeddict(expr, None, False)
if ok and maybe_type_info is not None:
# expr is a CallExpr
info = maybe_type_info
typeddict_bases_set.add(info.fullname)
typeddict_bases.append(expr)
elif isinstance(expr, RefExpr) and expr.fullname in TPDICT_NAMES:
if "TypedDict" not in typeddict_bases_set:
typeddict_bases_set.add("TypedDict")
else:
self.fail('Duplicate base class "TypedDict"', defn)
elif isinstance(expr, RefExpr) and self.is_typeddict(expr):
assert expr.fullname
if expr.fullname not in typeddict_bases_set:
typeddict_bases_set.add(expr.fullname)
typeddict_bases.append(expr)
else:
assert isinstance(expr.node, TypeInfo)
self.fail(f'Duplicate base class "{expr.node.name}"', defn)
elif isinstance(expr, IndexExpr) and self.is_typeddict(expr.base):
assert isinstance(expr.base, RefExpr)
assert expr.base.fullname
if expr.base.fullname not in typeddict_bases_set:
typeddict_bases_set.add(expr.base.fullname)
typeddict_bases.append(expr)
else:
assert isinstance(expr.base.node, TypeInfo)
self.fail(f'Duplicate base class "{expr.base.node.name}"', defn)
else:
self.fail("All bases of a new TypedDict must be TypedDict types", defn)
keys: list[str] = []
types = []
required_keys = set()
readonly_keys = set()
# Iterate over bases in reverse order so that leftmost base class' keys take precedence
for base in reversed(typeddict_bases):
self.add_keys_and_types_from_base(
base, keys, types, required_keys, readonly_keys, defn
)
(new_keys, new_types, new_statements, new_required_keys, new_readonly_keys) = (
self.analyze_typeddict_classdef_fields(defn, keys)
)
if new_keys is None:
return True, None # Defer
keys.extend(new_keys)
types.extend(new_types)
required_keys.update(new_required_keys)
readonly_keys.update(new_readonly_keys)
info = self.build_typeddict_typeinfo(
defn.name, keys, types, required_keys, readonly_keys, defn.line, existing_info
)
defn.analyzed = TypedDictExpr(info)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
defn.defs.body = new_statements
return True, info
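    # Hypothetical class-based definitions covered by the two branches above:
    #
    #     class Movie(TypedDict):                    # "new TypedDict" branch
    #         title: str
    #         year: int
    #
    #     class BoxOfficeMovie(Movie, total=False):  # extending/merging branch
    #         gross: int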
def add_keys_and_types_from_base(
self,
base: Expression,
keys: list[str],
types: list[Type],
required_keys: set[str],
readonly_keys: set[str],
ctx: Context,
) -> None:
base_args: list[Type] = []
if isinstance(base, RefExpr):
assert isinstance(base.node, TypeInfo)
info = base.node
elif isinstance(base, IndexExpr):
assert isinstance(base.base, RefExpr)
assert isinstance(base.base.node, TypeInfo)
info = base.base.node
args = self.analyze_base_args(base, ctx)
if args is None:
return
base_args = args
else:
assert isinstance(base, CallExpr)
assert isinstance(base.analyzed, TypedDictExpr)
info = base.analyzed.info
assert info.typeddict_type is not None
base_typed_dict = info.typeddict_type
base_items = base_typed_dict.items
valid_items = base_items.copy()
# Always fix invalid bases to avoid crashes.
tvars = info.defn.type_vars
if len(base_args) != len(tvars):
any_kind = TypeOfAny.from_omitted_generics
if base_args:
self.fail(f'Invalid number of type arguments for "{info.name}"', ctx)
any_kind = TypeOfAny.from_error
base_args = [AnyType(any_kind) for _ in tvars]
with state.strict_optional_set(self.options.strict_optional):
valid_items = self.map_items_to_base(valid_items, tvars, base_args)
for key in base_items:
if key in keys:
self.fail(TYPEDDICT_OVERRIDE_MERGE.format(key), ctx)
keys.extend(valid_items.keys())
types.extend(valid_items.values())
required_keys.update(base_typed_dict.required_keys)
readonly_keys.update(base_typed_dict.readonly_keys)
def analyze_base_args(self, base: IndexExpr, ctx: Context) -> list[Type] | None:
"""Analyze arguments of base type expressions as types.
We need to do this, because normal base class processing happens after
the TypedDict special-casing (plus we get a custom error message).
"""
base_args = []
if isinstance(base.index, TupleExpr):
args = base.index.items
else:
args = [base.index]
for arg_expr in args:
try:
type = expr_to_unanalyzed_type(arg_expr, self.options, self.api.is_stub_file)
except TypeTranslationError:
self.fail("Invalid TypedDict type argument", ctx)
return None
analyzed = self.api.anal_type(
type,
allow_typed_dict_special_forms=True,
allow_placeholder=not self.api.is_func_scope(),
)
if analyzed is None:
return None
base_args.append(analyzed)
return base_args
def map_items_to_base(
self, valid_items: dict[str, Type], tvars: list[TypeVarLikeType], base_args: list[Type]
) -> dict[str, Type]:
"""Map item types to how they would look in their base with type arguments applied.
Note it is safe to use expand_type() during semantic analysis, because it should never
(indirectly) call is_subtype().
"""
mapped_items = {}
for key in valid_items:
type_in_base = valid_items[key]
if not tvars:
mapped_items[key] = type_in_base
continue
# TODO: simple zip can't be used for variadic types.
mapped_items[key] = expand_type(
type_in_base, {t.id: a for (t, a) in zip(tvars, base_args)}
)
return mapped_items
def analyze_typeddict_classdef_fields(
self, defn: ClassDef, oldfields: list[str] | None = None
) -> tuple[list[str] | None, list[Type], list[Statement], set[str], set[str]]:
"""Analyze fields defined in a TypedDict class definition.
This doesn't consider inherited fields (if any). Also consider totality,
if given.
Return tuple with these items:
* List of keys (or None if found an incomplete reference --> deferral)
* List of types for each key
* List of statements from defn.defs.body that are legally allowed to be a
part of a TypedDict definition
         * Set of required keys
         * Set of read-only keys
"""
fields: list[str] = []
types: list[Type] = []
statements: list[Statement] = []
for stmt in defn.defs.body:
if not isinstance(stmt, AssignmentStmt):
                # Still allow pass or ... (for empty TypedDicts) and docstrings
if isinstance(stmt, PassStmt) or (
isinstance(stmt, ExpressionStmt)
and isinstance(stmt.expr, (EllipsisExpr, StrExpr))
):
statements.append(stmt)
else:
defn.removed_statements.append(stmt)
self.fail(TPDICT_CLASS_ERROR, stmt)
elif len(stmt.lvalues) > 1 or not isinstance(stmt.lvalues[0], NameExpr):
# An assignment, but an invalid one.
defn.removed_statements.append(stmt)
self.fail(TPDICT_CLASS_ERROR, stmt)
else:
name = stmt.lvalues[0].name
if name in (oldfields or []):
self.fail(f'Overwriting TypedDict field "{name}" while extending', stmt)
if name in fields:
self.fail(f'Duplicate TypedDict key "{name}"', stmt)
continue
# Append stmt, name, and type in this case...
fields.append(name)
statements.append(stmt)
if stmt.unanalyzed_type is None:
types.append(AnyType(TypeOfAny.unannotated))
else:
analyzed = self.api.anal_type(
stmt.unanalyzed_type,
allow_typed_dict_special_forms=True,
allow_placeholder=not self.api.is_func_scope(),
prohibit_self_type="TypedDict item type",
)
if analyzed is None:
return None, [], [], set(), set() # Need to defer
types.append(analyzed)
if not has_placeholder(analyzed):
stmt.type = self.extract_meta_info(analyzed, stmt)[0]
# ...despite possible minor failures that allow further analysis.
if stmt.type is None or hasattr(stmt, "new_syntax") and not stmt.new_syntax:
self.fail(TPDICT_CLASS_ERROR, stmt)
elif not isinstance(stmt.rvalue, TempNode):
# x: int assigns rvalue to TempNode(AnyType())
self.fail("Right hand side values are not supported in TypedDict", stmt)
total: bool | None = True
if "total" in defn.keywords:
total = require_bool_literal_argument(self.api, defn.keywords["total"], "total", True)
if defn.keywords and defn.keywords.keys() != {"total"}:
for_function = ' for "__init_subclass__" of "TypedDict"'
for key in defn.keywords:
if key == "total":
continue
self.msg.unexpected_keyword_argument_for_function(for_function, key, defn)
res_types = []
readonly_keys = set()
required_keys = set()
for field, t in zip(fields, types):
typ, required, readonly = self.extract_meta_info(t)
res_types.append(typ)
if (total or required is True) and required is not False:
required_keys.add(field)
if readonly:
readonly_keys.add(field)
return fields, res_types, statements, required_keys, readonly_keys
def extract_meta_info(
self, typ: Type, context: Context | None = None
) -> tuple[Type, bool | None, bool]:
"""Unwrap all metadata types."""
is_required = None # default, no modification
readonly = False # by default all is mutable
seen_required = False
seen_readonly = False
while isinstance(typ, (RequiredType, ReadOnlyType)):
if isinstance(typ, RequiredType):
if context is not None and seen_required:
self.fail(
'"{}" type cannot be nested'.format(
"Required[]" if typ.required else "NotRequired[]"
),
context,
code=codes.VALID_TYPE,
)
is_required = typ.required
seen_required = True
typ = typ.item
if isinstance(typ, ReadOnlyType):
if context is not None and seen_readonly:
self.fail('"ReadOnly[]" type cannot be nested', context, code=codes.VALID_TYPE)
readonly = True
seen_readonly = True
typ = typ.item
return typ, is_required, readonly
def check_typeddict(
self, node: Expression, var_name: str | None, is_func_scope: bool
) -> tuple[bool, TypeInfo | None, list[TypeVarLikeType]]:
"""Check if a call defines a TypedDict.
The optional var_name argument is the name of the variable to
which this is assigned, if any.
        Return a triple (is it a typed dict, corresponding TypeInfo, type variable definitions).
        If the definition is invalid but looks like a TypedDict,
        report errors but return (some) TypeInfo. If some type is not ready,
        return (True, None, []).
"""
if not isinstance(node, CallExpr):
return False, None, []
call = node
callee = call.callee
if not isinstance(callee, RefExpr):
return False, None, []
fullname = callee.fullname
if fullname not in TPDICT_NAMES:
return False, None, []
res = self.parse_typeddict_args(call)
if res is None:
# This is a valid typed dict, but some type is not ready.
# The caller should defer this until next iteration.
return True, None, []
name, items, types, total, tvar_defs, ok = res
if not ok:
# Error. Construct dummy return value.
if var_name:
name = var_name
if is_func_scope:
name += "@" + str(call.line)
else:
name = var_name = "TypedDict@" + str(call.line)
info = self.build_typeddict_typeinfo(name, [], [], set(), set(), call.line, None)
else:
if var_name is not None and name != var_name:
self.fail(
'First argument "{}" to TypedDict() does not match variable name "{}"'.format(
name, var_name
),
node,
code=codes.NAME_MATCH,
)
if name != var_name or is_func_scope:
# Give it a unique name derived from the line number.
name += "@" + str(call.line)
required_keys = {
field
for (field, t) in zip(items, types)
if (total or (isinstance(t, RequiredType) and t.required))
and not (isinstance(t, RequiredType) and not t.required)
}
readonly_keys = {
field for (field, t) in zip(items, types) if isinstance(t, ReadOnlyType)
}
types = [ # unwrap Required[T] or ReadOnly[T] to just T
t.item if isinstance(t, (RequiredType, ReadOnlyType)) else t for t in types
]
# Perform various validations after unwrapping.
for t in types:
check_for_explicit_any(
t, self.options, self.api.is_typeshed_stub_file, self.msg, context=call
)
if self.options.disallow_any_unimported:
for t in types:
if has_any_from_unimported_type(t):
self.msg.unimported_type_becomes_any("Type of a TypedDict key", t, call)
existing_info = None
if isinstance(node.analyzed, TypedDictExpr):
existing_info = node.analyzed.info
info = self.build_typeddict_typeinfo(
name, items, types, required_keys, readonly_keys, call.line, existing_info
)
info.line = node.line
# Store generated TypeInfo under both names, see semanal_namedtuple for more details.
if name != var_name or is_func_scope:
self.api.add_symbol_skip_local(name, info)
if var_name:
self.api.add_symbol(var_name, info, node)
call.analyzed = TypedDictExpr(info)
call.analyzed.set_line(call)
return True, info, tvar_defs
def parse_typeddict_args(
self, call: CallExpr
) -> tuple[str, list[str], list[Type], bool, list[TypeVarLikeType], bool] | None:
"""Parse typed dict call expression.
        Return the name, field names, field types, totality, bound type variable
        definitions, and whether parsing succeeded.
If some type is not ready, return None.
"""
# TODO: Share code with check_argument_count in checkexpr.py?
args = call.args
if len(args) < 2:
return self.fail_typeddict_arg("Too few arguments for TypedDict()", call)
if len(args) > 3:
return self.fail_typeddict_arg("Too many arguments for TypedDict()", call)
# TODO: Support keyword arguments
if call.arg_kinds not in ([ARG_POS, ARG_POS], [ARG_POS, ARG_POS, ARG_NAMED]):
return self.fail_typeddict_arg("Unexpected arguments to TypedDict()", call)
if len(args) == 3 and call.arg_names[2] != "total":
return self.fail_typeddict_arg(
f'Unexpected keyword argument "{call.arg_names[2]}" for "TypedDict"', call
)
if not isinstance(args[0], StrExpr):
return self.fail_typeddict_arg(
"TypedDict() expects a string literal as the first argument", call
)
if not isinstance(args[1], DictExpr):
return self.fail_typeddict_arg(
"TypedDict() expects a dictionary literal as the second argument", call
)
total: bool | None = True
if len(args) == 3:
total = require_bool_literal_argument(self.api, call.args[2], "total")
if total is None:
return "", [], [], True, [], False
dictexpr = args[1]
tvar_defs = self.api.get_and_bind_all_tvars([t for k, t in dictexpr.items])
res = self.parse_typeddict_fields_with_types(dictexpr.items, call)
if res is None:
# One of the types is not ready, defer.
return None
items, types, ok = res
assert total is not None
return args[0].value, items, types, total, tvar_defs, ok
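    # The call-based (functional) form parsed above, as hypothetical user code:
    #
    #     Movie = TypedDict("Movie", {"title": str, "year": int}, total=False)
    #
    # args[0] supplies the name, args[1] the field dictionary, and the optional
    # third argument must be the keyword `total`.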
def parse_typeddict_fields_with_types(
self, dict_items: list[tuple[Expression | None, Expression]], context: Context
) -> tuple[list[str], list[Type], bool] | None:
"""Parse typed dict items passed as pairs (name expression, type expression).
Return names, types, was there an error. If some type is not ready, return None.
"""
seen_keys = set()
items: list[str] = []
types: list[Type] = []
for field_name_expr, field_type_expr in dict_items:
if isinstance(field_name_expr, StrExpr):
key = field_name_expr.value
items.append(key)
if key in seen_keys:
self.fail(f'Duplicate TypedDict key "{key}"', field_name_expr)
seen_keys.add(key)
else:
name_context = field_name_expr or field_type_expr
self.fail_typeddict_arg("Invalid TypedDict() field name", name_context)
return [], [], False
try:
type = expr_to_unanalyzed_type(
field_type_expr, self.options, self.api.is_stub_file
)
except TypeTranslationError:
self.fail_typeddict_arg("Use dict literal for nested TypedDict", field_type_expr)
return [], [], False
analyzed = self.api.anal_type(
type,
allow_typed_dict_special_forms=True,
allow_placeholder=not self.api.is_func_scope(),
prohibit_self_type="TypedDict item type",
)
if analyzed is None:
return None
types.append(analyzed)
return items, types, True
def fail_typeddict_arg(
self, message: str, context: Context
) -> tuple[str, list[str], list[Type], bool, list[TypeVarLikeType], bool]:
self.fail(message, context)
return "", [], [], True, [], False
def build_typeddict_typeinfo(
self,
name: str,
items: list[str],
types: list[Type],
required_keys: set[str],
readonly_keys: set[str],
line: int,
existing_info: TypeInfo | None,
) -> TypeInfo:
# Prefer typing then typing_extensions if available.
fallback = (
self.api.named_type_or_none("typing._TypedDict", [])
or self.api.named_type_or_none("typing_extensions._TypedDict", [])
or self.api.named_type_or_none("mypy_extensions._TypedDict", [])
)
assert fallback is not None
info = existing_info or self.api.basic_new_typeinfo(name, fallback, line)
typeddict_type = TypedDictType(
dict(zip(items, types)), required_keys, readonly_keys, fallback
)
if info.special_alias and has_placeholder(info.special_alias.target):
self.api.process_placeholder(
None, "TypedDict item", info, force_progress=typeddict_type != info.typeddict_type
)
info.update_typeddict_type(typeddict_type)
return info
# Helpers
def is_typeddict(self, expr: Expression) -> bool:
return (
isinstance(expr, RefExpr)
and isinstance(expr.node, TypeInfo)
and expr.node.typeddict_type is not None
)
def fail(self, msg: str, ctx: Context, *, code: ErrorCode | None = None) -> None:
self.api.fail(msg, ctx, code=code)
def note(self, msg: str, ctx: Context) -> None:
self.api.note(msg, ctx)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/semanal_typeddict.py
|
Python
|
NOASSERTION
| 25,817 |
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/server/__init__.py
|
Python
|
NOASSERTION
| 0 |
|
"""Utilities for comparing two versions of a module symbol table.
The goal is to find which AST nodes have externally visible changes, so
that we can fire triggers and re-process other parts of the program
that are stale because of the changes.
Only look in detail at definitions in the current module -- don't
recurse into other modules.
A summary of the module contents:
* snapshot_symbol_table(...) creates an opaque snapshot description of a
module/class symbol table (recursing into nested class symbol tables).
* compare_symbol_table_snapshots(...) compares two snapshots for the same
module id and returns fully qualified names of differences (which act as
triggers).
To compare two versions of a module symbol table, take snapshots of both
versions and compare the snapshots. The use of snapshots makes it easy to
compare two versions of the *same* symbol table that is being mutated.
Summary of how this works for certain kinds of differences:
* If a symbol table node is deleted or added (only present in old/new version
of the symbol table), it is considered different, of course.
* If a symbol table node refers to a different sort of thing in the new version,
it is considered different (for example, if a class is replaced with a
function).
* If the signature of a function has changed, it is considered different.
* If the type of a variable changes, it is considered different.
* If the MRO of a class changes, or a non-generic class is turned into a
generic class, the class is considered different (there are other such "big"
differences that cause a class to be considered changed). However, just changes
to attributes or methods don't generally constitute a difference at the
class level -- these are handled at attribute level (say, 'mod.Cls.method'
is different rather than 'mod.Cls' being different).
* If an imported name targets a different name (say, 'from x import y' is
replaced with 'from z import y'), the name in the module is considered
different. If the target of an import continues to have the same name,
  but its specifics change, this doesn't mean that the imported name is
  treated as changed. Say, there is 'from x import y' in 'm', and the
  type of 'x.y' has changed. This doesn't mean that 'm.y' is considered
changed. Instead, processing the difference in 'm' will be handled through
fine-grained dependencies.
"""
from __future__ import annotations
from typing import Sequence, Tuple, Union
from typing_extensions import TypeAlias as _TypeAlias
from mypy.expandtype import expand_type
from mypy.nodes import (
UNBOUND_IMPORTED,
Decorator,
FuncBase,
FuncDef,
FuncItem,
MypyFile,
OverloadedFuncDef,
ParamSpecExpr,
SymbolNode,
SymbolTable,
TypeAlias,
TypeInfo,
TypeVarExpr,
TypeVarTupleExpr,
Var,
)
from mypy.semanal_shared import find_dataclass_transform_spec
from mypy.state import state
from mypy.types import (
AnyType,
CallableType,
DeletedType,
ErasedType,
Instance,
LiteralType,
NoneType,
Overloaded,
Parameters,
ParamSpecType,
PartialType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeType,
TypeVarId,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
TypeVisitor,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
)
from mypy.util import get_prefix
# Snapshot representation of a symbol table node or type. The representation is
# opaque -- the only supported operations are comparing for equality and
# hashing (latter for type snapshots only). Snapshots can contain primitive
# objects, nested tuples, lists and dictionaries (type
# snapshots are immutable).
#
# For example, the snapshot of the 'int' type is ('Instance', 'builtins.int', ()).
# Type snapshots are strict, they must be hashable and ordered (e.g. for Unions).
Primitive: _TypeAlias = Union[str, float, int, bool] # float is for Literal[3.14] support.
SnapshotItem: _TypeAlias = Tuple[Union[Primitive, "SnapshotItem"], ...]
# Symbol snapshots can be more lenient.
SymbolSnapshot: _TypeAlias = Tuple[object, ...]
def compare_symbol_table_snapshots(
name_prefix: str, snapshot1: dict[str, SymbolSnapshot], snapshot2: dict[str, SymbolSnapshot]
) -> set[str]:
"""Return names that are different in two snapshots of a symbol table.
Only shallow (intra-module) differences are considered. References to things defined
outside the module are compared based on the name of the target only.
Recurse into class symbol tables (if the class is defined in the target module).
Return a set of fully-qualified names (e.g., 'mod.func' or 'mod.Class.method').
"""
    # Find names defined only in one version.
names1 = {f"{name_prefix}.{name}" for name in snapshot1}
names2 = {f"{name_prefix}.{name}" for name in snapshot2}
triggers = names1 ^ names2
# Look for names defined in both versions that are different.
for name in set(snapshot1.keys()) & set(snapshot2.keys()):
item1 = snapshot1[name]
item2 = snapshot2[name]
kind1 = item1[0]
kind2 = item2[0]
item_name = f"{name_prefix}.{name}"
if kind1 != kind2:
# Different kind of node in two snapshots -> trivially different.
triggers.add(item_name)
elif kind1 == "TypeInfo":
if item1[:-1] != item2[:-1]:
# Record major difference (outside class symbol tables).
triggers.add(item_name)
# Look for differences in nested class symbol table entries.
assert isinstance(item1[-1], dict)
assert isinstance(item2[-1], dict)
triggers |= compare_symbol_table_snapshots(item_name, item1[-1], item2[-1])
else:
# Shallow node (no interesting internal structure). Just use equality.
if snapshot1[name] != snapshot2[name]:
triggers.add(item_name)
return triggers
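# Informal example (not from mypy's docs): if module m changes
#     def f() -> int: ...
# into
#     def f() -> str: ...
# then comparing snapshots of m's symbol table returns {"m.f"}, which is later
# used to fire the corresponding fine-grained triggers.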
def snapshot_symbol_table(name_prefix: str, table: SymbolTable) -> dict[str, SymbolSnapshot]:
"""Create a snapshot description that represents the state of a symbol table.
The snapshot has a representation based on nested tuples and dicts
that makes it easy and fast to find differences.
Only "shallow" state is included in the snapshot -- references to
things defined in other modules are represented just by the names of
the targets.
"""
result: dict[str, SymbolSnapshot] = {}
for name, symbol in table.items():
node = symbol.node
# TODO: cross_ref?
fullname = node.fullname if node else None
common = (fullname, symbol.kind, symbol.module_public)
if isinstance(node, MypyFile):
# This is a cross-reference to another module.
# If the reference is busted because the other module is missing,
# the node will be a "stale_info" TypeInfo produced by fixup,
# but that doesn't really matter to us here.
result[name] = ("Moduleref", common)
elif isinstance(node, TypeVarExpr):
result[name] = (
"TypeVar",
node.variance,
[snapshot_type(value) for value in node.values],
snapshot_type(node.upper_bound),
snapshot_type(node.default),
)
elif isinstance(node, TypeAlias):
result[name] = (
"TypeAlias",
snapshot_types(node.alias_tvars),
node.normalized,
node.no_args,
snapshot_optional_type(node.target),
)
elif isinstance(node, ParamSpecExpr):
result[name] = (
"ParamSpec",
node.variance,
snapshot_type(node.upper_bound),
snapshot_type(node.default),
)
elif isinstance(node, TypeVarTupleExpr):
result[name] = (
"TypeVarTuple",
node.variance,
snapshot_type(node.upper_bound),
snapshot_type(node.default),
)
else:
assert symbol.kind != UNBOUND_IMPORTED
if node and get_prefix(node.fullname) != name_prefix:
# This is a cross-reference to a node defined in another module.
# Include the node kind (FuncDef, Decorator, TypeInfo, ...), so that we will
# reprocess when a *new* node is created instead of merging an existing one.
result[name] = ("CrossRef", common, type(node).__name__)
else:
result[name] = snapshot_definition(node, common)
return result
def snapshot_definition(node: SymbolNode | None, common: SymbolSnapshot) -> SymbolSnapshot:
"""Create a snapshot description of a symbol table node.
The representation is nested tuples and dicts. Only externally
visible attributes are included.
"""
if isinstance(node, FuncBase):
# TODO: info
if node.type:
signature = snapshot_type(node.type)
else:
signature = snapshot_untyped_signature(node)
impl: FuncDef | None = None
if isinstance(node, FuncDef):
impl = node
elif isinstance(node, OverloadedFuncDef) and node.impl:
impl = node.impl.func if isinstance(node.impl, Decorator) else node.impl
is_trivial_body = impl.is_trivial_body if impl else False
dataclass_transform_spec = find_dataclass_transform_spec(node)
return (
"Func",
common,
node.is_property,
node.is_final,
node.is_class,
node.is_static,
signature,
is_trivial_body,
dataclass_transform_spec.serialize() if dataclass_transform_spec is not None else None,
)
elif isinstance(node, Var):
return ("Var", common, snapshot_optional_type(node.type), node.is_final)
elif isinstance(node, Decorator):
# Note that decorated methods are represented by Decorator instances in
# a symbol table since we need to preserve information about the
# decorated function (whether it's a class function, for
# example). Top-level decorated functions, however, are represented by
# the corresponding Var node, since that happens to provide enough
# context.
return (
"Decorator",
node.is_overload,
snapshot_optional_type(node.var.type),
snapshot_definition(node.func, common),
)
elif isinstance(node, TypeInfo):
dataclass_transform_spec = node.dataclass_transform_spec
if dataclass_transform_spec is None:
dataclass_transform_spec = find_dataclass_transform_spec(node)
attrs = (
node.is_abstract,
node.is_enum,
node.is_protocol,
node.fallback_to_any,
node.meta_fallback_to_any,
node.is_named_tuple,
node.is_newtype,
# We need this to e.g. trigger metaclass calculation in subclasses.
snapshot_optional_type(node.metaclass_type),
snapshot_optional_type(node.tuple_type),
snapshot_optional_type(node.typeddict_type),
[base.fullname for base in node.mro],
# Note that the structure of type variables is a part of the external interface,
# since creating instances might fail, for example:
# T = TypeVar('T', bound=int)
# class C(Generic[T]):
# ...
# x: C[str] <- this is invalid, and needs to be re-checked if `T` changes.
# An alternative would be to create both deps: <...> -> C, and <...> -> <C>,
# but this currently seems a bit ad hoc.
tuple(snapshot_type(tdef) for tdef in node.defn.type_vars),
[snapshot_type(base) for base in node.bases],
[snapshot_type(p) for p in node._promote],
dataclass_transform_spec.serialize() if dataclass_transform_spec is not None else None,
)
prefix = node.fullname
symbol_table = snapshot_symbol_table(prefix, node.names)
# Special dependency for abstract attribute handling.
symbol_table["(abstract)"] = ("Abstract", tuple(sorted(node.abstract_attributes)))
return ("TypeInfo", common, attrs, symbol_table)
else:
# Other node types are handled elsewhere.
assert False, type(node)
def snapshot_type(typ: Type) -> SnapshotItem:
"""Create a snapshot representation of a type using nested tuples."""
return typ.accept(SnapshotTypeVisitor())
def snapshot_optional_type(typ: Type | None) -> SnapshotItem:
if typ:
return snapshot_type(typ)
else:
return ("<not set>",)
def snapshot_types(types: Sequence[Type]) -> SnapshotItem:
return tuple(snapshot_type(item) for item in types)
def snapshot_simple_type(typ: Type) -> SnapshotItem:
return (type(typ).__name__,)
def encode_optional_str(s: str | None) -> str:
if s is None:
return "<None>"
else:
return s
class SnapshotTypeVisitor(TypeVisitor[SnapshotItem]):
"""Creates a read-only, self-contained snapshot of a type object.
Properties of a snapshot:
- Contains (nested) tuples and other immutable primitive objects only.
- References to AST nodes are replaced with full names of targets.
- Has no references to mutable or non-primitive objects.
- Two snapshots represent the same object if and only if they are
equal.
- Results must be sortable. It's important that tuples have
consistent types and can't arbitrarily mix str and None values,
for example, since they can't be compared.
"""
def visit_unbound_type(self, typ: UnboundType) -> SnapshotItem:
return (
"UnboundType",
typ.name,
typ.optional,
typ.empty_tuple_index,
snapshot_types(typ.args),
)
def visit_any(self, typ: AnyType) -> SnapshotItem:
return snapshot_simple_type(typ)
def visit_none_type(self, typ: NoneType) -> SnapshotItem:
return snapshot_simple_type(typ)
def visit_uninhabited_type(self, typ: UninhabitedType) -> SnapshotItem:
return snapshot_simple_type(typ)
def visit_erased_type(self, typ: ErasedType) -> SnapshotItem:
return snapshot_simple_type(typ)
def visit_deleted_type(self, typ: DeletedType) -> SnapshotItem:
return snapshot_simple_type(typ)
def visit_instance(self, typ: Instance) -> SnapshotItem:
extra_attrs: SnapshotItem
if typ.extra_attrs:
extra_attrs = (
tuple(sorted((k, v.accept(self)) for k, v in typ.extra_attrs.attrs.items())),
tuple(typ.extra_attrs.immutable),
)
else:
extra_attrs = ()
return (
"Instance",
encode_optional_str(typ.type.fullname),
snapshot_types(typ.args),
("None",) if typ.last_known_value is None else snapshot_type(typ.last_known_value),
extra_attrs,
)
def visit_type_var(self, typ: TypeVarType) -> SnapshotItem:
return (
"TypeVar",
typ.name,
typ.fullname,
typ.id.raw_id,
typ.id.meta_level,
snapshot_types(typ.values),
snapshot_type(typ.upper_bound),
snapshot_type(typ.default),
typ.variance,
)
def visit_param_spec(self, typ: ParamSpecType) -> SnapshotItem:
return (
"ParamSpec",
typ.id.raw_id,
typ.id.meta_level,
typ.flavor,
snapshot_type(typ.upper_bound),
snapshot_type(typ.default),
)
def visit_type_var_tuple(self, typ: TypeVarTupleType) -> SnapshotItem:
return (
"TypeVarTupleType",
typ.id.raw_id,
typ.id.meta_level,
snapshot_type(typ.upper_bound),
snapshot_type(typ.default),
)
def visit_unpack_type(self, typ: UnpackType) -> SnapshotItem:
return ("UnpackType", snapshot_type(typ.type))
def visit_parameters(self, typ: Parameters) -> SnapshotItem:
return (
"Parameters",
snapshot_types(typ.arg_types),
tuple(encode_optional_str(name) for name in typ.arg_names),
tuple(k.value for k in typ.arg_kinds),
)
def visit_callable_type(self, typ: CallableType) -> SnapshotItem:
if typ.is_generic():
typ = self.normalize_callable_variables(typ)
return (
"CallableType",
snapshot_types(typ.arg_types),
snapshot_type(typ.ret_type),
tuple(encode_optional_str(name) for name in typ.arg_names),
tuple(k.value for k in typ.arg_kinds),
typ.is_type_obj(),
typ.is_ellipsis_args,
snapshot_types(typ.variables),
)
def normalize_callable_variables(self, typ: CallableType) -> CallableType:
"""Normalize all type variable ids to run from -1 to -len(variables)."""
tvs = []
tvmap: dict[TypeVarId, Type] = {}
for i, v in enumerate(typ.variables):
tid = TypeVarId(-1 - i)
if isinstance(v, TypeVarType):
tv: TypeVarLikeType = v.copy_modified(id=tid)
elif isinstance(v, TypeVarTupleType):
tv = v.copy_modified(id=tid)
else:
assert isinstance(v, ParamSpecType)
tv = v.copy_modified(id=tid)
tvs.append(tv)
tvmap[v.id] = tv
with state.strict_optional_set(True):
return expand_type(typ, tvmap).copy_modified(variables=tvs)
def visit_tuple_type(self, typ: TupleType) -> SnapshotItem:
return ("TupleType", snapshot_types(typ.items))
def visit_typeddict_type(self, typ: TypedDictType) -> SnapshotItem:
items = tuple((key, snapshot_type(item_type)) for key, item_type in typ.items.items())
required = tuple(sorted(typ.required_keys))
readonly = tuple(sorted(typ.readonly_keys))
return ("TypedDictType", items, required, readonly)
def visit_literal_type(self, typ: LiteralType) -> SnapshotItem:
return ("LiteralType", snapshot_type(typ.fallback), typ.value)
def visit_union_type(self, typ: UnionType) -> SnapshotItem:
# Sort and remove duplicates so that we can use equality to test for
# equivalent union type snapshots.
items = {snapshot_type(item) for item in typ.items}
normalized = tuple(sorted(items))
return ("UnionType", normalized)
def visit_overloaded(self, typ: Overloaded) -> SnapshotItem:
return ("Overloaded", snapshot_types(typ.items))
def visit_partial_type(self, typ: PartialType) -> SnapshotItem:
# A partial type is not fully defined, so the result is indeterminate. We shouldn't
# get here.
raise RuntimeError
def visit_type_type(self, typ: TypeType) -> SnapshotItem:
return ("TypeType", snapshot_type(typ.item))
def visit_type_alias_type(self, typ: TypeAliasType) -> SnapshotItem:
assert typ.alias is not None
return ("TypeAliasType", typ.alias.fullname, snapshot_types(typ.args))
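# A stand-alone sketch of the id renumbering done by normalize_callable_variables
# above (plain ints instead of TypeVarId objects; the helper name is invented):
# variables are renumbered to -1, -2, ... in declaration order so that two
# signatures differing only in their raw type variable ids produce identical
# snapshots.
def _normalize_variable_ids_sketch(raw_ids: list[int]) -> dict[int, int]:
    return {raw: -1 - i for i, raw in enumerate(raw_ids)}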
def snapshot_untyped_signature(func: OverloadedFuncDef | FuncItem) -> SymbolSnapshot:
"""Create a snapshot of the signature of a function that has no explicit signature.
If the arguments to a function without signature change, it must be
considered as different. We have this special casing since we don't store
the implicit signature anywhere, and we'd rather not construct new
Callable objects in this module (the idea is to only read properties of
the AST here).
"""
if isinstance(func, FuncItem):
return (tuple(func.arg_names), tuple(func.arg_kinds))
else:
result: list[SymbolSnapshot] = []
for item in func.items:
if isinstance(item, Decorator):
if item.var.type:
result.append(snapshot_type(item.var.type))
else:
result.append(("DecoratorWithoutType",))
else:
result.append(snapshot_untyped_signature(item))
return tuple(result)
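# A minimal, self-contained illustration of the snapshot idea as a whole (plain
# tuples instead of mypy's SymbolNode and Type classes): because snapshots are
# nested immutable values, "did anything externally visible change?" reduces to
# an equality comparison between the old and new snapshots.
def _snapshot_comparison_sketch() -> None:
    old = ("Func", ("x", "y"), ("int", "int"), "str")
    new = ("Func", ("x", "y"), ("int", "str"), "str")  # one argument type changed
    assert old != new  # a differing snapshot is what fires the corresponding trigger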
algorandfoundation/puya | src/puyapy/_vendor/mypy/server/astdiff.py | Python | NOASSERTION | 20,528
"""Merge a new version of a module AST and symbol table to older versions of those.
When the source code of a module has a change in fine-grained incremental mode,
we build a new AST from the updated source. However, other parts of the program
may have direct references to parts of the old AST (namely, those nodes exposed
in the module symbol table). The merge operation changes the identities of new
AST nodes that have a correspondence in the old AST to the old ones so that
existing cross-references in other modules will continue to point to the correct
nodes. Also internal cross-references within the new AST are replaced. AST nodes
that aren't externally visible will get new, distinct object identities. This
applies to most expression and statement nodes, for example.
We perform this merge operation so that we don't have to update all
external references (which would be slow and fragile) or always perform
translation when looking up references (which would be hard to retrofit).
The AST merge operation is performed after semantic analysis. Semantic
analysis has to deal with potentially multiple aliases to certain AST
nodes (in particular, MypyFile nodes). Type checking assumes that we
don't have multiple variants of a single AST node visible to the type
checker.
Discussion of some notable special cases:
* If a node is replaced with a different kind of node (say, a function is
replaced with a class), we don't perform the merge. Fine-grained dependencies
will be used to rebind all references to the node.
* If a function is replaced with another function with an identical signature,
call sites continue to point to the same object (by identity) and don't need
to be reprocessed. Similarly, if a class is replaced with a class that is
sufficiently similar (MRO preserved, etc.), class references don't need any
processing. A typical incremental update to a file only changes a few
externally visible things in a module, and this means that often only few
external references need any processing, even if the modified module is large.
* A no-op update of a module should not require any processing outside the
module, since all relevant object identities are preserved.
* The AST diff operation (mypy.server.astdiff) and the top-level fine-grained
incremental logic (mypy.server.update) handle the cases where the new AST has
differences from the old one that may need to be propagated to elsewhere in the
program.
See the main entry point merge_asts for more details.
"""
from __future__ import annotations
from typing import TypeVar, cast
from mypy.nodes import (
MDEF,
AssertTypeExpr,
AssignmentStmt,
Block,
CallExpr,
CastExpr,
ClassDef,
EnumCallExpr,
FuncBase,
FuncDef,
LambdaExpr,
MemberExpr,
MypyFile,
NamedTupleExpr,
NameExpr,
NewTypeExpr,
OverloadedFuncDef,
RefExpr,
Statement,
SuperExpr,
SymbolNode,
SymbolTable,
TypeAlias,
TypedDictExpr,
TypeInfo,
Var,
)
from mypy.traverser import TraverserVisitor
from mypy.types import (
AnyType,
CallableArgument,
CallableType,
DeletedType,
EllipsisType,
ErasedType,
Instance,
LiteralType,
NoneType,
Overloaded,
Parameters,
ParamSpecType,
PartialType,
PlaceholderType,
RawExpressionType,
SyntheticTypeVisitor,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeList,
TypeType,
TypeVarTupleType,
TypeVarType,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
)
from mypy.typestate import type_state
from mypy.util import get_prefix, replace_object_state
def merge_asts(
old: MypyFile, old_symbols: SymbolTable, new: MypyFile, new_symbols: SymbolTable
) -> None:
"""Merge a new version of a module AST to a previous version.
The main idea is to preserve the identities of externally visible
nodes in the old AST (that have a corresponding node in the new AST).
All old node state (outside identity) will come from the new AST.
When this returns, 'old' will refer to the merged AST, but 'new_symbols'
will be the new symbol table. 'new' and 'old_symbols' will no longer be
valid.
"""
assert new.fullname == old.fullname
# Find the mapping from new to old node identities for all nodes
# whose identities should be preserved.
replacement_map = replacement_map_from_symbol_table(
old_symbols, new_symbols, prefix=old.fullname
)
# Also replace references to the new MypyFile node.
replacement_map[new] = old
    # Perform replacements everywhere within the new AST (not including symbol
# tables).
node = replace_nodes_in_ast(new, replacement_map)
assert node is old
# Also replace AST node references in the *new* symbol table (we'll
# continue to use the new symbol table since it has all the new definitions
# that have no correspondence in the old AST).
replace_nodes_in_symbol_table(new_symbols, replacement_map)
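# A simplified, stand-alone illustration of the identity-preservation idea with
# plain objects instead of AST nodes (all names below are invented): state is
# copied from the new object into the old one, so references held elsewhere keep
# pointing at a valid node while observing the updated state.
def _identity_preserving_merge_sketch() -> None:
    class _Node:
        def __init__(self, payload: str) -> None:
            self.payload = payload
    old, new = _Node("v1"), _Node("v2")
    external_ref = old  # another module still holds a reference to the old node
    old.__dict__.update(new.__dict__)  # keep the identity, adopt the new state
    assert external_ref is old and external_ref.payload == "v2"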
def replacement_map_from_symbol_table(
old: SymbolTable, new: SymbolTable, prefix: str
) -> dict[SymbolNode, SymbolNode]:
"""Create a new-to-old object identity map by comparing two symbol table revisions.
Both symbol tables must refer to revisions of the same module id. The symbol tables
are compared recursively (recursing into nested class symbol tables), but only within
the given module prefix. Don't recurse into other modules accessible through the symbol
table.
"""
replacements: dict[SymbolNode, SymbolNode] = {}
for name, node in old.items():
if name in new and (
node.kind == MDEF or node.node and get_prefix(node.node.fullname) == prefix
):
new_node = new[name]
if (
type(new_node.node) == type(node.node) # noqa: E721
and new_node.node
and node.node
and new_node.node.fullname == node.node.fullname
and new_node.kind == node.kind
):
replacements[new_node.node] = node.node
if isinstance(node.node, TypeInfo) and isinstance(new_node.node, TypeInfo):
type_repl = replacement_map_from_symbol_table(
node.node.names, new_node.node.names, prefix
)
replacements.update(type_repl)
if node.node.special_alias and new_node.node.special_alias:
replacements[new_node.node.special_alias] = node.node.special_alias
return replacements
def replace_nodes_in_ast(
node: SymbolNode, replacements: dict[SymbolNode, SymbolNode]
) -> SymbolNode:
"""Replace all references to replacement map keys within an AST node, recursively.
Also replace the *identity* of any nodes that have replacements. Return the
*replaced* version of the argument node (which may have a different identity, if
it's included in the replacement map).
"""
visitor = NodeReplaceVisitor(replacements)
node.accept(visitor)
return replacements.get(node, node)
SN = TypeVar("SN", bound=SymbolNode)
class NodeReplaceVisitor(TraverserVisitor):
"""Transform some nodes to new identities in an AST.
Only nodes that live in the symbol table may be
    replaced, which simplifies the implementation somewhat. Also
replace all references to the old identities.
"""
def __init__(self, replacements: dict[SymbolNode, SymbolNode]) -> None:
self.replacements = replacements
def visit_mypy_file(self, node: MypyFile) -> None:
node = self.fixup(node)
node.defs = self.replace_statements(node.defs)
super().visit_mypy_file(node)
def visit_block(self, node: Block) -> None:
node.body = self.replace_statements(node.body)
super().visit_block(node)
def visit_func_def(self, node: FuncDef) -> None:
node = self.fixup(node)
self.process_base_func(node)
super().visit_func_def(node)
def visit_overloaded_func_def(self, node: OverloadedFuncDef) -> None:
self.process_base_func(node)
super().visit_overloaded_func_def(node)
def visit_class_def(self, node: ClassDef) -> None:
# TODO additional things?
node.info = self.fixup_and_reset_typeinfo(node.info)
node.defs.body = self.replace_statements(node.defs.body)
info = node.info
for tv in node.type_vars:
if isinstance(tv, TypeVarType):
self.process_type_var_def(tv)
if info:
if info.is_named_tuple:
self.process_synthetic_type_info(info)
else:
self.process_type_info(info)
super().visit_class_def(node)
def process_base_func(self, node: FuncBase) -> None:
self.fixup_type(node.type)
node.info = self.fixup(node.info)
if node.unanalyzed_type:
# Unanalyzed types can have AST node references
self.fixup_type(node.unanalyzed_type)
def process_type_var_def(self, tv: TypeVarType) -> None:
for value in tv.values:
self.fixup_type(value)
self.fixup_type(tv.upper_bound)
self.fixup_type(tv.default)
def process_param_spec_def(self, tv: ParamSpecType) -> None:
self.fixup_type(tv.upper_bound)
self.fixup_type(tv.default)
def process_type_var_tuple_def(self, tv: TypeVarTupleType) -> None:
self.fixup_type(tv.upper_bound)
self.fixup_type(tv.default)
def visit_assignment_stmt(self, node: AssignmentStmt) -> None:
self.fixup_type(node.type)
super().visit_assignment_stmt(node)
# Expressions
def visit_name_expr(self, node: NameExpr) -> None:
self.visit_ref_expr(node)
def visit_member_expr(self, node: MemberExpr) -> None:
if node.def_var:
node.def_var = self.fixup(node.def_var)
self.visit_ref_expr(node)
super().visit_member_expr(node)
def visit_ref_expr(self, node: RefExpr) -> None:
if node.node is not None:
node.node = self.fixup(node.node)
if isinstance(node.node, Var):
# The Var node may be an orphan and won't otherwise be processed.
node.node.accept(self)
def visit_namedtuple_expr(self, node: NamedTupleExpr) -> None:
super().visit_namedtuple_expr(node)
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
def visit_cast_expr(self, node: CastExpr) -> None:
super().visit_cast_expr(node)
self.fixup_type(node.type)
def visit_assert_type_expr(self, node: AssertTypeExpr) -> None:
super().visit_assert_type_expr(node)
self.fixup_type(node.type)
def visit_super_expr(self, node: SuperExpr) -> None:
super().visit_super_expr(node)
if node.info is not None:
node.info = self.fixup(node.info)
def visit_call_expr(self, node: CallExpr) -> None:
super().visit_call_expr(node)
if isinstance(node.analyzed, SymbolNode):
node.analyzed = self.fixup(node.analyzed)
def visit_newtype_expr(self, node: NewTypeExpr) -> None:
if node.info:
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
self.fixup_type(node.old_type)
super().visit_newtype_expr(node)
def visit_lambda_expr(self, node: LambdaExpr) -> None:
node.info = self.fixup(node.info)
super().visit_lambda_expr(node)
def visit_typeddict_expr(self, node: TypedDictExpr) -> None:
super().visit_typeddict_expr(node)
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
def visit_enum_call_expr(self, node: EnumCallExpr) -> None:
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
super().visit_enum_call_expr(node)
# Others
def visit_var(self, node: Var) -> None:
node.info = self.fixup(node.info)
self.fixup_type(node.type)
super().visit_var(node)
def visit_type_alias(self, node: TypeAlias) -> None:
self.fixup_type(node.target)
for v in node.alias_tvars:
self.fixup_type(v)
super().visit_type_alias(node)
# Helpers
def fixup(self, node: SN) -> SN:
if node in self.replacements:
new = self.replacements[node]
skip_slots: tuple[str, ...] = ()
if isinstance(node, TypeInfo) and isinstance(new, TypeInfo):
# Special case: special_alias is not exposed in symbol tables, but may appear
# in external types (e.g. named tuples), so we need to update it manually.
skip_slots = ("special_alias",)
replace_object_state(new.special_alias, node.special_alias)
replace_object_state(new, node, skip_slots=skip_slots)
return cast(SN, new)
return node
def fixup_and_reset_typeinfo(self, node: TypeInfo) -> TypeInfo:
"""Fix-up type info and reset subtype caches.
This needs to be called at least once per each merged TypeInfo, as otherwise we
may leak stale caches.
"""
if node in self.replacements:
# The subclass relationships may change, so reset all caches relevant to the
# old MRO.
new = self.replacements[node]
assert isinstance(new, TypeInfo)
type_state.reset_all_subtype_caches_for(new)
return self.fixup(node)
def fixup_type(self, typ: Type | None) -> None:
if typ is not None:
typ.accept(TypeReplaceVisitor(self.replacements))
def process_type_info(self, info: TypeInfo | None) -> None:
if info is None:
return
self.fixup_type(info.declared_metaclass)
self.fixup_type(info.metaclass_type)
for target in info._promote:
self.fixup_type(target)
self.fixup_type(info.tuple_type)
self.fixup_type(info.typeddict_type)
if info.special_alias:
self.fixup_type(info.special_alias.target)
info.defn.info = self.fixup(info)
replace_nodes_in_symbol_table(info.names, self.replacements)
for i, item in enumerate(info.mro):
info.mro[i] = self.fixup(info.mro[i])
for i, base in enumerate(info.bases):
self.fixup_type(info.bases[i])
def process_synthetic_type_info(self, info: TypeInfo) -> None:
# Synthetic types (types not created using a class statement) don't
# have bodies in the AST so we need to iterate over their symbol
# tables separately, unlike normal classes.
self.process_type_info(info)
for node in info.names.values():
if node.node:
node.node.accept(self)
def replace_statements(self, nodes: list[Statement]) -> list[Statement]:
result = []
for node in nodes:
if isinstance(node, SymbolNode):
node = self.fixup(node)
result.append(node)
return result
class TypeReplaceVisitor(SyntheticTypeVisitor[None]):
"""Similar to NodeReplaceVisitor, but for type objects.
Note: this visitor may sometimes visit unanalyzed types
    such as 'UnboundType' and 'RawExpressionType'. For example, see
NodeReplaceVisitor.process_base_func.
"""
def __init__(self, replacements: dict[SymbolNode, SymbolNode]) -> None:
self.replacements = replacements
def visit_instance(self, typ: Instance) -> None:
typ.type = self.fixup(typ.type)
for arg in typ.args:
arg.accept(self)
if typ.last_known_value:
typ.last_known_value.accept(self)
def visit_type_alias_type(self, typ: TypeAliasType) -> None:
assert typ.alias is not None
typ.alias = self.fixup(typ.alias)
for arg in typ.args:
arg.accept(self)
def visit_any(self, typ: AnyType) -> None:
pass
def visit_none_type(self, typ: NoneType) -> None:
pass
def visit_callable_type(self, typ: CallableType) -> None:
for arg in typ.arg_types:
arg.accept(self)
typ.ret_type.accept(self)
if typ.definition:
# No need to fixup since this is just a cross-reference.
typ.definition = self.replacements.get(typ.definition, typ.definition)
# Fallback can be None for callable types that haven't been semantically analyzed.
if typ.fallback is not None:
typ.fallback.accept(self)
for tv in typ.variables:
if isinstance(tv, TypeVarType):
tv.upper_bound.accept(self)
for value in tv.values:
value.accept(self)
def visit_overloaded(self, t: Overloaded) -> None:
for item in t.items:
item.accept(self)
# Fallback can be None for overloaded types that haven't been semantically analyzed.
if t.fallback is not None:
t.fallback.accept(self)
def visit_erased_type(self, t: ErasedType) -> None:
# This type should exist only temporarily during type inference
raise RuntimeError("Cannot handle erased type")
def visit_deleted_type(self, typ: DeletedType) -> None:
pass
def visit_partial_type(self, typ: PartialType) -> None:
raise RuntimeError("Cannot handle partial type")
def visit_tuple_type(self, typ: TupleType) -> None:
for item in typ.items:
item.accept(self)
# Fallback can be None for implicit tuple types that haven't been semantically analyzed.
if typ.partial_fallback is not None:
typ.partial_fallback.accept(self)
def visit_type_type(self, typ: TypeType) -> None:
typ.item.accept(self)
def visit_type_var(self, typ: TypeVarType) -> None:
typ.upper_bound.accept(self)
typ.default.accept(self)
for value in typ.values:
value.accept(self)
def visit_param_spec(self, typ: ParamSpecType) -> None:
typ.upper_bound.accept(self)
typ.default.accept(self)
def visit_type_var_tuple(self, typ: TypeVarTupleType) -> None:
typ.upper_bound.accept(self)
typ.default.accept(self)
def visit_unpack_type(self, typ: UnpackType) -> None:
typ.type.accept(self)
def visit_parameters(self, typ: Parameters) -> None:
for arg in typ.arg_types:
arg.accept(self)
def visit_typeddict_type(self, typ: TypedDictType) -> None:
for value_type in typ.items.values():
value_type.accept(self)
typ.fallback.accept(self)
def visit_raw_expression_type(self, t: RawExpressionType) -> None:
pass
def visit_literal_type(self, typ: LiteralType) -> None:
typ.fallback.accept(self)
def visit_unbound_type(self, typ: UnboundType) -> None:
for arg in typ.args:
arg.accept(self)
def visit_type_list(self, typ: TypeList) -> None:
for item in typ.items:
item.accept(self)
def visit_callable_argument(self, typ: CallableArgument) -> None:
typ.typ.accept(self)
def visit_ellipsis_type(self, typ: EllipsisType) -> None:
pass
def visit_uninhabited_type(self, typ: UninhabitedType) -> None:
pass
def visit_union_type(self, typ: UnionType) -> None:
for item in typ.items:
item.accept(self)
def visit_placeholder_type(self, t: PlaceholderType) -> None:
for item in t.args:
item.accept(self)
# Helpers
def fixup(self, node: SN) -> SN:
if node in self.replacements:
new = self.replacements[node]
return cast(SN, new)
return node
def replace_nodes_in_symbol_table(
symbols: SymbolTable, replacements: dict[SymbolNode, SymbolNode]
) -> None:
for node in symbols.values():
if node.node:
if node.node in replacements:
new = replacements[node.node]
old = node.node
# Needed for TypeInfo, see comment in fixup() above.
replace_object_state(new, old, skip_slots=("special_alias",))
node.node = new
if isinstance(node.node, (Var, TypeAlias)):
# Handle them here just in case these aren't exposed through the AST.
node.node.accept(NodeReplaceVisitor(replacements))
algorandfoundation/puya | src/puyapy/_vendor/mypy/server/astmerge.py | Python | NOASSERTION | 20,636
"""Strip/reset AST in-place to match state after semantic analyzer pre-analysis.
Fine-grained incremental mode reruns semantic analysis main pass
and type checking for *existing* AST nodes (targets) when changes are
propagated using fine-grained dependencies. AST nodes attributes are
sometimes changed during semantic analysis main pass, and running
semantic analysis again on those nodes would produce incorrect
results, since this pass isn't idempotent. This pass resets AST
nodes to reflect the state after semantic pre-analysis, so that we
can rerun semantic analysis.
(The above is in contrast to behavior with modules that have source code
changes, for which we re-parse the entire module and reconstruct a fresh
AST. No stripping is required in this case. Both modes of operation should
have the same outcome.)
Notes:
* This is currently pretty fragile, as we must carefully undo whatever
changes can be made in semantic analysis main pass, including changes
to symbol tables.
* We reuse existing AST nodes because it makes it relatively straightforward
to reprocess only a single target within a module efficiently. If there
was a way to parse a single target within a file, in time proportional to
the size of the target, we'd rather create fresh AST nodes than strip them.
(This is possible only in Python 3.8+)
* Currently we don't actually reset all changes, but only those known to affect
non-idempotent semantic analysis behavior.
TODO: It would be more principled and less fragile to reset everything
changed in semantic analysis main pass and later.
* Reprocessing may recreate AST nodes (such as Var nodes, and TypeInfo nodes
created with assignment statements) that will get different identities from
the original AST. Thus running an AST merge is necessary after stripping,
even though some identities are preserved.
"""
from __future__ import annotations
from contextlib import contextmanager, nullcontext
from typing import Dict, Iterator, Tuple
from typing_extensions import TypeAlias as _TypeAlias
from mypy.nodes import (
CLASSDEF_NO_INFO,
AssignmentStmt,
Block,
CallExpr,
ClassDef,
Decorator,
ForStmt,
FuncDef,
ImportAll,
ImportFrom,
IndexExpr,
ListExpr,
MemberExpr,
MypyFile,
NameExpr,
Node,
OpExpr,
OverloadedFuncDef,
RefExpr,
StarExpr,
SuperExpr,
SymbolTableNode,
TupleExpr,
TypeInfo,
Var,
)
from mypy.traverser import TraverserVisitor
from mypy.types import CallableType
from mypy.typestate import type_state
SavedAttributes: _TypeAlias = Dict[Tuple[ClassDef, str], SymbolTableNode]
def strip_target(
node: MypyFile | FuncDef | OverloadedFuncDef, saved_attrs: SavedAttributes
) -> None:
"""Reset a fine-grained incremental target to state before semantic analysis.
All TypeInfos are killed. Therefore we need to preserve the variables
    defined as attributes on self. These are recorded in the saved_attrs
    mapping so that they can be re-added to the classes afterwards.
Args:
node: node to strip
saved_attrs: collect attributes here that may need to be re-added to
classes afterwards if stripping a class body (this dict is mutated)
"""
visitor = NodeStripVisitor(saved_attrs)
if isinstance(node, MypyFile):
visitor.strip_file_top_level(node)
else:
node.accept(visitor)
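# A stand-alone sketch of the save/re-add pattern for attributes defined on self
# (plain dicts instead of TypeInfo symbol tables; all names are hypothetical):
# implicit attributes are stashed before the class body is stripped so they can
# be restored before any method is re-analyzed.
def _saved_attrs_sketch() -> None:
    names = {"x": "Var (implicit, from self.x = ...)", "f": "FuncDef"}
    saved = {("C", name): sym for name, sym in names.items() if "implicit" in sym}
    names.clear()  # stripping kills the class namespace...
    for (_, name), sym in saved.items():
        names.setdefault(name, sym)  # ...and implicit attributes are re-added later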
class NodeStripVisitor(TraverserVisitor):
def __init__(self, saved_class_attrs: SavedAttributes) -> None:
# The current active class.
self.type: TypeInfo | None = None
# This is True at class scope, but not in methods.
self.is_class_body = False
# By default, process function definitions. If False, don't -- this is used for
# processing module top levels.
self.recurse_into_functions = True
# These attributes were removed from top-level classes during strip and
# will be added afterwards (if no existing definition is found). These
# must be added back before semantically analyzing any methods.
self.saved_class_attrs = saved_class_attrs
    def strip_file_top_level(self, file_node: MypyFile) -> None:
        """Strip a module top-level (don't recurse into functions)."""
self.recurse_into_functions = False
file_node.plugin_deps.clear()
file_node.accept(self)
for name in file_node.names.copy():
# TODO: this is a hot fix, we should delete all names,
# see https://github.com/python/mypy/issues/6422.
if "@" not in name:
del file_node.names[name]
def visit_block(self, b: Block) -> None:
if b.is_unreachable:
return
super().visit_block(b)
def visit_class_def(self, node: ClassDef) -> None:
"""Strip class body and type info, but don't strip methods."""
# We need to save the implicitly defined instance variables,
# i.e. those defined as attributes on self. Otherwise, they would
# be lost if we only reprocess top-levels (this kills TypeInfos)
# but not the methods that defined those variables.
if not self.recurse_into_functions:
self.save_implicit_attributes(node)
# We need to delete any entries that were generated by plugins,
# since they will get regenerated.
to_delete = {v.node for v in node.info.names.values() if v.plugin_generated}
node.type_vars = []
node.base_type_exprs.extend(node.removed_base_type_exprs)
node.removed_base_type_exprs = []
node.defs.body = [
s for s in node.defs.body if s not in to_delete # type: ignore[comparison-overlap]
]
with self.enter_class(node.info):
super().visit_class_def(node)
node.defs.body.extend(node.removed_statements)
node.removed_statements = []
type_state.reset_subtype_caches_for(node.info)
# Kill the TypeInfo, since there is none before semantic analysis.
node.info = CLASSDEF_NO_INFO
node.analyzed = None
    def save_implicit_attributes(self, node: ClassDef) -> None:
        """Record attributes defined on self so they can be re-added afterwards."""
for name, sym in node.info.names.items():
if isinstance(sym.node, Var) and sym.implicit:
self.saved_class_attrs[node, name] = sym
def visit_func_def(self, node: FuncDef) -> None:
if not self.recurse_into_functions:
return
node.expanded = []
node.type = node.unanalyzed_type
if node.type:
            # The type variable binder binds type variables before the type is analyzed,
            # which causes unanalyzed_type to be modified in place. We need to revert this
# in order to get the state exactly as it was before semantic analysis.
# See also #4814.
assert isinstance(node.type, CallableType)
node.type.variables = []
with self.enter_method(node.info) if node.info else nullcontext():
super().visit_func_def(node)
def visit_decorator(self, node: Decorator) -> None:
node.var.type = None
for expr in node.decorators:
expr.accept(self)
if self.recurse_into_functions:
node.func.accept(self)
else:
# Only touch the final status if we re-process
# the top level, since decorators are processed there.
node.var.is_final = False
node.func.is_final = False
def visit_overloaded_func_def(self, node: OverloadedFuncDef) -> None:
if not self.recurse_into_functions:
return
# Revert change made during semantic analysis main pass.
node.items = node.unanalyzed_items.copy()
node.impl = None
node.is_final = False
super().visit_overloaded_func_def(node)
def visit_assignment_stmt(self, node: AssignmentStmt) -> None:
node.type = node.unanalyzed_type
node.is_final_def = False
node.is_alias_def = False
if self.type and not self.is_class_body:
for lvalue in node.lvalues:
# Revert assignments made via self attributes.
self.process_lvalue_in_method(lvalue)
super().visit_assignment_stmt(node)
def visit_import_from(self, node: ImportFrom) -> None:
node.assignments = []
def visit_import_all(self, node: ImportAll) -> None:
node.assignments = []
def visit_for_stmt(self, node: ForStmt) -> None:
node.index_type = node.unanalyzed_index_type
node.inferred_item_type = None
node.inferred_iterator_type = None
super().visit_for_stmt(node)
def visit_name_expr(self, node: NameExpr) -> None:
self.strip_ref_expr(node)
def visit_member_expr(self, node: MemberExpr) -> None:
self.strip_ref_expr(node)
super().visit_member_expr(node)
def visit_index_expr(self, node: IndexExpr) -> None:
node.analyzed = None # May have been an alias or type application.
super().visit_index_expr(node)
def visit_op_expr(self, node: OpExpr) -> None:
node.analyzed = None # May have been an alias
super().visit_op_expr(node)
def strip_ref_expr(self, node: RefExpr) -> None:
node.kind = None
node.node = None
node.fullname = ""
node.is_new_def = False
node.is_inferred_def = False
def visit_call_expr(self, node: CallExpr) -> None:
node.analyzed = None
super().visit_call_expr(node)
def visit_super_expr(self, node: SuperExpr) -> None:
node.info = None
super().visit_super_expr(node)
def process_lvalue_in_method(self, lvalue: Node) -> None:
if isinstance(lvalue, MemberExpr):
if lvalue.is_new_def:
# Remove defined attribute from the class symbol table. If is_new_def is
# true for a MemberExpr, we know that it must be an assignment through
# self, since only those can define new attributes.
assert self.type is not None
if lvalue.name in self.type.names:
del self.type.names[lvalue.name]
key = (self.type.defn, lvalue.name)
if key in self.saved_class_attrs:
del self.saved_class_attrs[key]
elif isinstance(lvalue, (TupleExpr, ListExpr)):
for item in lvalue.items:
self.process_lvalue_in_method(item)
elif isinstance(lvalue, StarExpr):
self.process_lvalue_in_method(lvalue.expr)
@contextmanager
def enter_class(self, info: TypeInfo) -> Iterator[None]:
old_type = self.type
old_is_class_body = self.is_class_body
self.type = info
self.is_class_body = True
yield
self.type = old_type
self.is_class_body = old_is_class_body
@contextmanager
def enter_method(self, info: TypeInfo) -> Iterator[None]:
old_type = self.type
old_is_class_body = self.is_class_body
self.type = info
self.is_class_body = False
yield
self.type = old_type
self.is_class_body = old_is_class_body
algorandfoundation/puya | src/puyapy/_vendor/mypy/server/aststrip.py | Python | NOASSERTION | 11,293
"""Generate fine-grained dependencies for AST nodes, for use in the daemon mode.
Dependencies are stored in a map from *triggers* to *sets of affected locations*.
A trigger is a string that represents a program property that has changed, such
as the signature of a specific function. Triggers are written as '<...>' (angle
brackets). When a program property changes, we determine the relevant trigger(s)
and all affected locations. The latter are stale and will have to be reprocessed.
An affected location is a string that can refer to a *target* (a non-nested
function or method, or a module top level), a class, or a trigger (for
recursively triggering other triggers).
Here's an example representation of a simple dependency map (in format
"<trigger> -> locations"):
<m.A.g> -> m.f
<m.A> -> <m.f>, m.A, m.f
Assuming 'A' is a class, this means that
1) if a property of 'm.A.g', such as the signature, is changed, we need
to process target (function) 'm.f'
2) if the MRO or other significant property of class 'm.A' changes, we
need to process target 'm.f', the entire class 'm.A', and locations
triggered by trigger '<m.f>' (this explanation is a bit simplified;
see below for more details).
The triggers to fire are determined using mypy.server.astdiff.
Examples of triggers:
* '<mod.x>' represents a module attribute/function/class. If any externally
visible property of 'x' changes, this gets fired. For changes within
classes, only "big" changes cause the class to be triggered (such as a
change in MRO). Smaller changes, such as changes to some attributes, don't
trigger the entire class.
* '<mod.Cls.x>' represents the type and kind of attribute/method 'x' of
class 'mod.Cls'. This can also refer to an attribute inherited from a
base class (relevant if it's accessed through a value of type 'Cls'
instead of the base class type).
* '<package.mod>' represents the existence of module 'package.mod'. This
gets triggered if 'package.mod' is created or deleted, or if it gets
changed into something other than a module.
Examples of locations:
* 'mod' is the top level of module 'mod' (doesn't include any function bodies,
but includes class bodies not nested within a function).
* 'mod.f' is function 'f' in module 'mod' (module-level variables aren't separate
locations but are included in the module top level). Functions also include
any nested functions and classes -- such nested definitions aren't separate
locations, for simplicity of implementation.
* 'mod.Cls.f' is method 'f' of 'mod.Cls'. Non-method attributes aren't locations.
* 'mod.Cls' represents each method in class 'mod.Cls' + the top-level of the
module 'mod'. (To simplify the implementation, there is no location that only
includes the body of a class without the entire surrounding module top level.)
* Trigger '<...>' as a location is an indirect way of referring to all
locations triggered by the trigger. These indirect locations keep the
dependency map smaller and easier to manage.
Triggers can be triggered by program changes such as these:
* Addition or deletion of an attribute (or module).
* Change of the kind of thing a name represents (such as a change from a function
to a class).
* Change of the static type of a name.
Changes in the body of a function that aren't reflected in the signature don't
cause the function to be triggered. More generally, we trigger only on changes
that may affect type checking results outside the module that contains the
change.
We don't generate dependencies from builtins and certain other stdlib modules,
since these change very rarely, and they would just increase the size of the
dependency map significantly without significant benefit.
Test cases for this module live in 'test-data/unit/deps*.test'.
"""
from __future__ import annotations
from collections import defaultdict
from typing import List
from mypy.nodes import (
GDEF,
LDEF,
MDEF,
AssertTypeExpr,
AssignmentStmt,
AwaitExpr,
Block,
CallExpr,
CastExpr,
ClassDef,
ComparisonExpr,
Decorator,
DelStmt,
DictionaryComprehension,
EnumCallExpr,
Expression,
ForStmt,
FuncBase,
FuncDef,
GeneratorExpr,
Import,
ImportAll,
ImportFrom,
IndexExpr,
MemberExpr,
MypyFile,
NamedTupleExpr,
NameExpr,
NewTypeExpr,
Node,
OperatorAssignmentStmt,
OpExpr,
OverloadedFuncDef,
RefExpr,
StarExpr,
SuperExpr,
TupleExpr,
TypeAliasExpr,
TypeApplication,
TypedDictExpr,
TypeInfo,
TypeVarExpr,
UnaryExpr,
Var,
WithStmt,
YieldFromExpr,
)
from mypy.operators import (
op_methods,
ops_with_inplace_method,
reverse_op_methods,
unary_op_methods,
)
from mypy.options import Options
from mypy.scope import Scope
from mypy.server.trigger import make_trigger, make_wildcard_trigger
from mypy.traverser import TraverserVisitor
from mypy.typeops import bind_self
from mypy.types import (
AnyType,
CallableType,
DeletedType,
ErasedType,
FunctionLike,
Instance,
LiteralType,
NoneType,
Overloaded,
Parameters,
ParamSpecType,
PartialType,
ProperType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeType,
TypeVarTupleType,
TypeVarType,
TypeVisitor,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
get_proper_type,
)
from mypy.typestate import type_state
from mypy.util import correct_relative_import
def get_dependencies(
target: MypyFile,
type_map: dict[Expression, Type],
python_version: tuple[int, int],
options: Options,
) -> dict[str, set[str]]:
"""Get all dependencies of a node, recursively."""
visitor = DependencyVisitor(type_map, python_version, target.alias_deps, options)
target.accept(visitor)
return visitor.map
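# An illustrative value of the returned map for the example in the module
# docstring (module "m" with class A and function f); the entries are written
# by hand here, not captured from a real run:
def _example_dependency_map() -> dict[str, set[str]]:
    return {"<m.A.g>": {"m.f"}, "<m.A>": {"<m.f>", "m.A", "m.f"}}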
def get_dependencies_of_target(
module_id: str,
module_tree: MypyFile,
target: Node,
type_map: dict[Expression, Type],
python_version: tuple[int, int],
) -> dict[str, set[str]]:
    """Get dependencies of a target -- don't recurse into nested targets."""
# TODO: Add tests for this function.
visitor = DependencyVisitor(type_map, python_version, module_tree.alias_deps)
with visitor.scope.module_scope(module_id):
if isinstance(target, MypyFile):
# Only get dependencies of the top-level of the module. Don't recurse into
# functions.
for defn in target.defs:
# TODO: Recurse into top-level statements and class bodies but skip functions.
if not isinstance(defn, (ClassDef, Decorator, FuncDef, OverloadedFuncDef)):
defn.accept(visitor)
elif isinstance(target, FuncBase) and target.info:
# It's a method.
# TODO: Methods in nested classes.
with visitor.scope.class_scope(target.info):
target.accept(visitor)
else:
target.accept(visitor)
return visitor.map
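# Hypothetical usage sketch (the real driver lives in mypy.server.update; the
# merging helper below is invented for illustration): per-module maps returned
# by get_dependencies() are unioned into a single global trigger -> locations
# map before propagation.
def _merge_dependency_maps_sketch(maps: list[dict[str, set[str]]]) -> dict[str, set[str]]:
    combined: dict[str, set[str]] = {}
    for module_map in maps:
        for trigger, targets in module_map.items():
            combined.setdefault(trigger, set()).update(targets)
    return combined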
class DependencyVisitor(TraverserVisitor):
def __init__(
self,
type_map: dict[Expression, Type],
python_version: tuple[int, int],
alias_deps: defaultdict[str, set[str]],
options: Options | None = None,
) -> None:
self.scope = Scope()
self.type_map = type_map
# This attribute holds a mapping from target to names of type aliases
# it depends on. These need to be processed specially, since they are
# only present in expanded form in symbol tables. For example, after:
# A = List[int]
# x: A
# The module symbol table will just have a Var `x` with type `List[int]`,
# and the dependency of `x` on `A` is lost. Therefore the alias dependencies
# are preserved at alias expansion points in `semanal.py`, stored as an attribute
# on MypyFile, and then passed here.
self.alias_deps = alias_deps
self.map: dict[str, set[str]] = {}
self.is_class = False
self.is_package_init_file = False
self.options = options
def visit_mypy_file(self, o: MypyFile) -> None:
with self.scope.module_scope(o.fullname):
self.is_package_init_file = o.is_package_init_file()
self.add_type_alias_deps(self.scope.current_target())
for trigger, targets in o.plugin_deps.items():
self.map.setdefault(trigger, set()).update(targets)
super().visit_mypy_file(o)
def visit_func_def(self, o: FuncDef) -> None:
with self.scope.function_scope(o):
target = self.scope.current_target()
if o.type:
if self.is_class and isinstance(o.type, FunctionLike):
signature: Type = bind_self(o.type)
else:
signature = o.type
for trigger in self.get_type_triggers(signature):
self.add_dependency(trigger)
self.add_dependency(trigger, target=make_trigger(target))
if o.info:
for base in non_trivial_bases(o.info):
# Base class __init__/__new__ doesn't generate a logical
# dependency since the override can be incompatible.
if not self.use_logical_deps() or o.name not in ("__init__", "__new__"):
self.add_dependency(make_trigger(base.fullname + "." + o.name))
self.add_type_alias_deps(self.scope.current_target())
super().visit_func_def(o)
variants = set(o.expanded) - {o}
for ex in variants:
if isinstance(ex, FuncDef):
super().visit_func_def(ex)
def visit_decorator(self, o: Decorator) -> None:
if not self.use_logical_deps():
            # We don't need to recheck the outer scope for an overload, only the overload
            # itself. Also, if any decorator is nested, it is not externally visible, so we
            # don't need to generate a dependency.
if not o.func.is_overload and self.scope.current_function_name() is None:
self.add_dependency(make_trigger(o.func.fullname))
else:
# Add logical dependencies from decorators to the function. For example,
# if we have
# @dec
# def func(): ...
# then if `dec` is unannotated, then it will "spoil" `func` and consequently
# all call sites, making them all `Any`.
for d in o.decorators:
tname: str | None = None
if isinstance(d, RefExpr) and d.fullname:
tname = d.fullname
if isinstance(d, CallExpr) and isinstance(d.callee, RefExpr) and d.callee.fullname:
tname = d.callee.fullname
if tname is not None:
self.add_dependency(make_trigger(tname), make_trigger(o.func.fullname))
super().visit_decorator(o)
def visit_class_def(self, o: ClassDef) -> None:
with self.scope.class_scope(o.info):
target = self.scope.current_full_target()
self.add_dependency(make_trigger(target), target)
old_is_class = self.is_class
self.is_class = True
# Add dependencies to type variables of a generic class.
for tv in o.type_vars:
self.add_dependency(make_trigger(tv.fullname), target)
self.process_type_info(o.info)
super().visit_class_def(o)
self.is_class = old_is_class
def visit_newtype_expr(self, o: NewTypeExpr) -> None:
if o.info:
with self.scope.class_scope(o.info):
self.process_type_info(o.info)
def process_type_info(self, info: TypeInfo) -> None:
target = self.scope.current_full_target()
for base in info.bases:
self.add_type_dependencies(base, target=target)
if info.tuple_type:
self.add_type_dependencies(info.tuple_type, target=make_trigger(target))
if info.typeddict_type:
self.add_type_dependencies(info.typeddict_type, target=make_trigger(target))
if info.declared_metaclass:
self.add_type_dependencies(info.declared_metaclass, target=make_trigger(target))
if info.is_protocol:
for base_info in info.mro[:-1]:
                # We add dependencies from the whole MRO to cover explicit subprotocols.
# For example:
#
# class Super(Protocol):
# x: int
# class Sub(Super, Protocol):
# y: int
#
# In this example we add <Super[wildcard]> -> <Sub>, to invalidate Sub if
# a new member is added to Super.
self.add_dependency(
make_wildcard_trigger(base_info.fullname), target=make_trigger(target)
)
# More protocol dependencies are collected in type_state._snapshot_protocol_deps
# after a full run or update is finished.
self.add_type_alias_deps(self.scope.current_target())
for name, node in info.names.items():
if isinstance(node.node, Var):
                # Recheck Liskov if needed; self definitions are checked in the defining method
if node.node.is_initialized_in_class and has_user_bases(info):
self.add_dependency(make_trigger(info.fullname + "." + name))
for base_info in non_trivial_bases(info):
# If the type of an attribute changes in a base class, we make references
# to the attribute in the subclass stale.
self.add_dependency(
make_trigger(base_info.fullname + "." + name),
target=make_trigger(info.fullname + "." + name),
)
for base_info in non_trivial_bases(info):
for name, node in base_info.names.items():
if self.use_logical_deps():
# Skip logical dependency if an attribute is not overridden. For example,
# in case of:
# class Base:
# x = 1
# y = 2
# class Sub(Base):
# x = 3
# we skip <Base.y> -> <Child.y>, because even if `y` is unannotated it
# doesn't affect precision of Liskov checking.
if name not in info.names:
continue
# __init__ and __new__ can be overridden with different signatures, so no
# logical dependency.
if name in ("__init__", "__new__"):
continue
self.add_dependency(
make_trigger(base_info.fullname + "." + name),
target=make_trigger(info.fullname + "." + name),
)
if not self.use_logical_deps():
# These dependencies are only useful for propagating changes --
# they aren't logical dependencies since __init__ and __new__ can be
# overridden with a different signature.
self.add_dependency(
make_trigger(base_info.fullname + ".__init__"),
target=make_trigger(info.fullname + ".__init__"),
)
self.add_dependency(
make_trigger(base_info.fullname + ".__new__"),
target=make_trigger(info.fullname + ".__new__"),
)
# If the set of abstract attributes change, this may invalidate class
# instantiation, or change the generated error message, since Python checks
# class abstract status when creating an instance.
self.add_dependency(
make_trigger(base_info.fullname + ".(abstract)"),
target=make_trigger(info.fullname + ".__init__"),
)
# If the base class abstract attributes change, subclass abstract
# attributes need to be recalculated.
self.add_dependency(make_trigger(base_info.fullname + ".(abstract)"))
def visit_import(self, o: Import) -> None:
for id, as_id in o.ids:
self.add_dependency(make_trigger(id), self.scope.current_target())
def visit_import_from(self, o: ImportFrom) -> None:
if self.use_logical_deps():
# Just importing a name doesn't create a logical dependency.
return
module_id, _ = correct_relative_import(
self.scope.current_module_id(), o.relative, o.id, self.is_package_init_file
)
self.add_dependency(make_trigger(module_id)) # needed if module is added/removed
for name, as_name in o.names:
self.add_dependency(make_trigger(module_id + "." + name))
def visit_import_all(self, o: ImportAll) -> None:
module_id, _ = correct_relative_import(
self.scope.current_module_id(), o.relative, o.id, self.is_package_init_file
)
# The current target needs to be rechecked if anything "significant" changes in the
# target module namespace (as the imported definitions will need to be updated).
self.add_dependency(make_wildcard_trigger(module_id))
def visit_block(self, o: Block) -> None:
if not o.is_unreachable:
super().visit_block(o)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
rvalue = o.rvalue
if isinstance(rvalue, CallExpr) and isinstance(rvalue.analyzed, TypeVarExpr):
analyzed = rvalue.analyzed
self.add_type_dependencies(
analyzed.upper_bound, target=make_trigger(analyzed.fullname)
)
for val in analyzed.values:
self.add_type_dependencies(val, target=make_trigger(analyzed.fullname))
# We need to re-analyze the definition if bound or value is deleted.
super().visit_call_expr(rvalue)
elif isinstance(rvalue, CallExpr) and isinstance(rvalue.analyzed, NamedTupleExpr):
# Depend on types of named tuple items.
info = rvalue.analyzed.info
prefix = f"{self.scope.current_full_target()}.{info.name}"
for name, symnode in info.names.items():
if not name.startswith("_") and isinstance(symnode.node, Var):
typ = symnode.node.type
if typ:
self.add_type_dependencies(typ)
self.add_type_dependencies(typ, target=make_trigger(prefix))
attr_target = make_trigger(f"{prefix}.{name}")
self.add_type_dependencies(typ, target=attr_target)
elif isinstance(rvalue, CallExpr) and isinstance(rvalue.analyzed, TypedDictExpr):
# Depend on the underlying typeddict type
info = rvalue.analyzed.info
assert info.typeddict_type is not None
prefix = f"{self.scope.current_full_target()}.{info.name}"
self.add_type_dependencies(info.typeddict_type, target=make_trigger(prefix))
elif isinstance(rvalue, CallExpr) and isinstance(rvalue.analyzed, EnumCallExpr):
            # Enum values are currently not checked, but for the future we add the deps on them
for name, symnode in rvalue.analyzed.info.names.items():
if isinstance(symnode.node, Var) and symnode.node.type:
self.add_type_dependencies(symnode.node.type)
elif o.is_alias_def:
assert len(o.lvalues) == 1
lvalue = o.lvalues[0]
assert isinstance(lvalue, NameExpr)
typ = get_proper_type(self.type_map.get(lvalue))
if isinstance(typ, FunctionLike) and typ.is_type_obj():
class_name = typ.type_object().fullname
self.add_dependency(make_trigger(class_name + ".__init__"))
self.add_dependency(make_trigger(class_name + ".__new__"))
if isinstance(rvalue, IndexExpr) and isinstance(rvalue.analyzed, TypeAliasExpr):
self.add_type_dependencies(rvalue.analyzed.node.target)
elif typ:
self.add_type_dependencies(typ)
else:
# Normal assignment
super().visit_assignment_stmt(o)
for lvalue in o.lvalues:
self.process_lvalue(lvalue)
items = o.lvalues + [rvalue]
for i in range(len(items) - 1):
lvalue = items[i]
rvalue = items[i + 1]
if isinstance(lvalue, TupleExpr):
self.add_attribute_dependency_for_expr(rvalue, "__iter__")
if o.type:
self.add_type_dependencies(o.type)
if self.use_logical_deps() and o.unanalyzed_type is None:
# Special case: for definitions without an explicit type like this:
# x = func(...)
# we add a logical dependency <func> -> <x>, because if `func` is not annotated,
# then it will make all points of use of `x` unchecked.
if (
isinstance(rvalue, CallExpr)
and isinstance(rvalue.callee, RefExpr)
and rvalue.callee.fullname
):
fname: str | None = None
if isinstance(rvalue.callee.node, TypeInfo):
# use actual __init__ as a dependency source
init = rvalue.callee.node.get("__init__")
if init and isinstance(init.node, FuncBase):
fname = init.node.fullname
else:
fname = rvalue.callee.fullname
if not fname:
return
for lv in o.lvalues:
if isinstance(lv, RefExpr) and lv.fullname and lv.is_new_def:
if lv.kind == LDEF:
return # local definitions don't generate logical deps
self.add_dependency(make_trigger(fname), make_trigger(lv.fullname))
def process_lvalue(self, lvalue: Expression) -> None:
"""Generate additional dependencies for an lvalue."""
if isinstance(lvalue, IndexExpr):
self.add_operator_method_dependency(lvalue.base, "__setitem__")
elif isinstance(lvalue, NameExpr):
if lvalue.kind in (MDEF, GDEF):
# Assignment to an attribute in the class body, or direct assignment to a
# global variable.
lvalue_type = self.get_non_partial_lvalue_type(lvalue)
type_triggers = self.get_type_triggers(lvalue_type)
attr_trigger = make_trigger(f"{self.scope.current_full_target()}.{lvalue.name}")
for type_trigger in type_triggers:
self.add_dependency(type_trigger, attr_trigger)
elif isinstance(lvalue, MemberExpr):
if self.is_self_member_ref(lvalue) and lvalue.is_new_def:
node = lvalue.node
if isinstance(node, Var):
info = node.info
if info and has_user_bases(info):
# Recheck Liskov for self definitions
self.add_dependency(make_trigger(info.fullname + "." + lvalue.name))
if lvalue.kind is None:
# Reference to a non-module attribute
if lvalue.expr not in self.type_map:
# Unreachable assignment -> not checked so no dependencies to generate.
return
object_type = self.type_map[lvalue.expr]
lvalue_type = self.get_non_partial_lvalue_type(lvalue)
type_triggers = self.get_type_triggers(lvalue_type)
for attr_trigger in self.attribute_triggers(object_type, lvalue.name):
for type_trigger in type_triggers:
self.add_dependency(type_trigger, attr_trigger)
elif isinstance(lvalue, TupleExpr):
for item in lvalue.items:
self.process_lvalue(item)
elif isinstance(lvalue, StarExpr):
self.process_lvalue(lvalue.expr)
    def is_self_member_ref(self, memberexpr: MemberExpr) -> bool:
        """Does memberexpr refer to an attribute of self?"""
if not isinstance(memberexpr.expr, NameExpr):
return False
node = memberexpr.expr.node
return isinstance(node, Var) and node.is_self
def get_non_partial_lvalue_type(self, lvalue: RefExpr) -> Type:
if lvalue not in self.type_map:
# Likely a block considered unreachable during type checking.
return UninhabitedType()
lvalue_type = get_proper_type(self.type_map[lvalue])
if isinstance(lvalue_type, PartialType):
if isinstance(lvalue.node, Var):
if lvalue.node.type:
lvalue_type = get_proper_type(lvalue.node.type)
else:
lvalue_type = UninhabitedType()
else:
# Probably a secondary, non-definition assignment that doesn't
# result in a non-partial type. We won't be able to infer any
# dependencies from this so just return something. (The first,
# definition assignment with a partial type is handled
# differently, in the semantic analyzer.)
assert not lvalue.is_new_def
return UninhabitedType()
return lvalue_type
def visit_operator_assignment_stmt(self, o: OperatorAssignmentStmt) -> None:
super().visit_operator_assignment_stmt(o)
self.process_lvalue(o.lvalue)
method = op_methods[o.op]
self.add_attribute_dependency_for_expr(o.lvalue, method)
if o.op in ops_with_inplace_method:
inplace_method = "__i" + method[2:]
self.add_attribute_dependency_for_expr(o.lvalue, inplace_method)
def visit_for_stmt(self, o: ForStmt) -> None:
super().visit_for_stmt(o)
if not o.is_async:
# __getitem__ is only used if __iter__ is missing but for simplicity we
# just always depend on both.
self.add_attribute_dependency_for_expr(o.expr, "__iter__")
self.add_attribute_dependency_for_expr(o.expr, "__getitem__")
if o.inferred_iterator_type:
self.add_attribute_dependency(o.inferred_iterator_type, "__next__")
else:
self.add_attribute_dependency_for_expr(o.expr, "__aiter__")
if o.inferred_iterator_type:
self.add_attribute_dependency(o.inferred_iterator_type, "__anext__")
self.process_lvalue(o.index)
if isinstance(o.index, TupleExpr):
# Process multiple assignment to index variables.
item_type = o.inferred_item_type
if item_type:
# This is similar to above.
self.add_attribute_dependency(item_type, "__iter__")
self.add_attribute_dependency(item_type, "__getitem__")
if o.index_type:
self.add_type_dependencies(o.index_type)
def visit_with_stmt(self, o: WithStmt) -> None:
super().visit_with_stmt(o)
for e in o.expr:
if not o.is_async:
self.add_attribute_dependency_for_expr(e, "__enter__")
self.add_attribute_dependency_for_expr(e, "__exit__")
else:
self.add_attribute_dependency_for_expr(e, "__aenter__")
self.add_attribute_dependency_for_expr(e, "__aexit__")
for typ in o.analyzed_types:
self.add_type_dependencies(typ)
def visit_del_stmt(self, o: DelStmt) -> None:
super().visit_del_stmt(o)
if isinstance(o.expr, IndexExpr):
self.add_attribute_dependency_for_expr(o.expr.base, "__delitem__")
# Expressions
def process_global_ref_expr(self, o: RefExpr) -> None:
if o.fullname:
self.add_dependency(make_trigger(o.fullname))
# If this is a reference to a type, generate a dependency to its
# constructor.
# IDEA: Avoid generating spurious dependencies for except statements,
# class attribute references, etc., if performance is a problem.
typ = get_proper_type(self.type_map.get(o))
if isinstance(typ, FunctionLike) and typ.is_type_obj():
class_name = typ.type_object().fullname
self.add_dependency(make_trigger(class_name + ".__init__"))
self.add_dependency(make_trigger(class_name + ".__new__"))
def visit_name_expr(self, o: NameExpr) -> None:
if o.kind == LDEF:
# We don't track dependencies to local variables, since they
# aren't externally visible.
return
if o.kind == MDEF:
# Direct reference to member is only possible in the scope that
# defined the name, so no dependency is required.
return
self.process_global_ref_expr(o)
def visit_member_expr(self, e: MemberExpr) -> None:
if isinstance(e.expr, RefExpr) and isinstance(e.expr.node, TypeInfo):
# Special case class attribute so that we don't depend on "__init__".
self.add_dependency(make_trigger(e.expr.node.fullname))
else:
super().visit_member_expr(e)
if e.kind is not None:
# Reference to a module attribute
self.process_global_ref_expr(e)
else:
# Reference to a non-module (or missing) attribute
if e.expr not in self.type_map:
# No type available -- this happens for unreachable code. Since it's unreachable,
# it wasn't type checked and we don't need to generate dependencies.
return
if isinstance(e.expr, RefExpr) and isinstance(e.expr.node, MypyFile):
# Special case: reference to a missing module attribute.
self.add_dependency(make_trigger(e.expr.node.fullname + "." + e.name))
return
typ = get_proper_type(self.type_map[e.expr])
self.add_attribute_dependency(typ, e.name)
if self.use_logical_deps() and isinstance(typ, AnyType):
name = self.get_unimported_fullname(e, typ)
if name is not None:
# Generate a logical dependency from an unimported
# definition (which comes from a missing module).
# Example:
# import missing # "missing" not in build
#
# def g() -> None:
# missing.f() # Generate dependency from "missing.f"
self.add_dependency(make_trigger(name))
def get_unimported_fullname(self, e: MemberExpr, typ: AnyType) -> str | None:
"""If e refers to an unimported definition, infer the fullname of this.
Return None if e doesn't refer to an unimported definition or if we can't
determine the name.
"""
suffix = ""
# Unwrap nested member expression to handle cases like "a.b.c.d" where
# "a.b" is a known reference to an unimported module. Find the base
# reference to an unimported module (such as "a.b") and the name suffix
# (such as "c.d") needed to build a full name.
while typ.type_of_any == TypeOfAny.from_another_any and isinstance(e.expr, MemberExpr):
suffix = "." + e.name + suffix
e = e.expr
if e.expr not in self.type_map:
return None
obj_type = get_proper_type(self.type_map[e.expr])
if not isinstance(obj_type, AnyType):
# Can't find the base reference to the unimported module.
return None
typ = obj_type
if typ.type_of_any == TypeOfAny.from_unimported_type and typ.missing_import_name:
# Infer the full name of the unimported definition.
return typ.missing_import_name + "." + e.name + suffix
return None
def visit_super_expr(self, e: SuperExpr) -> None:
# Arguments in "super(C, self)" won't generate useful logical deps.
if not self.use_logical_deps():
super().visit_super_expr(e)
if e.info is not None:
name = e.name
for base in non_trivial_bases(e.info):
self.add_dependency(make_trigger(base.fullname + "." + name))
if name in base.names:
# No need to depend on further base classes, since we found
# the target. This is safe since if the target gets
# deleted or modified, we'll trigger it.
break
def visit_call_expr(self, e: CallExpr) -> None:
if isinstance(e.callee, RefExpr) and e.callee.fullname == "builtins.isinstance":
self.process_isinstance_call(e)
else:
super().visit_call_expr(e)
typ = self.type_map.get(e.callee)
if typ is not None:
typ = get_proper_type(typ)
if not isinstance(typ, FunctionLike):
self.add_attribute_dependency(typ, "__call__")
def process_isinstance_call(self, e: CallExpr) -> None:
"""Process "isinstance(...)" in a way to avoid some extra dependencies."""
if len(e.args) == 2:
arg = e.args[1]
if (
isinstance(arg, RefExpr)
and arg.kind == GDEF
and isinstance(arg.node, TypeInfo)
and arg.fullname
):
# Special case to avoid redundant dependencies from "__init__".
self.add_dependency(make_trigger(arg.fullname))
return
# In uncommon cases generate normal dependencies. These will include
# spurious dependencies, but the performance impact is small.
super().visit_call_expr(e)
def visit_cast_expr(self, e: CastExpr) -> None:
super().visit_cast_expr(e)
self.add_type_dependencies(e.type)
def visit_assert_type_expr(self, e: AssertTypeExpr) -> None:
super().visit_assert_type_expr(e)
self.add_type_dependencies(e.type)
def visit_type_application(self, e: TypeApplication) -> None:
super().visit_type_application(e)
for typ in e.types:
self.add_type_dependencies(typ)
def visit_index_expr(self, e: IndexExpr) -> None:
super().visit_index_expr(e)
self.add_operator_method_dependency(e.base, "__getitem__")
def visit_unary_expr(self, e: UnaryExpr) -> None:
super().visit_unary_expr(e)
if e.op not in unary_op_methods:
return
method = unary_op_methods[e.op]
self.add_operator_method_dependency(e.expr, method)
def visit_op_expr(self, e: OpExpr) -> None:
super().visit_op_expr(e)
self.process_binary_op(e.op, e.left, e.right)
def visit_comparison_expr(self, e: ComparisonExpr) -> None:
super().visit_comparison_expr(e)
for i, op in enumerate(e.operators):
left = e.operands[i]
right = e.operands[i + 1]
self.process_binary_op(op, left, right)
def process_binary_op(self, op: str, left: Expression, right: Expression) -> None:
method = op_methods.get(op)
if method:
if op == "in":
self.add_operator_method_dependency(right, method)
else:
self.add_operator_method_dependency(left, method)
rev_method = reverse_op_methods.get(method)
if rev_method:
self.add_operator_method_dependency(right, rev_method)
def add_operator_method_dependency(self, e: Expression, method: str) -> None:
typ = get_proper_type(self.type_map.get(e))
if typ is not None:
self.add_operator_method_dependency_for_type(typ, method)
def add_operator_method_dependency_for_type(self, typ: ProperType, method: str) -> None:
# Note that operator methods can't be (non-metaclass) methods of type objects
# (that is, TypeType objects or Callables representing a type).
if isinstance(typ, TypeVarType):
typ = get_proper_type(typ.upper_bound)
if isinstance(typ, TupleType):
typ = typ.partial_fallback
if isinstance(typ, Instance):
trigger = make_trigger(typ.type.fullname + "." + method)
self.add_dependency(trigger)
elif isinstance(typ, UnionType):
for item in typ.items:
self.add_operator_method_dependency_for_type(get_proper_type(item), method)
elif isinstance(typ, FunctionLike) and typ.is_type_obj():
self.add_operator_method_dependency_for_type(typ.fallback, method)
elif isinstance(typ, TypeType):
if isinstance(typ.item, Instance) and typ.item.type.metaclass_type is not None:
self.add_operator_method_dependency_for_type(typ.item.type.metaclass_type, method)
def visit_generator_expr(self, e: GeneratorExpr) -> None:
super().visit_generator_expr(e)
for seq in e.sequences:
self.add_iter_dependency(seq)
def visit_dictionary_comprehension(self, e: DictionaryComprehension) -> None:
super().visit_dictionary_comprehension(e)
for seq in e.sequences:
self.add_iter_dependency(seq)
def visit_star_expr(self, e: StarExpr) -> None:
super().visit_star_expr(e)
self.add_iter_dependency(e.expr)
def visit_yield_from_expr(self, e: YieldFromExpr) -> None:
super().visit_yield_from_expr(e)
self.add_iter_dependency(e.expr)
def visit_await_expr(self, e: AwaitExpr) -> None:
super().visit_await_expr(e)
self.add_attribute_dependency_for_expr(e.expr, "__await__")
# Helpers
def add_type_alias_deps(self, target: str) -> None:
# Type aliases are special, because some of the dependencies are calculated
# in semanal.py, before they are expanded.
if target in self.alias_deps:
for alias in self.alias_deps[target]:
self.add_dependency(make_trigger(alias))
def add_dependency(self, trigger: str, target: str | None = None) -> None:
"""Add dependency from trigger to a target.
If the target is not given explicitly, use the current target.
"""
if trigger.startswith(
("<builtins.", "<typing.", "<mypy_extensions.", "<typing_extensions.")
):
# Don't track dependencies to certain library modules to keep the size of
# the dependencies manageable. These dependencies should only
# change on mypy version updates, which will require a full rebuild
# anyway.
return
if target is None:
target = self.scope.current_target()
self.map.setdefault(trigger, set()).add(target)
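# Illustrative sketch (names are assumptions, not from the mypy test suite): after
# analyzing a target "n.g" whose body reads "m.f", the map gains an entry like
#
#     self.map["<m.f>"] == {"n.g"}
#
# i.e. "if m.f changes, reprocess n.g".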
def add_type_dependencies(self, typ: Type, target: str | None = None) -> None:
"""Add dependencies to all components of a type.
Args:
target: If not None, override the default (current) target of the
generated dependency.
"""
for trigger in self.get_type_triggers(typ):
self.add_dependency(trigger, target)
def add_attribute_dependency(self, typ: Type, name: str) -> None:
"""Add dependencies for accessing a named attribute of a type."""
targets = self.attribute_triggers(typ, name)
for target in targets:
self.add_dependency(target)
def attribute_triggers(self, typ: Type, name: str) -> list[str]:
"""Return all triggers associated with the attribute of a type."""
typ = get_proper_type(typ)
if isinstance(typ, TypeVarType):
typ = get_proper_type(typ.upper_bound)
if isinstance(typ, TupleType):
typ = typ.partial_fallback
if isinstance(typ, Instance):
member = f"{typ.type.fullname}.{name}"
return [make_trigger(member)]
elif isinstance(typ, FunctionLike) and typ.is_type_obj():
member = f"{typ.type_object().fullname}.{name}"
triggers = [make_trigger(member)]
triggers.extend(self.attribute_triggers(typ.fallback, name))
return triggers
elif isinstance(typ, UnionType):
targets = []
for item in typ.items:
targets.extend(self.attribute_triggers(item, name))
return targets
elif isinstance(typ, TypeType):
triggers = self.attribute_triggers(typ.item, name)
if isinstance(typ.item, Instance) and typ.item.type.metaclass_type is not None:
triggers.append(
make_trigger(f"{typ.item.type.metaclass_type.type.fullname}.{name}")
)
return triggers
else:
return []
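# Illustrative examples (module/class names are assumptions, not real test data):
#
#     attribute_triggers(<Instance of m.C>, "f")       -> ["<m.C.f>"]
#     attribute_triggers(<Union of m.C and m.D>, "f")  -> ["<m.C.f>", "<m.D.f>"]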
def add_attribute_dependency_for_expr(self, e: Expression, name: str) -> None:
typ = self.type_map.get(e)
if typ is not None:
self.add_attribute_dependency(typ, name)
def add_iter_dependency(self, node: Expression) -> None:
typ = self.type_map.get(node)
if typ:
self.add_attribute_dependency(typ, "__iter__")
def use_logical_deps(self) -> bool:
return self.options is not None and self.options.logical_deps
def get_type_triggers(self, typ: Type) -> list[str]:
return get_type_triggers(typ, self.use_logical_deps())
def get_type_triggers(
typ: Type, use_logical_deps: bool, seen_aliases: set[TypeAliasType] | None = None
) -> list[str]:
"""Return all triggers that correspond to a type becoming stale."""
return typ.accept(TypeTriggersVisitor(use_logical_deps, seen_aliases))
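# Illustrative example (names are assumptions): for an Instance type m.C[m.D] with
# no last_known_value and no extra module attributes, this returns
#
#     ["<m.C>", "<m.D>"]
#
# i.e. one trigger for the class itself plus one per type argument.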
class TypeTriggersVisitor(TypeVisitor[List[str]]):
def __init__(
self, use_logical_deps: bool, seen_aliases: set[TypeAliasType] | None = None
) -> None:
self.deps: list[str] = []
self.seen_aliases: set[TypeAliasType] = seen_aliases or set()
self.use_logical_deps = use_logical_deps
def get_type_triggers(self, typ: Type) -> list[str]:
return get_type_triggers(typ, self.use_logical_deps, self.seen_aliases)
def visit_instance(self, typ: Instance) -> list[str]:
trigger = make_trigger(typ.type.fullname)
triggers = [trigger]
for arg in typ.args:
triggers.extend(self.get_type_triggers(arg))
if typ.last_known_value:
triggers.extend(self.get_type_triggers(typ.last_known_value))
if typ.extra_attrs and typ.extra_attrs.mod_name:
# Module as type effectively depends on all module attributes, use wildcard.
triggers.append(make_wildcard_trigger(typ.extra_attrs.mod_name))
return triggers
def visit_type_alias_type(self, typ: TypeAliasType) -> list[str]:
if typ in self.seen_aliases:
return []
self.seen_aliases.add(typ)
assert typ.alias is not None
trigger = make_trigger(typ.alias.fullname)
triggers = [trigger]
for arg in typ.args:
triggers.extend(self.get_type_triggers(arg))
# TODO: Now that type aliases are their own kind of type we can simplify
# the logic to rely on intermediate dependencies (like for instance types).
triggers.extend(self.get_type_triggers(typ.alias.target))
return triggers
def visit_any(self, typ: AnyType) -> list[str]:
if typ.missing_import_name is not None:
return [make_trigger(typ.missing_import_name)]
return []
def visit_none_type(self, typ: NoneType) -> list[str]:
return []
def visit_callable_type(self, typ: CallableType) -> list[str]:
triggers = []
for arg in typ.arg_types:
triggers.extend(self.get_type_triggers(arg))
triggers.extend(self.get_type_triggers(typ.ret_type))
# fallback is a metaclass type for class objects, and is
# processed separately.
return triggers
def visit_overloaded(self, typ: Overloaded) -> list[str]:
triggers = []
for item in typ.items:
triggers.extend(self.get_type_triggers(item))
return triggers
def visit_erased_type(self, t: ErasedType) -> list[str]:
# This type should exist only temporarily during type inference
assert False, "Should not see an erased type here"
def visit_deleted_type(self, typ: DeletedType) -> list[str]:
return []
def visit_partial_type(self, typ: PartialType) -> list[str]:
assert False, "Should not see a partial type here"
def visit_tuple_type(self, typ: TupleType) -> list[str]:
triggers = []
for item in typ.items:
triggers.extend(self.get_type_triggers(item))
triggers.extend(self.get_type_triggers(typ.partial_fallback))
return triggers
def visit_type_type(self, typ: TypeType) -> list[str]:
triggers = self.get_type_triggers(typ.item)
if not self.use_logical_deps:
old_triggers = triggers.copy()
for trigger in old_triggers:
triggers.append(trigger.rstrip(">") + ".__init__>")
triggers.append(trigger.rstrip(">") + ".__new__>")
return triggers
def visit_type_var(self, typ: TypeVarType) -> list[str]:
triggers = []
if typ.fullname:
triggers.append(make_trigger(typ.fullname))
if typ.upper_bound:
triggers.extend(self.get_type_triggers(typ.upper_bound))
if typ.default:
triggers.extend(self.get_type_triggers(typ.default))
for val in typ.values:
triggers.extend(self.get_type_triggers(val))
return triggers
def visit_param_spec(self, typ: ParamSpecType) -> list[str]:
triggers = []
if typ.fullname:
triggers.append(make_trigger(typ.fullname))
if typ.upper_bound:
triggers.extend(self.get_type_triggers(typ.upper_bound))
if typ.default:
triggers.extend(self.get_type_triggers(typ.default))
triggers.extend(self.get_type_triggers(typ.upper_bound))
return triggers
def visit_type_var_tuple(self, typ: TypeVarTupleType) -> list[str]:
triggers = []
if typ.fullname:
triggers.append(make_trigger(typ.fullname))
if typ.upper_bound:
triggers.extend(self.get_type_triggers(typ.upper_bound))
if typ.default:
triggers.extend(self.get_type_triggers(typ.default))
triggers.extend(self.get_type_triggers(typ.upper_bound))
return triggers
def visit_unpack_type(self, typ: UnpackType) -> list[str]:
return typ.type.accept(self)
def visit_parameters(self, typ: Parameters) -> list[str]:
triggers = []
for arg in typ.arg_types:
triggers.extend(self.get_type_triggers(arg))
return triggers
def visit_typeddict_type(self, typ: TypedDictType) -> list[str]:
triggers = []
for item in typ.items.values():
triggers.extend(self.get_type_triggers(item))
triggers.extend(self.get_type_triggers(typ.fallback))
return triggers
def visit_literal_type(self, typ: LiteralType) -> list[str]:
return self.get_type_triggers(typ.fallback)
def visit_unbound_type(self, typ: UnboundType) -> list[str]:
return []
def visit_uninhabited_type(self, typ: UninhabitedType) -> list[str]:
return []
def visit_union_type(self, typ: UnionType) -> list[str]:
triggers = []
for item in typ.items:
triggers.extend(self.get_type_triggers(item))
return triggers
def merge_dependencies(new_deps: dict[str, set[str]], deps: dict[str, set[str]]) -> None:
for trigger, targets in new_deps.items():
deps.setdefault(trigger, set()).update(targets)
def non_trivial_bases(info: TypeInfo) -> list[TypeInfo]:
return [base for base in info.mro[1:] if base.fullname != "builtins.object"]
def has_user_bases(info: TypeInfo) -> bool:
return any(base.module_name not in ("builtins", "typing", "enum") for base in info.mro[1:])
def dump_all_dependencies(
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
python_version: tuple[int, int],
options: Options,
) -> None:
"""Generate dependencies for all interesting modules and print them to stdout."""
all_deps: dict[str, set[str]] = {}
for id, node in modules.items():
# Uncomment for debugging:
# print('processing', id)
if id in ("builtins", "typing") or "/typeshed/" in node.path:
continue
assert id == node.fullname
deps = get_dependencies(node, type_map, python_version, options)
for trigger, targets in deps.items():
all_deps.setdefault(trigger, set()).update(targets)
type_state.add_all_protocol_deps(all_deps)
for trigger, targets in sorted(all_deps.items(), key=lambda x: x[0]):
print(trigger)
for target in sorted(targets):
print(f" {target}")
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/server/deps.py
|
Python
|
NOASSERTION
| 49,736 |
"""Check for duplicate AST nodes after merge."""
from __future__ import annotations
from typing import Final
from mypy.nodes import Decorator, FakeInfo, FuncDef, SymbolNode, Var
from mypy.server.objgraph import get_path, get_reachable_graph
# If True, print more verbose output on failure.
DUMP_MISMATCH_NODES: Final = False
def check_consistency(o: object) -> None:
"""Fail if there are two AST nodes with the same fullname reachable from 'o'.
Raise AssertionError on failure and print some debugging output.
"""
seen, parents = get_reachable_graph(o)
reachable = list(seen.values())
syms = [x for x in reachable if isinstance(x, SymbolNode)]
m: dict[str, SymbolNode] = {}
for sym in syms:
if isinstance(sym, FakeInfo):
continue
fn = sym.fullname
# Skip None names, since they are ambiguous.
# TODO: Everything should have a proper full name?
if fn is None:
continue
# Skip stuff that should be expected to have duplicate names
if isinstance(sym, (Var, Decorator)):
continue
if isinstance(sym, FuncDef) and sym.is_overload:
continue
if fn not in m:
m[sym.fullname] = sym
continue
# We have trouble and need to decide what to do about it.
sym1, sym2 = sym, m[fn]
# If the type changed, then it shouldn't have been merged.
if type(sym1) is not type(sym2):
continue
path1 = get_path(sym1, seen, parents)
path2 = get_path(sym2, seen, parents)
if fn in m:
print(f"\nDuplicate {type(sym).__name__!r} nodes with fullname {fn!r} found:")
print("[1] %d: %s" % (id(sym1), path_to_str(path1)))
print("[2] %d: %s" % (id(sym2), path_to_str(path2)))
if DUMP_MISMATCH_NODES and fn in m:
# Add verbose output with full AST node contents.
print("---")
print(id(sym1), sym1)
print("---")
print(id(sym2), sym2)
assert sym.fullname not in m
def path_to_str(path: list[tuple[object, object]]) -> str:
result = "<root>"
for attr, obj in path:
t = type(obj).__name__
if t in ("dict", "tuple", "SymbolTable", "list"):
result += f"[{repr(attr)}]"
else:
if isinstance(obj, Var):
result += f".{attr}({t}:{obj.name})"
elif t in ("BuildManager", "FineGrainedBuildManager"):
# Omit class name for some classes that aren't part of a class
# hierarchy since there isn't much ambiguity.
result += f".{attr}"
else:
result += f".{attr}({t})"
return result
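# Illustrative output (object names are assumptions): a path from a build manager
# root down to a module's symbol table might render as
#
#     <root>.manager.modules['m'].names(MypyFile)
#
# Dict-like containers are rendered as ['key']; other attributes as .attr(ContainerType).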
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/server/mergecheck.py
|
Python
|
NOASSERTION
| 2,760 |
"""Find all objects reachable from a root object."""
from __future__ import annotations
import types
import weakref
from collections.abc import Iterable
from typing import Final, Iterator, Mapping
method_descriptor_type: Final = type(object.__dir__)
method_wrapper_type: Final = type(object().__ne__)
wrapper_descriptor_type: Final = type(object.__ne__)
FUNCTION_TYPES: Final = (
types.BuiltinFunctionType,
types.FunctionType,
types.MethodType,
method_descriptor_type,
wrapper_descriptor_type,
method_wrapper_type,
)
ATTR_BLACKLIST: Final = {"__doc__", "__name__", "__class__", "__dict__"}
# Instances of these types can't have references to other objects
ATOMIC_TYPE_BLACKLIST: Final = {bool, int, float, str, type(None), object}
# Don't look at most attributes of these types
COLLECTION_TYPE_BLACKLIST: Final = {list, set, dict, tuple}
# Don't return these objects
TYPE_BLACKLIST: Final = {weakref.ReferenceType}
def isproperty(o: object, attr: str) -> bool:
return isinstance(getattr(type(o), attr, None), property)
def get_edge_candidates(o: object) -> Iterator[tuple[object, object]]:
# use getattr because mypyc expects dict, not mappingproxy
if "__getattribute__" in getattr(type(o), "__dict__"): # noqa: B009
return
if type(o) not in COLLECTION_TYPE_BLACKLIST:
for attr in dir(o):
try:
if attr not in ATTR_BLACKLIST and hasattr(o, attr) and not isproperty(o, attr):
e = getattr(o, attr)
if type(e) not in ATOMIC_TYPE_BLACKLIST:
yield attr, e
except AssertionError:
pass
if isinstance(o, Mapping):
yield from o.items()
elif isinstance(o, Iterable) and not isinstance(o, str):
for i, e in enumerate(o):
yield i, e
def get_edges(o: object) -> Iterator[tuple[object, object]]:
for s, e in get_edge_candidates(o):
if isinstance(e, FUNCTION_TYPES):
# We don't want to collect methods, but do want to collect values
# in closures and self pointers to other objects
if hasattr(e, "__closure__"):
yield (s, "__closure__"), e.__closure__
if hasattr(e, "__self__"):
se = e.__self__
if se is not o and se is not type(o) and hasattr(s, "__self__"):
yield s.__self__, se
else:
if type(e) not in TYPE_BLACKLIST:
yield s, e
def get_reachable_graph(root: object) -> tuple[dict[int, object], dict[int, tuple[int, object]]]:
parents = {}
seen = {id(root): root}
worklist = [root]
while worklist:
o = worklist.pop()
for s, e in get_edges(o):
if id(e) in seen:
continue
parents[id(e)] = (id(o), s)
seen[id(e)] = e
worklist.append(e)
return seen, parents
def get_path(
o: object, seen: dict[int, object], parents: dict[int, tuple[int, object]]
) -> list[tuple[object, object]]:
path = []
while id(o) in parents:
pid, attr = parents[id(o)]
o = seen[pid]
path.append((attr, o))
path.reverse()
return path
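# Illustrative usage (this mirrors how mypy.server.mergecheck drives this module):
#
#     seen, parents = get_reachable_graph(root)        # all objects reachable from root
#     path = get_path(duplicate_node, seen, parents)   # edge chain from root to the node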
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/server/objgraph.py
|
Python
|
NOASSERTION
| 3,230 |
"""Find all subexpressions of an AST node."""
from __future__ import annotations
from mypy.nodes import (
AssertTypeExpr,
AssignmentExpr,
AwaitExpr,
CallExpr,
CastExpr,
ComparisonExpr,
ConditionalExpr,
DictExpr,
DictionaryComprehension,
Expression,
GeneratorExpr,
IndexExpr,
LambdaExpr,
ListComprehension,
ListExpr,
MemberExpr,
Node,
OpExpr,
RevealExpr,
SetComprehension,
SetExpr,
SliceExpr,
StarExpr,
TupleExpr,
TypeApplication,
UnaryExpr,
YieldExpr,
YieldFromExpr,
)
from mypy.traverser import TraverserVisitor
def get_subexpressions(node: Node) -> list[Expression]:
visitor = SubexpressionFinder()
node.accept(visitor)
return visitor.expressions
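# Illustrative example (assumed expression, not from the test suite): for the
# expression "f(x + 1)", the returned list contains the CallExpr itself, the
# NameExpr "f", the OpExpr "x + 1", the NameExpr "x" and the IntExpr "1".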
class SubexpressionFinder(TraverserVisitor):
def __init__(self) -> None:
self.expressions: list[Expression] = []
def visit_int_expr(self, o: Expression) -> None:
self.add(o)
def visit_name_expr(self, o: Expression) -> None:
self.add(o)
def visit_float_expr(self, o: Expression) -> None:
self.add(o)
def visit_str_expr(self, o: Expression) -> None:
self.add(o)
def visit_bytes_expr(self, o: Expression) -> None:
self.add(o)
def visit_unicode_expr(self, o: Expression) -> None:
self.add(o)
def visit_complex_expr(self, o: Expression) -> None:
self.add(o)
def visit_ellipsis(self, o: Expression) -> None:
self.add(o)
def visit_super_expr(self, o: Expression) -> None:
self.add(o)
def visit_type_var_expr(self, o: Expression) -> None:
self.add(o)
def visit_type_alias_expr(self, o: Expression) -> None:
self.add(o)
def visit_namedtuple_expr(self, o: Expression) -> None:
self.add(o)
def visit_typeddict_expr(self, o: Expression) -> None:
self.add(o)
def visit__promote_expr(self, o: Expression) -> None:
self.add(o)
def visit_newtype_expr(self, o: Expression) -> None:
self.add(o)
def visit_member_expr(self, e: MemberExpr) -> None:
self.add(e)
super().visit_member_expr(e)
def visit_yield_from_expr(self, e: YieldFromExpr) -> None:
self.add(e)
super().visit_yield_from_expr(e)
def visit_yield_expr(self, e: YieldExpr) -> None:
self.add(e)
super().visit_yield_expr(e)
def visit_call_expr(self, e: CallExpr) -> None:
self.add(e)
super().visit_call_expr(e)
def visit_op_expr(self, e: OpExpr) -> None:
self.add(e)
super().visit_op_expr(e)
def visit_comparison_expr(self, e: ComparisonExpr) -> None:
self.add(e)
super().visit_comparison_expr(e)
def visit_slice_expr(self, e: SliceExpr) -> None:
self.add(e)
super().visit_slice_expr(e)
def visit_cast_expr(self, e: CastExpr) -> None:
self.add(e)
super().visit_cast_expr(e)
def visit_assert_type_expr(self, e: AssertTypeExpr) -> None:
self.add(e)
super().visit_assert_type_expr(e)
def visit_reveal_expr(self, e: RevealExpr) -> None:
self.add(e)
super().visit_reveal_expr(e)
def visit_assignment_expr(self, e: AssignmentExpr) -> None:
self.add(e)
super().visit_assignment_expr(e)
def visit_unary_expr(self, e: UnaryExpr) -> None:
self.add(e)
super().visit_unary_expr(e)
def visit_list_expr(self, e: ListExpr) -> None:
self.add(e)
super().visit_list_expr(e)
def visit_tuple_expr(self, e: TupleExpr) -> None:
self.add(e)
super().visit_tuple_expr(e)
def visit_dict_expr(self, e: DictExpr) -> None:
self.add(e)
super().visit_dict_expr(e)
def visit_set_expr(self, e: SetExpr) -> None:
self.add(e)
super().visit_set_expr(e)
def visit_index_expr(self, e: IndexExpr) -> None:
self.add(e)
super().visit_index_expr(e)
def visit_generator_expr(self, e: GeneratorExpr) -> None:
self.add(e)
super().visit_generator_expr(e)
def visit_dictionary_comprehension(self, e: DictionaryComprehension) -> None:
self.add(e)
super().visit_dictionary_comprehension(e)
def visit_list_comprehension(self, e: ListComprehension) -> None:
self.add(e)
super().visit_list_comprehension(e)
def visit_set_comprehension(self, e: SetComprehension) -> None:
self.add(e)
super().visit_set_comprehension(e)
def visit_conditional_expr(self, e: ConditionalExpr) -> None:
self.add(e)
super().visit_conditional_expr(e)
def visit_type_application(self, e: TypeApplication) -> None:
self.add(e)
super().visit_type_application(e)
def visit_lambda_expr(self, e: LambdaExpr) -> None:
self.add(e)
super().visit_lambda_expr(e)
def visit_star_expr(self, e: StarExpr) -> None:
self.add(e)
super().visit_star_expr(e)
def visit_await_expr(self, e: AwaitExpr) -> None:
self.add(e)
super().visit_await_expr(e)
def add(self, e: Expression) -> None:
self.expressions.append(e)
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/server/subexpr.py
|
Python
|
NOASSERTION
| 5,202 |
from __future__ import annotations
def trigger_to_target(s: str) -> str:
assert s[0] == "<"
# Strip off the angle brackets
s = s[1:-1]
# If there is a [wildcard] or similar, strip that off too
if s[-1] == "]":
s = s.split("[")[0]
return s
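# Illustrative examples (trigger names are assumptions):
#
#     trigger_to_target("<m.f>")          == "m.f"
#     trigger_to_target("<m[wildcard]>")  == "m"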
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/server/target.py
|
Python
|
NOASSERTION
| 273 |
"""AST triggers that are used for fine-grained dependency handling."""
from __future__ import annotations
from typing import Final
# Used as a suffix for triggers to handle "from m import *" dependencies (see also
# make_wildcard_trigger)
WILDCARD_TAG: Final = "[wildcard]"
def make_trigger(name: str) -> str:
return f"<{name}>"
def make_wildcard_trigger(module: str) -> str:
"""Special trigger fired when any top-level name is changed in a module.
Note that this is different from a module trigger, as module triggers are only
fired if the module is created, deleted, or replaced with a non-module, whereas
a wildcard trigger is triggered for namespace changes.
This is used for "from m import *" dependencies.
"""
return f"<{module}{WILDCARD_TAG}>"
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/server/trigger.py
|
Python
|
NOASSERTION
| 793 |
"""Update build by processing changes using fine-grained dependencies.
Use fine-grained dependencies to update targets in other modules that
may be affected by externally-visible changes in the changed modules.
This forms the core of the fine-grained incremental daemon mode. This
module is not used at all by the 'classic' (non-daemon) incremental
mode.
Here is some motivation for this mode:
* By keeping program state in memory between incremental runs, we
only have to process changed modules, not their dependencies. The
classic incremental mode has to deserialize the symbol tables of
all dependencies of changed modules, which can be slow for large
programs.
* Fine-grained dependencies allow processing only the relevant parts
of modules indirectly affected by a change. Say, if only one function
in a large module is affected by a change in another module, only this
function is processed. The classic incremental mode always processes
an entire file as a unit, which is typically much slower.
* It's possible to independently process individual modules within an
import cycle (SCC). Small incremental changes can be fast independent
of the size of the related SCC. In classic incremental mode, any change
within a SCC requires the entire SCC to be processed, which can slow
things down considerably.
Some terms:
* A *target* is a function/method definition or the top level of a module.
We refer to targets using their fully qualified name (e.g.
'mod.Cls.method'). Targets are the smallest units of processing during
fine-grained incremental checking.
* A *trigger* represents the properties of a part of a program, and it
gets triggered/fired when these properties change. For example,
'<mod.func>' refers to a module-level function. It gets triggered if
the signature of the function changes, or if the function is removed,
for example.
Some program state is maintained across multiple build increments in
memory:
* The full ASTs of all modules are stored in memory all the time (this
includes the type map).
* A fine-grained dependency map is maintained, which maps triggers to
affected program locations (these can be targets, triggers, or
classes). The latter determine what other parts of a program need to
be processed again due to a fired trigger.
Here's a summary of how a fine-grained incremental program update happens:
* Determine which modules have changes in their source code since the
previous update.
* Process changed modules one at a time. Perform a separate full update
for each changed module, but only report the errors after all modules
have been processed, since the intermediate states can generate bogus
errors due to only seeing a partial set of changes.
* Each changed module is processed in full. We parse the module, and
run semantic analysis to create a new AST and symbol table for the
module. Reuse the existing ASTs and symbol tables of modules that
have no changes in their source code. At the end of this stage, we have
two ASTs and symbol tables for the changed module (the old and the new
versions). The latter AST has not yet been type checked.
* Take a snapshot of the old symbol table. This is used later to determine
which properties of the module have changed and which triggers to fire.
* Merge the old AST with the new AST, preserving the identities of
externally visible AST nodes for which we can find a corresponding node
in the new AST. (Look at mypy.server.astmerge for the details.) This
way all external references to AST nodes in the changed module will
continue to point to the right nodes (assuming they still have a valid
target).
* Type check the new module.
* Take another snapshot of the symbol table of the changed module.
Look at the differences between the old and new snapshots to determine
which parts of the changed modules have changed. The result is a set of
fired triggers.
* Using the dependency map and the fired triggers, decide which other
targets have become stale and need to be reprocessed.
* Create new fine-grained dependencies for the changed module. We don't
garbage collect old dependencies, since extra dependencies are relatively
harmless (they take some memory and can theoretically slow things down
a bit by causing redundant work). This is implemented in
mypy.server.deps.
* Strip the stale AST nodes that we found above. This returns them to a
state resembling the end of semantic analysis pass 1. We'll run semantic
analysis again on the existing AST nodes, and since semantic analysis
is not idempotent, we need to revert some changes made during semantic
analysis. This is implemented in mypy.server.aststrip.
* Run semantic analyzer passes 2 and 3 on the stale AST nodes, and type
check them. We also need to do the symbol table snapshot comparison
dance to find any changes, and we need to merge ASTs to preserve AST node
identities.
* If some triggers have been fired, continue processing and repeat the
previous steps until no triggers are fired.
This module is tested using end-to-end fine-grained incremental mode
test cases (test-data/unit/fine-grained*.test).
"""
from __future__ import annotations
import os
import re
import sys
import time
from typing import Callable, Final, NamedTuple, Sequence, Union
from typing_extensions import TypeAlias as _TypeAlias
from mypy.build import (
DEBUG_FINE_GRAINED,
FAKE_ROOT_MODULE,
BuildManager,
BuildResult,
Graph,
State,
load_graph,
process_fresh_modules,
)
from mypy.checker import FineGrainedDeferredNode
from mypy.errors import CompileError
from mypy.fscache import FileSystemCache
from mypy.modulefinder import BuildSource
from mypy.nodes import (
Decorator,
FuncDef,
ImportFrom,
MypyFile,
OverloadedFuncDef,
SymbolNode,
SymbolTable,
TypeInfo,
)
from mypy.options import Options
from mypy.semanal_main import (
core_modules,
semantic_analysis_for_scc,
semantic_analysis_for_targets,
)
from mypy.server.astdiff import (
SymbolSnapshot,
compare_symbol_table_snapshots,
snapshot_symbol_table,
)
from mypy.server.astmerge import merge_asts
from mypy.server.aststrip import SavedAttributes, strip_target
from mypy.server.deps import get_dependencies_of_target, merge_dependencies
from mypy.server.target import trigger_to_target
from mypy.server.trigger import WILDCARD_TAG, make_trigger
from mypy.typestate import type_state
from mypy.util import module_prefix, split_target
MAX_ITER: Final = 1000
SENSITIVE_INTERNAL_MODULES = tuple(core_modules) + ("mypy_extensions", "typing_extensions")
class FineGrainedBuildManager:
def __init__(self, result: BuildResult) -> None:
"""Initialize fine-grained build based on a batch build.
Args:
result: Result from the initialized build.
The manager and graph will be taken over by this class.
manager: State of the build (mutated by this class)
graph: Additional state of the build (mutated by this class)
"""
manager = result.manager
self.manager = manager
self.graph = result.graph
self.previous_modules = get_module_to_path_map(self.graph)
self.deps = manager.fg_deps
# Merge in any root dependencies that may not have been loaded
merge_dependencies(manager.load_fine_grained_deps(FAKE_ROOT_MODULE), self.deps)
self.previous_targets_with_errors = manager.errors.targets()
self.previous_messages: list[str] = result.errors.copy()
# Module, if any, that had blocking errors in the last run as (id, path) tuple.
self.blocking_error: tuple[str, str] | None = None
# Modules that we haven't processed yet but that are known to be stale.
self.stale: list[tuple[str, str]] = []
# Disable the cache so that load_graph doesn't try going back to disk
# for the cache.
self.manager.cache_enabled = False
# Some hints to the test suite about what is going on:
# Active triggers during the last update
self.triggered: list[str] = []
# Modules passed to update during the last update
self.changed_modules: list[tuple[str, str]] = []
# Modules processed during the last update
self.updated_modules: list[str] = []
# Targets processed during last update (for testing only).
self.processed_targets: list[str] = []
def update(
self,
changed_modules: list[tuple[str, str]],
removed_modules: list[tuple[str, str]],
followed: bool = False,
) -> list[str]:
"""Update previous build result by processing changed modules.
Also propagate changes to other modules as needed, but only process
those parts of other modules that are affected by the changes. Retain
the existing ASTs and symbol tables of unaffected modules.
Reuses original BuildManager and Graph.
Args:
changed_modules: Modules changed since the previous update/build; each is
a (module id, path) tuple. Includes modified and added modules.
Assume this is correct; it's not validated here.
removed_modules: Modules that have been deleted since the previous update
or removed from the build.
followed: If True, the modules were found through following imports
Returns:
A list of errors.
"""
self.processed_targets.clear()
changed_modules = changed_modules + removed_modules
removed_set = {module for module, _ in removed_modules}
self.changed_modules = changed_modules
if not changed_modules:
return self.previous_messages
# Reset find_module's caches for the new build.
self.manager.find_module_cache.clear()
self.triggered = []
self.updated_modules = []
changed_modules = dedupe_modules(changed_modules + self.stale)
initial_set = {id for id, _ in changed_modules}
self.manager.log_fine_grained(
"==== update %s ====" % ", ".join(repr(id) for id, _ in changed_modules)
)
if self.previous_targets_with_errors and is_verbose(self.manager):
self.manager.log_fine_grained(
"previous targets with errors: %s" % sorted(self.previous_targets_with_errors)
)
blocking_error = None
if self.blocking_error:
# Handle blocking errors first. We'll exit as soon as we find a
# module that still has blocking errors.
self.manager.log_fine_grained(f"existing blocker: {self.blocking_error[0]}")
changed_modules = dedupe_modules([self.blocking_error] + changed_modules)
blocking_error = self.blocking_error[0]
self.blocking_error = None
while True:
result = self.update_one(
changed_modules, initial_set, removed_set, blocking_error, followed
)
changed_modules, (next_id, next_path), blocker_messages = result
if blocker_messages is not None:
self.blocking_error = (next_id, next_path)
self.stale = changed_modules
messages = blocker_messages
break
# It looks like we are done processing everything, so now
# reprocess all targets with errors. We are careful to
# support the possibility that reprocessing an errored module
# might trigger loading of a module, but I am not sure
# if this can really happen.
if not changed_modules:
# N.B: We just checked next_id, so manager.errors contains
# the errors from it. Thus we consider next_id up to date
# when propagating changes from the errored targets,
# which prevents us from reprocessing errors in it.
changed_modules = propagate_changes_using_dependencies(
self.manager,
self.graph,
self.deps,
set(),
{next_id},
self.previous_targets_with_errors,
self.processed_targets,
)
changed_modules = dedupe_modules(changed_modules)
if not changed_modules:
# Preserve state needed for the next update.
self.previous_targets_with_errors = self.manager.errors.targets()
messages = self.manager.errors.new_messages()
break
messages = sort_messages_preserving_file_order(messages, self.previous_messages)
self.previous_messages = messages.copy()
return messages
def trigger(self, target: str) -> list[str]:
"""Trigger a specific target explicitly.
This is intended for use by the suggestions engine.
"""
self.manager.errors.reset()
changed_modules = propagate_changes_using_dependencies(
self.manager,
self.graph,
self.deps,
set(),
set(),
self.previous_targets_with_errors | {target},
[],
)
# Preserve state needed for the next update.
self.previous_targets_with_errors = self.manager.errors.targets()
self.previous_messages = self.manager.errors.new_messages().copy()
return self.update(changed_modules, [])
def flush_cache(self) -> None:
"""Flush AST cache.
This needs to be called after each increment, or file changes won't
be detected reliably.
"""
self.manager.ast_cache.clear()
def update_one(
self,
changed_modules: list[tuple[str, str]],
initial_set: set[str],
removed_set: set[str],
blocking_error: str | None,
followed: bool,
) -> tuple[list[tuple[str, str]], tuple[str, str], list[str] | None]:
"""Process a module from the list of changed modules.
Returns:
Tuple with these items:
- Updated list of pending changed modules as (module id, path) tuples
- Module which was actually processed as (id, path) tuple
- If there was a blocking error, the error messages from it
"""
t0 = time.time()
next_id, next_path = changed_modules.pop(0)
# If we have a module with a blocking error that is no longer
# in the import graph, we must skip it as otherwise we'll be
# stuck with the blocking error.
if (
next_id == blocking_error
and next_id not in self.previous_modules
and next_id not in initial_set
):
self.manager.log_fine_grained(
f"skip {next_id!r} (module with blocking error not in import graph)"
)
return changed_modules, (next_id, next_path), None
result = self.update_module(next_id, next_path, next_id in removed_set, followed)
remaining, (next_id, next_path), blocker_messages = result
changed_modules = [(id, path) for id, path in changed_modules if id != next_id]
changed_modules = dedupe_modules(remaining + changed_modules)
t1 = time.time()
self.manager.log_fine_grained(
f"update once: {next_id} in {t1 - t0:.3f}s - {len(changed_modules)} left"
)
return changed_modules, (next_id, next_path), blocker_messages
def update_module(
self, module: str, path: str, force_removed: bool, followed: bool
) -> tuple[list[tuple[str, str]], tuple[str, str], list[str] | None]:
"""Update a single modified module.
If the module contains imports of previously unseen modules, only process one of
the new modules and return the remaining work to be done.
Args:
module: Id of the module
path: File system path of the module
force_removed: If True, consider module removed from the build even if path
exists (used for removing an existing file from the build)
followed: Was this found via import following?
Returns:
Tuple with these items:
- Remaining modules to process as (module id, path) tuples
- Module which was actually processed as (id, path) tuple
- If there was a blocking error, the error messages from it
"""
self.manager.log_fine_grained(f"--- update single {module!r} ---")
self.updated_modules.append(module)
# builtins and friends could potentially get triggered because
# of protocol stuff, but nothing good could possibly come from
# actually updating them.
if module in SENSITIVE_INTERNAL_MODULES:
return [], (module, path), None
manager = self.manager
previous_modules = self.previous_modules
graph = self.graph
ensure_deps_loaded(module, self.deps, graph)
# If this is an already existing module, make sure that we have
# its tree loaded so that we can snapshot it for comparison.
ensure_trees_loaded(manager, graph, [module])
t0 = time.time()
# Record symbol table snapshot of old version the changed module.
old_snapshots: dict[str, dict[str, SymbolSnapshot]] = {}
if module in manager.modules:
snapshot = snapshot_symbol_table(module, manager.modules[module].names)
old_snapshots[module] = snapshot
manager.errors.reset()
self.processed_targets.append(module)
result = update_module_isolated(
module, path, manager, previous_modules, graph, force_removed, followed
)
if isinstance(result, BlockedUpdate):
# Blocking error -- just give up
module, path, remaining, errors = result
self.previous_modules = get_module_to_path_map(graph)
return remaining, (module, path), errors
assert isinstance(result, NormalUpdate) # Work around #4124
module, path, remaining, tree = result
# TODO: What to do with stale dependencies?
t1 = time.time()
triggered = calculate_active_triggers(manager, old_snapshots, {module: tree})
if is_verbose(self.manager):
filtered = [trigger for trigger in triggered if not trigger.endswith("__>")]
self.manager.log_fine_grained(f"triggered: {sorted(filtered)!r}")
self.triggered.extend(triggered | self.previous_targets_with_errors)
if module in graph:
graph[module].update_fine_grained_deps(self.deps)
graph[module].free_state()
remaining += propagate_changes_using_dependencies(
manager,
graph,
self.deps,
triggered,
{module},
targets_with_errors=set(),
processed_targets=self.processed_targets,
)
t2 = time.time()
manager.add_stats(update_isolated_time=t1 - t0, propagate_time=t2 - t1)
# Preserve state needed for the next update.
self.previous_targets_with_errors.update(manager.errors.targets())
self.previous_modules = get_module_to_path_map(graph)
return remaining, (module, path), None
def find_unloaded_deps(
manager: BuildManager, graph: dict[str, State], initial: Sequence[str]
) -> list[str]:
"""Find all the deps of the nodes in initial that haven't had their tree loaded.
The key invariant here is that if a module is loaded, so are all
of its dependencies. This means that when we encounter a loaded
module, we don't need to explore its dependencies. (This
invariant is slightly violated when dependencies are added, which
can be handled by calling find_unloaded_deps directly on the new
dependencies.)
"""
worklist = list(initial)
seen: set[str] = set()
unloaded = []
while worklist:
node = worklist.pop()
if node in seen or node not in graph:
continue
seen.add(node)
if node not in manager.modules:
ancestors = graph[node].ancestors or []
worklist.extend(graph[node].dependencies + ancestors)
unloaded.append(node)
return unloaded
def ensure_deps_loaded(module: str, deps: dict[str, set[str]], graph: dict[str, State]) -> None:
"""Ensure that the dependencies on a module are loaded.
Dependencies are loaded into the 'deps' dictionary.
This also requires loading dependencies from any parent modules,
since dependencies will get stored with parent modules when a module
doesn't exist.
"""
if module in graph and graph[module].fine_grained_deps_loaded:
return
parts = module.split(".")
for i in range(len(parts)):
base = ".".join(parts[: i + 1])
if base in graph and not graph[base].fine_grained_deps_loaded:
merge_dependencies(graph[base].load_fine_grained_deps(), deps)
graph[base].fine_grained_deps_loaded = True
def ensure_trees_loaded(
manager: BuildManager, graph: dict[str, State], initial: Sequence[str]
) -> None:
"""Ensure that the modules in initial and their deps have loaded trees."""
to_process = find_unloaded_deps(manager, graph, initial)
if to_process:
if is_verbose(manager):
manager.log_fine_grained(
"Calling process_fresh_modules on set of size {} ({})".format(
len(to_process), sorted(to_process)
)
)
process_fresh_modules(graph, to_process, manager)
# The result of update_module_isolated when no blockers, with these items:
#
# - Id of the changed module (can be different from the module argument)
# - Path of the changed module
# - New AST for the changed module (None if module was deleted)
# - Remaining changed modules that are not processed yet as (module id, path)
# tuples (non-empty if the original changed module imported other new
# modules)
class NormalUpdate(NamedTuple):
module: str
path: str
remaining: list[tuple[str, str]]
tree: MypyFile | None
# The result of update_module_isolated when there is a blocking error. Items
# are similar to NormalUpdate (but there are fewer).
class BlockedUpdate(NamedTuple):
module: str
path: str
remaining: list[tuple[str, str]]
messages: list[str]
UpdateResult: _TypeAlias = Union[NormalUpdate, BlockedUpdate]
def update_module_isolated(
module: str,
path: str,
manager: BuildManager,
previous_modules: dict[str, str],
graph: Graph,
force_removed: bool,
followed: bool,
) -> UpdateResult:
"""Build a new version of one changed module only.
Don't propagate changes elsewhere in the program. Raise CompileError on
encountering a blocking error.
Args:
module: Changed module (modified, created or deleted)
path: Path of the changed module
manager: Build manager
graph: Build graph
force_removed: If True, consider the module removed from the build even if the
file exists
Returns a named tuple describing the result (see above for details).
"""
if module not in graph:
manager.log_fine_grained(f"new module {module!r}")
if not manager.fscache.isfile(path) or force_removed:
delete_module(module, path, graph, manager)
return NormalUpdate(module, path, [], None)
sources = get_sources(manager.fscache, previous_modules, [(module, path)], followed)
if module in manager.missing_modules:
manager.missing_modules.remove(module)
orig_module = module
orig_state = graph.get(module)
orig_tree = manager.modules.get(module)
def restore(ids: list[str]) -> None:
# For each of the modules in ids, restore that id's old
# manager.modules and graphs entries. (Except for the original
# module, this means deleting them.)
for id in ids:
if id == orig_module and orig_tree:
manager.modules[id] = orig_tree
elif id in manager.modules:
del manager.modules[id]
if id == orig_module and orig_state:
graph[id] = orig_state
elif id in graph:
del graph[id]
new_modules: list[State] = []
try:
if module in graph:
del graph[module]
load_graph(sources, manager, graph, new_modules)
except CompileError as err:
# Parse error somewhere in the program -- a blocker
assert err.module_with_blocker
restore([module] + [st.id for st in new_modules])
return BlockedUpdate(err.module_with_blocker, path, [], err.messages)
# Reparsing the file may have brought in dependencies that we
# didn't have before. Make sure that they are loaded to restore
# the invariant that a module having a loaded tree implies that
# its dependencies do as well.
ensure_trees_loaded(manager, graph, graph[module].dependencies)
# Find any other modules brought in by imports.
changed_modules = [(st.id, st.xpath) for st in new_modules]
# If there are multiple modules to process, only process one of them and return
# the remaining ones to the caller.
if len(changed_modules) > 1:
# As an optimization, look for a module that imports no other changed modules.
module, path = find_relative_leaf_module(changed_modules, graph)
changed_modules.remove((module, path))
remaining_modules = changed_modules
# The remaining modules haven't been processed yet so drop them.
restore([id for id, _ in remaining_modules])
manager.log_fine_grained(f"--> {module!r} (newly imported)")
else:
remaining_modules = []
state = graph[module]
# Process the changed file.
state.parse_file()
assert state.tree is not None, "file must be at least parsed"
t0 = time.time()
try:
semantic_analysis_for_scc(graph, [state.id], manager.errors)
except CompileError as err:
# There was a blocking error, so module AST is incomplete. Restore old modules.
restore([module])
return BlockedUpdate(module, path, remaining_modules, err.messages)
# Merge old and new ASTs.
new_modules_dict: dict[str, MypyFile | None] = {module: state.tree}
replace_modules_with_new_variants(manager, graph, {orig_module: orig_tree}, new_modules_dict)
t1 = time.time()
# Perform type checking.
state.type_checker().reset()
state.type_check_first_pass()
state.type_check_second_pass()
state.detect_possibly_undefined_vars()
t2 = time.time()
state.finish_passes()
t3 = time.time()
manager.add_stats(semanal_time=t1 - t0, typecheck_time=t2 - t1, finish_passes_time=t3 - t2)
graph[module] = state
return NormalUpdate(module, path, remaining_modules, state.tree)
def find_relative_leaf_module(modules: list[tuple[str, str]], graph: Graph) -> tuple[str, str]:
"""Find a module in a list that directly imports no other module in the list.
If no such module exists, return the lexicographically first module from the list.
Always return one of the items in the modules list.
NOTE: If both 'abc' and 'typing' have changed, an effect of the above rule is that
we prefer 'abc', even if both are in the same SCC. This works around a false
positive in 'typing', at least in tests.
Args:
modules: List of (module, path) tuples (non-empty)
graph: Program import graph that contains all modules in the module list
"""
assert modules
# Sort for repeatable results.
modules = sorted(modules)
module_set = {module for module, _ in modules}
for module, path in modules:
state = graph[module]
if len(set(state.dependencies) & module_set) == 0:
# Found it!
return module, path
# Could not find any. Just return the first module (by lexicographic order).
return modules[0]
def delete_module(module_id: str, path: str, graph: Graph, manager: BuildManager) -> None:
manager.log_fine_grained(f"delete module {module_id!r}")
# TODO: Remove deps for the module (this only affects memory use, not correctness)
if module_id in graph:
del graph[module_id]
if module_id in manager.modules:
del manager.modules[module_id]
components = module_id.split(".")
if len(components) > 1:
# Delete reference to module in parent module.
parent_id = ".".join(components[:-1])
# If parent module is ignored, it won't be included in the modules dictionary.
if parent_id in manager.modules:
parent = manager.modules[parent_id]
if components[-1] in parent.names:
del parent.names[components[-1]]
# If the module is removed from the build but still exists, then
# we mark it as missing so that it will still be picked up by `from ... import` statements.
if manager.fscache.isfile(path):
manager.missing_modules.add(module_id)
def dedupe_modules(modules: list[tuple[str, str]]) -> list[tuple[str, str]]:
seen: set[str] = set()
result = []
for id, path in modules:
if id not in seen:
seen.add(id)
result.append((id, path))
return result
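# Illustrative example (paths are assumptions): the first occurrence of each id wins.
#
#     dedupe_modules([("m", "m.py"), ("m", "other/m.py"), ("n", "n.py")])
#         == [("m", "m.py"), ("n", "n.py")]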
def get_module_to_path_map(graph: Graph) -> dict[str, str]:
return {module: node.xpath for module, node in graph.items()}
def get_sources(
fscache: FileSystemCache,
modules: dict[str, str],
changed_modules: list[tuple[str, str]],
followed: bool,
) -> list[BuildSource]:
sources = []
for id, path in changed_modules:
if fscache.isfile(path):
sources.append(BuildSource(path, id, None, followed=followed))
return sources
def calculate_active_triggers(
manager: BuildManager,
old_snapshots: dict[str, dict[str, SymbolSnapshot]],
new_modules: dict[str, MypyFile | None],
) -> set[str]:
"""Determine activated triggers by comparing old and new symbol tables.
For example, if only the signature of function m.f is different in the new
symbol table, return {'<m.f>'}.
"""
names: set[str] = set()
for id in new_modules:
snapshot1 = old_snapshots.get(id)
if snapshot1 is None:
names.add(id)
snapshot1 = {}
new = new_modules[id]
if new is None:
snapshot2 = snapshot_symbol_table(id, SymbolTable())
names.add(id)
else:
snapshot2 = snapshot_symbol_table(id, new.names)
diff = compare_symbol_table_snapshots(id, snapshot1, snapshot2)
package_nesting_level = id.count(".")
for item in diff.copy():
if item.count(".") <= package_nesting_level + 1 and item.split(".")[-1] not in (
"__builtins__",
"__file__",
"__name__",
"__package__",
"__doc__",
):
# Activate catch-all wildcard trigger for top-level module changes (used for
# "from m import *"). This also gets triggered by changes to module-private
# entries, but as these unneeded dependencies only result in extra processing,
# it's a minor problem.
#
# TODO: Some __* names cause mistriggers. Fix the underlying issue instead of
# special casing them here.
diff.add(id + WILDCARD_TAG)
if item.count(".") > package_nesting_level + 1:
# These are for changes within classes, used by protocols.
diff.add(item.rsplit(".", 1)[0] + WILDCARD_TAG)
names |= diff
return {make_trigger(name) for name in names}
def replace_modules_with_new_variants(
manager: BuildManager,
graph: dict[str, State],
old_modules: dict[str, MypyFile | None],
new_modules: dict[str, MypyFile | None],
) -> None:
"""Replace modules with newly builds versions.
Retain the identities of externally visible AST nodes in the
old ASTs so that references to the affected modules from other
modules will still be valid (unless something was deleted or
replaced with an incompatible definition, in which case there
will be dangling references that will be handled by
propagate_changes_using_dependencies).
"""
for id in new_modules:
preserved_module = old_modules.get(id)
new_module = new_modules[id]
if preserved_module and new_module is not None:
merge_asts(preserved_module, preserved_module.names, new_module, new_module.names)
manager.modules[id] = preserved_module
graph[id].tree = preserved_module
def propagate_changes_using_dependencies(
manager: BuildManager,
graph: dict[str, State],
deps: dict[str, set[str]],
triggered: set[str],
up_to_date_modules: set[str],
targets_with_errors: set[str],
processed_targets: list[str],
) -> list[tuple[str, str]]:
"""Transitively rechecks targets based on triggers and the dependency map.
Returns a list of (module id, path) tuples representing modules that contain
a target that needs to be reprocessed but that has not been parsed yet.
Processed targets should be appended to processed_targets (used in tests only,
to test the order of processing targets).
"""
num_iter = 0
remaining_modules: list[tuple[str, str]] = []
# Propagate changes until nothing visible has changed during the last
# iteration.
while triggered or targets_with_errors:
num_iter += 1
if num_iter > MAX_ITER:
raise RuntimeError("Max number of iterations (%d) reached (endless loop?)" % MAX_ITER)
todo, unloaded, stale_protos = find_targets_recursive(
manager, graph, triggered, deps, up_to_date_modules
)
# TODO: we sort to make it deterministic, but this is *incredibly* ad hoc
remaining_modules.extend((id, graph[id].xpath) for id in sorted(unloaded))
# Also process targets that used to have errors, as otherwise some
# errors might be lost.
for target in targets_with_errors:
id = module_prefix(graph, target)
if id is not None and id not in up_to_date_modules:
if id not in todo:
todo[id] = set()
manager.log_fine_grained(f"process target with error: {target}")
more_nodes, _ = lookup_target(manager, target)
todo[id].update(more_nodes)
triggered = set()
# First invalidate subtype caches in all stale protocols.
# We need to do this to avoid false negatives if the protocol itself is
# unchanged, but was marked stale because its sub- (or super-) type changed.
for info in stale_protos:
type_state.reset_subtype_caches_for(info)
# Then fully reprocess all targets.
# TODO: Preserve order (set is not optimal)
for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
assert id not in up_to_date_modules
triggered |= reprocess_nodes(manager, graph, id, nodes, deps, processed_targets)
# Changes elsewhere may require us to reprocess modules that were
# previously considered up to date. For example, there may be a
# dependency loop that loops back to an originally processed module.
up_to_date_modules = set()
targets_with_errors = set()
if is_verbose(manager):
manager.log_fine_grained(f"triggered: {list(triggered)!r}")
return remaining_modules
def find_targets_recursive(
manager: BuildManager,
graph: Graph,
triggers: set[str],
deps: dict[str, set[str]],
up_to_date_modules: set[str],
) -> tuple[dict[str, set[FineGrainedDeferredNode]], set[str], set[TypeInfo]]:
"""Find names of all targets that need to reprocessed, given some triggers.
Returns: A tuple containing:
* A dictionary from module id to a set of stale targets.
* A set of module ids for unparsed modules with stale targets.
* A set of TypeInfos for stale protocols whose subtype caches need to be invalidated.
"""
result: dict[str, set[FineGrainedDeferredNode]] = {}
worklist = triggers
processed: set[str] = set()
stale_protos: set[TypeInfo] = set()
unloaded_files: set[str] = set()
# Find AST nodes corresponding to each target.
#
# TODO: Don't rely on a set, since the items are in an unpredictable order.
while worklist:
processed |= worklist
current = worklist
worklist = set()
for target in current:
if target.startswith("<"):
module_id = module_prefix(graph, trigger_to_target(target))
if module_id:
ensure_deps_loaded(module_id, deps, graph)
worklist |= deps.get(target, set()) - processed
else:
module_id = module_prefix(graph, target)
if module_id is None:
# Deleted module.
continue
if module_id in up_to_date_modules:
# Already processed.
continue
if (
module_id not in manager.modules
or manager.modules[module_id].is_cache_skeleton
):
# We haven't actually parsed and checked the module, so we don't have
# access to the actual nodes.
# Add it to the queue of files that need to be processed fully.
unloaded_files.add(module_id)
continue
if module_id not in result:
result[module_id] = set()
manager.log_fine_grained(f"process: {target}")
deferred, stale_proto = lookup_target(manager, target)
if stale_proto:
stale_protos.add(stale_proto)
result[module_id].update(deferred)
return result, unloaded_files, stale_protos
def reprocess_nodes(
manager: BuildManager,
graph: dict[str, State],
module_id: str,
nodeset: set[FineGrainedDeferredNode],
deps: dict[str, set[str]],
processed_targets: list[str],
) -> set[str]:
"""Reprocess a set of nodes within a single module.
Return fired triggers.
"""
if module_id not in graph:
manager.log_fine_grained("%s not in graph (blocking errors or deleted?)" % module_id)
return set()
file_node = manager.modules[module_id]
old_symbols = find_symbol_tables_recursive(file_node.fullname, file_node.names)
old_symbols = {name: names.copy() for name, names in old_symbols.items()}
old_symbols_snapshot = snapshot_symbol_table(file_node.fullname, file_node.names)
def key(node: FineGrainedDeferredNode) -> int:
# Unlike modules which are sorted by name within SCC,
# nodes within the same module are sorted by line number, because
# this is how they are processed in normal mode.
return node.node.line
nodes = sorted(nodeset, key=key)
state = graph[module_id]
options = state.options
manager.errors.set_file_ignored_lines(
file_node.path, file_node.ignored_lines, options.ignore_errors or state.ignore_all
)
manager.errors.set_skipped_lines(file_node.path, file_node.skipped_lines)
targets = set()
for node in nodes:
target = target_from_node(module_id, node.node)
if target is not None:
targets.add(target)
manager.errors.clear_errors_in_targets(file_node.path, targets)
# If one of the nodes is the module itself, emit any errors that
# happened before semantic analysis.
for target in targets:
if target == module_id:
for info in graph[module_id].early_errors:
manager.errors.add_error_info(info)
# Strip semantic analysis information.
saved_attrs: SavedAttributes = {}
for deferred in nodes:
processed_targets.append(deferred.node.fullname)
strip_target(deferred.node, saved_attrs)
semantic_analysis_for_targets(graph[module_id], nodes, graph, saved_attrs)
# Merge symbol tables to preserve identities of AST nodes. The file node will remain
# the same, but other nodes may have been recreated with different identities, such as
# NamedTuples defined using assignment statements.
new_symbols = find_symbol_tables_recursive(file_node.fullname, file_node.names)
for name in old_symbols:
if name in new_symbols:
merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])
# Type check.
checker = graph[module_id].type_checker()
checker.reset()
# We seem to need additional passes in fine-grained incremental mode.
checker.pass_num = 0
checker.last_pass = 3
more = checker.check_second_pass(nodes)
while more:
more = False
if graph[module_id].type_checker().check_second_pass():
more = True
if manager.options.export_types:
manager.all_types.update(graph[module_id].type_map())
new_symbols_snapshot = snapshot_symbol_table(file_node.fullname, file_node.names)
# Check if any attribute types were changed and need to be propagated further.
changed = compare_symbol_table_snapshots(
file_node.fullname, old_symbols_snapshot, new_symbols_snapshot
)
new_triggered = {make_trigger(name) for name in changed}
# Dependencies may have changed.
update_deps(module_id, nodes, graph, deps, options)
# Report missing imports.
graph[module_id].verify_dependencies()
graph[module_id].free_state()
return new_triggered
def find_symbol_tables_recursive(prefix: str, symbols: SymbolTable) -> dict[str, SymbolTable]:
"""Find all nested symbol tables.
Args:
prefix: Full name prefix (used for return value keys and to filter result so that
cross references to other modules aren't included)
symbols: Root symbol table
Returns a dictionary from full name to corresponding symbol table.
"""
result = {}
result[prefix] = symbols
for name, node in symbols.items():
if isinstance(node.node, TypeInfo) and node.node.fullname.startswith(prefix + "."):
more = find_symbol_tables_recursive(prefix + "." + name, node.node.names)
result.update(more)
return result
def update_deps(
module_id: str,
nodes: list[FineGrainedDeferredNode],
graph: dict[str, State],
deps: dict[str, set[str]],
options: Options,
) -> None:
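# Recompute fine-grained dependencies for the just-reprocessed nodes and merge them
# into 'deps'. Entries are only ever added here; stale triggers left in the map are
# tolerated by lookup_target()'s "stale dependency" handling.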
for deferred in nodes:
node = deferred.node
type_map = graph[module_id].type_map()
tree = graph[module_id].tree
assert tree is not None, "Tree must be processed at this stage"
new_deps = get_dependencies_of_target(
module_id, tree, node, type_map, options.python_version
)
for trigger, targets in new_deps.items():
deps.setdefault(trigger, set()).update(targets)
# Merge also the newly added protocol deps (if any).
type_state.update_protocol_deps(deps)
def lookup_target(
manager: BuildManager, target: str
) -> tuple[list[FineGrainedDeferredNode], TypeInfo | None]:
"""Look up a target by fully-qualified name.
The first item in the return tuple is a list of deferred nodes that
need to be reprocessed. If the target represents a TypeInfo corresponding
to a protocol, return it as the second item in the return tuple, otherwise None.
"""
def not_found() -> None:
manager.log_fine_grained(f"Can't find matching target for {target} (stale dependency?)")
modules = manager.modules
items = split_target(modules, target)
if items is None:
not_found() # Stale dependency
return [], None
module, rest = items
if rest:
components = rest.split(".")
else:
components = []
node: SymbolNode | None = modules[module]
file: MypyFile | None = None
active_class = None
for c in components:
if isinstance(node, TypeInfo):
active_class = node
if isinstance(node, MypyFile):
file = node
if not isinstance(node, (MypyFile, TypeInfo)) or c not in node.names:
not_found() # Stale dependency
return [], None
# Don't reprocess plugin generated targets. They should get
# stripped and regenerated when the containing target is
# reprocessed.
if node.names[c].plugin_generated:
return [], None
node = node.names[c].node
if isinstance(node, TypeInfo):
# A ClassDef target covers the body of the class and everything defined
# within it. To get the body we include the entire surrounding target,
# typically a module top-level, since we don't support processing class
# bodies as separate entities for simplicity.
assert file is not None
if node.fullname != target:
# This is a reference to a different TypeInfo, likely due to a stale dependency.
# Processing them would spell trouble -- for example, we could be refreshing
# a deserialized TypeInfo with missing attributes.
not_found()
return [], None
result = [FineGrainedDeferredNode(file, None)]
stale_info: TypeInfo | None = None
if node.is_protocol:
stale_info = node
for name, symnode in node.names.items():
node = symnode.node
if isinstance(node, FuncDef):
method, _ = lookup_target(manager, target + "." + name)
result.extend(method)
return result, stale_info
if isinstance(node, Decorator):
# Decorator targets actually refer to the function definition only.
node = node.func
if not isinstance(node, (FuncDef, MypyFile, OverloadedFuncDef)):
# The target can't be refreshed. It's possible that the target was
# changed to another type and we have a stale dependency pointing to it.
not_found()
return [], None
if node.fullname != target:
# Stale reference points to something unexpected. We shouldn't process since the
# context will be wrong and it could be a partially initialized deserialized node.
not_found()
return [], None
return [FineGrainedDeferredNode(node, active_class)], None
def is_verbose(manager: BuildManager) -> bool:
return manager.options.verbosity >= 1 or DEBUG_FINE_GRAINED
def target_from_node(module: str, node: FuncDef | MypyFile | OverloadedFuncDef) -> str | None:
"""Return the target name corresponding to a deferred node.
Args:
module: Must be the id of the module that defines 'node'
Returns the target name, or None if the node is not a valid target in the given
module (for example, if it's actually defined in another module).
"""
if isinstance(node, MypyFile):
if module != node.fullname:
# Actually a reference to another module -- likely a stale dependency.
return None
return module
else: # OverloadedFuncDef or FuncDef
if node.info:
return f"{node.info.fullname}.{node.name}"
else:
return f"{module}.{node.name}"
if sys.platform != "win32":
INIT_SUFFIXES: Final = ("/__init__.py", "/__init__.pyi")
else:
INIT_SUFFIXES: Final = (
os.sep + "__init__.py",
os.sep + "__init__.pyi",
os.altsep + "__init__.py",
os.altsep + "__init__.pyi",
)
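# On Windows, paths may use either os.sep ('\\') or os.altsep ('/'), so both separators
# are accepted when checking for package __init__ files.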
def refresh_suppressed_submodules(
module: str,
path: str | None,
deps: dict[str, set[str]],
graph: Graph,
fscache: FileSystemCache,
refresh_file: Callable[[str, str], list[str]],
) -> list[str] | None:
"""Look for submodules that are now suppressed in target package.
If a submodule a.b gets added, we need to mark it as suppressed
in modules that contain "from a import b". Previously we assumed
that 'a.b' is not a module but a regular name.
This is only relevant when following imports normally.
Args:
module: target package in which to look for submodules
path: path of the module
refresh_file: function that reads the AST of a module (returns error messages)
Return a list of errors from refresh_file() if it was called. If the
return value is None, we didn't call refresh_file().
"""
messages = None
if path is None or not path.endswith(INIT_SUFFIXES):
# Only packages have submodules.
return None
# Find any submodules present in the directory.
pkgdir = os.path.dirname(path)
try:
entries = fscache.listdir(pkgdir)
except FileNotFoundError:
entries = []
for fnam in entries:
if (
not fnam.endswith((".py", ".pyi"))
or fnam.startswith("__init__.")
or fnam.count(".") != 1
):
continue
shortname = fnam.split(".")[0]
submodule = module + "." + shortname
trigger = make_trigger(submodule)
# We may be missing the required fine-grained deps.
ensure_deps_loaded(module, deps, graph)
if trigger in deps:
for dep in deps[trigger]:
# We can ignore <...> deps since a submodule can't trigger any.
state = graph.get(dep)
if not state:
# Maybe it's a non-top-level target. We only care about the module.
dep_module = module_prefix(graph, dep)
if dep_module is not None:
state = graph.get(dep_module)
if state:
# The file may be missing an AST in case it was read from cache.
if state.tree is None:
# Create AST for the file. This may produce some new errors
# that we need to propagate.
assert state.path is not None
messages = refresh_file(state.id, state.path)
tree = state.tree
assert tree # Will be fine, due to refresh_file() above
for imp in tree.imports:
if isinstance(imp, ImportFrom):
if (
imp.id == module
and any(name == shortname for name, _ in imp.names)
and submodule not in state.suppressed_set
):
state.suppressed.append(submodule)
state.suppressed_set.add(submodule)
return messages
def extract_fnam_from_message(message: str) -> str | None:
m = re.match(r"([^:]+):[0-9]+: (error|note): ", message)
if m:
return m.group(1)
return None
def extract_possible_fnam_from_message(message: str) -> str:
# This may return non-path things if there is some random colon on the line
return message.split(":", 1)[0]
def sort_messages_preserving_file_order(
messages: list[str], prev_messages: list[str]
) -> list[str]:
"""Sort messages so that the order of files is preserved.
An update generates messages so that the files can be in a fairly
arbitrary order. Preserve the order of files to avoid messages
getting reshuffled continuously. If there are messages in
additional files, sort them towards the end.
"""
# Calculate file order from the previous messages
n = 0
order = {}
for msg in prev_messages:
fnam = extract_fnam_from_message(msg)
if fnam and fnam not in order:
order[fnam] = n
n += 1
# Related messages must be sorted as a group of successive lines
groups = []
i = 0
while i < len(messages):
msg = messages[i]
maybe_fnam = extract_possible_fnam_from_message(msg)
group = [msg]
if maybe_fnam in order:
# This looks like a file name. Collect all lines related to this message.
while (
i + 1 < len(messages)
and extract_possible_fnam_from_message(messages[i + 1]) not in order
and extract_fnam_from_message(messages[i + 1]) is None
and not messages[i + 1].startswith("mypy: ")
):
i += 1
group.append(messages[i])
groups.append((order.get(maybe_fnam, n), group))
i += 1
groups = sorted(groups, key=lambda g: g[0])
result = []
for key, group in groups:
result.extend(group)
return result
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/server/update.py
|
Python
|
NOASSERTION
| 53,000 |
"""Shared logic between our three mypy parser files."""
from __future__ import annotations
from typing import Final
_NON_BINARY_MAGIC_METHODS: Final = {
"__abs__",
"__call__",
"__complex__",
"__contains__",
"__del__",
"__delattr__",
"__delitem__",
"__enter__",
"__exit__",
"__float__",
"__getattr__",
"__getattribute__",
"__getitem__",
"__hex__",
"__init__",
"__init_subclass__",
"__int__",
"__invert__",
"__iter__",
"__len__",
"__long__",
"__neg__",
"__new__",
"__oct__",
"__pos__",
"__repr__",
"__reversed__",
"__setattr__",
"__setitem__",
"__str__",
}
MAGIC_METHODS_ALLOWING_KWARGS: Final = {
"__init__",
"__init_subclass__",
"__new__",
"__call__",
"__setattr__",
}
BINARY_MAGIC_METHODS: Final = {
"__add__",
"__and__",
"__divmod__",
"__eq__",
"__floordiv__",
"__ge__",
"__gt__",
"__iadd__",
"__iand__",
"__idiv__",
"__ifloordiv__",
"__ilshift__",
"__imatmul__",
"__imod__",
"__imul__",
"__ior__",
"__ipow__",
"__irshift__",
"__isub__",
"__itruediv__",
"__ixor__",
"__le__",
"__lshift__",
"__lt__",
"__matmul__",
"__mod__",
"__mul__",
"__ne__",
"__or__",
"__pow__",
"__radd__",
"__rand__",
"__rdiv__",
"__rfloordiv__",
"__rlshift__",
"__rmatmul__",
"__rmod__",
"__rmul__",
"__ror__",
"__rpow__",
"__rrshift__",
"__rshift__",
"__rsub__",
"__rtruediv__",
"__rxor__",
"__sub__",
"__truediv__",
"__xor__",
}
assert not (_NON_BINARY_MAGIC_METHODS & BINARY_MAGIC_METHODS)
MAGIC_METHODS: Final = _NON_BINARY_MAGIC_METHODS | BINARY_MAGIC_METHODS
MAGIC_METHODS_POS_ARGS_ONLY: Final = MAGIC_METHODS - MAGIC_METHODS_ALLOWING_KWARGS
def special_function_elide_names(name: str) -> bool:
return name in MAGIC_METHODS_POS_ARGS_ONLY
def argument_elide_name(name: str | None) -> bool:
return name is not None and name.startswith("__") and not name.endswith("__")
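# Note: parameter names like '__x' (double leading underscore, no trailing underscores)
# follow the long-standing stub convention for positional-only parameters, which is why
# argument_elide_name() treats them as having no usable name.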
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/sharedparse.py
|
Python
|
NOASSERTION
| 2,102 |
"""Type inference constraint solving"""
from __future__ import annotations
from collections import defaultdict
from typing import Iterable, Sequence
from typing_extensions import TypeAlias as _TypeAlias
from mypy.constraints import SUBTYPE_OF, SUPERTYPE_OF, Constraint, infer_constraints, neg_op
from mypy.expandtype import expand_type
from mypy.graph_utils import prepare_sccs, strongly_connected_components, topsort
from mypy.join import join_types
from mypy.meet import meet_type_list, meet_types
from mypy.subtypes import is_subtype
from mypy.typeops import get_all_type_vars
from mypy.types import (
AnyType,
Instance,
NoneType,
ParamSpecType,
ProperType,
TupleType,
Type,
TypeOfAny,
TypeVarId,
TypeVarLikeType,
TypeVarTupleType,
TypeVarType,
UninhabitedType,
UnionType,
UnpackType,
get_proper_type,
)
from mypy.typestate import type_state
Bounds: _TypeAlias = "dict[TypeVarId, set[Type]]"
Graph: _TypeAlias = "set[tuple[TypeVarId, TypeVarId]]"
Solutions: _TypeAlias = "dict[TypeVarId, Type | None]"
def solve_constraints(
original_vars: Sequence[TypeVarLikeType],
constraints: list[Constraint],
strict: bool = True,
allow_polymorphic: bool = False,
skip_unsatisfied: bool = False,
) -> tuple[list[Type | None], list[TypeVarLikeType]]:
"""Solve type constraints.
Return the best type(s) for type variables; each type can be None if the value of
the variable could not be solved.
If a variable has no constraints: if strict=True, arbitrarily pick
UninhabitedType as the value of the type variable; if strict=False, pick AnyType.
If allow_polymorphic=True, then use the full algorithm that can potentially return
free type variables in solutions (these require special care when applying). Otherwise,
use a simplified algorithm that just solves each type variable individually if possible.
The skip_unsatisfied flag matches the same one in applytype.apply_generic_arguments().
"""
vars = [tv.id for tv in original_vars]
if not vars:
return [], []
originals = {tv.id: tv for tv in original_vars}
extra_vars: list[TypeVarId] = []
# Get additional type variables from generic actuals.
for c in constraints:
extra_vars.extend([v.id for v in c.extra_tvars if v.id not in vars + extra_vars])
originals.update({v.id: v for v in c.extra_tvars if v.id not in originals})
if allow_polymorphic:
# Constraints inferred from unions require special handling in polymorphic inference.
constraints = skip_reverse_union_constraints(constraints)
# Collect a list of constraints for each type variable.
cmap: dict[TypeVarId, list[Constraint]] = {tv: [] for tv in vars + extra_vars}
for con in constraints:
if con.type_var in vars + extra_vars:
cmap[con.type_var].append(con)
if allow_polymorphic:
if constraints:
solutions, free_vars = solve_with_dependent(
vars + extra_vars, constraints, vars, originals
)
else:
solutions = {}
free_vars = []
else:
solutions = {}
free_vars = []
for tv, cs in cmap.items():
if not cs:
continue
lowers = [c.target for c in cs if c.op == SUPERTYPE_OF]
uppers = [c.target for c in cs if c.op == SUBTYPE_OF]
solution = solve_one(lowers, uppers)
# Do not leak type variables in non-polymorphic solutions.
if solution is None or not get_vars(
solution, [tv for tv in extra_vars if tv not in vars]
):
solutions[tv] = solution
res: list[Type | None] = []
for v in vars:
if v in solutions:
res.append(solutions[v])
else:
# No constraints for type variable -- 'UninhabitedType' is the most specific type.
candidate: Type
if strict:
candidate = UninhabitedType()
candidate.ambiguous = True
else:
candidate = AnyType(TypeOfAny.special_form)
res.append(candidate)
if not free_vars and not skip_unsatisfied:
# Most of the validation for solutions is done in applytype.py, but here we can
# quickly test solutions w.r.t. upper bounds, and use the latter (if possible),
# if solutions are actually not valid (due to poor inference context).
res = pre_validate_solutions(res, original_vars, constraints)
return res, free_vars
def solve_with_dependent(
vars: list[TypeVarId],
constraints: list[Constraint],
original_vars: list[TypeVarId],
originals: dict[TypeVarId, TypeVarLikeType],
) -> tuple[Solutions, list[TypeVarLikeType]]:
"""Solve set of constraints that may depend on each other, like T <: List[S].
The whole algorithm consists of five steps:
* Propagate via linear constraints and use secondary constraints to get transitive closure
* Find dependencies between type variables, group them in SCCs, and sort topologically
* Check that all SCCs are intrinsically linear; we can't solve (express) T <: List[T]
* Variables in leaf SCCs that don't have constant bounds are free (choose one per SCC)
* Solve constraints iteratively starting from leafs, updating bounds after each step.
"""
graph, lowers, uppers = transitive_closure(vars, constraints)
dmap = compute_dependencies(vars, graph, lowers, uppers)
sccs = list(strongly_connected_components(set(vars), dmap))
if not all(check_linear(scc, lowers, uppers) for scc in sccs):
return {}, []
raw_batches = list(topsort(prepare_sccs(sccs, dmap)))
free_vars = []
free_solutions = {}
for scc in raw_batches[0]:
# If there are no bounds on this SCC, then the only meaningful solution we can
# express is that each variable is equal to a new free variable. For example,
# if we have T <: S, S <: U, we deduce: T = S = U = <free>.
if all(not lowers[tv] and not uppers[tv] for tv in scc):
best_free = choose_free([originals[tv] for tv in scc], original_vars)
if best_free:
# TODO: failing to choose may cause leaking type variables,
# we need to fail gracefully instead.
free_vars.append(best_free.id)
free_solutions[best_free.id] = best_free
# Update lowers/uppers with free vars, so these can now be used
# as valid solutions.
for l, u in graph:
if l in free_vars:
lowers[u].add(free_solutions[l])
if u in free_vars:
uppers[l].add(free_solutions[u])
# Flatten the SCCs that are independent, we can solve them together,
# since we don't need to update any targets in between.
batches = []
for batch in raw_batches:
next_bc = []
for scc in batch:
next_bc.extend(list(scc))
batches.append(next_bc)
solutions: dict[TypeVarId, Type | None] = {}
for flat_batch in batches:
res = solve_iteratively(flat_batch, graph, lowers, uppers)
solutions.update(res)
return solutions, [free_solutions[tv] for tv in free_vars]
def solve_iteratively(
batch: list[TypeVarId], graph: Graph, lowers: Bounds, uppers: Bounds
) -> Solutions:
"""Solve transitive closure sequentially, updating upper/lower bounds after each step.
Transitive closure is represented as a linear graph plus lower/upper bounds for each
type variable, see transitive_closure() docstring for details.
We solve for type variables that appear in `batch`. If a bound is not constant (i.e. it
looks like T :> F[S, ...]), we substitute solutions found so far in the target F[S, ...]
after solving the batch.
Importantly, after solving each variable in a batch, we move it from linear graph to
upper/lower bounds, this way we can guarantee consistency of solutions (see comment below
for an example when this is important).
"""
solutions = {}
s_batch = set(batch)
while s_batch:
for tv in sorted(s_batch, key=lambda x: x.raw_id):
if lowers[tv] or uppers[tv]:
solvable_tv = tv
break
else:
break
# Solve each solvable type variable separately.
s_batch.remove(solvable_tv)
result = solve_one(lowers[solvable_tv], uppers[solvable_tv])
solutions[solvable_tv] = result
if result is None:
# TODO: support backtracking lower/upper bound choices and order within SCCs.
# (will require switching this function from iterative to recursive).
continue
# Update the (transitive) bounds from graph if there is a solution.
# This is needed to guarantee solutions will never contradict the initial
# constraints. For example, consider {T <: S, T <: A, S :> B} with A :> B.
# If we would not update the uppers/lowers from graph, we would infer T = A, S = B
# which is not correct.
for l, u in graph.copy():
if l == u:
continue
if l == solvable_tv:
lowers[u].add(result)
graph.remove((l, u))
if u == solvable_tv:
uppers[l].add(result)
graph.remove((l, u))
# We can update uppers/lowers only once after solving the whole SCC,
# since uppers/lowers can't depend on type variables in the SCC
# (and we would reject such SCC as non-linear and therefore not solvable).
subs = {tv: s for (tv, s) in solutions.items() if s is not None}
for tv in lowers:
lowers[tv] = {expand_type(lt, subs) for lt in lowers[tv]}
for tv in uppers:
uppers[tv] = {expand_type(ut, subs) for ut in uppers[tv]}
return solutions
def solve_one(lowers: Iterable[Type], uppers: Iterable[Type]) -> Type | None:
"""Solve constraints by finding by using meets of upper bounds, and joins of lower bounds."""
bottom: Type | None = None
top: Type | None = None
candidate: Type | None = None
# Filter out previous results of failed inference, they will only spoil the current pass...
new_uppers = []
for u in uppers:
pu = get_proper_type(u)
if not isinstance(pu, UninhabitedType) or not pu.ambiguous:
new_uppers.append(u)
uppers = new_uppers
# ...unless this is the only information we have, then we just pass it on.
if not uppers and not lowers:
candidate = UninhabitedType()
candidate.ambiguous = True
return candidate
# Process each bound separately, and calculate the lower and upper
# bounds based on constraints. Note that we assume that the constraint
# targets do not have constraint references.
for target in lowers:
if bottom is None:
bottom = target
else:
if type_state.infer_unions:
# This deviates from the general mypy semantics because
# recursive types are union-heavy in 95% of cases.
bottom = UnionType.make_union([bottom, target])
else:
bottom = join_types(bottom, target)
for target in uppers:
if top is None:
top = target
else:
top = meet_types(top, target)
p_top = get_proper_type(top)
p_bottom = get_proper_type(bottom)
if isinstance(p_top, AnyType) or isinstance(p_bottom, AnyType):
source_any = top if isinstance(p_top, AnyType) else bottom
assert isinstance(source_any, ProperType) and isinstance(source_any, AnyType)
return AnyType(TypeOfAny.from_another_any, source_any=source_any)
elif bottom is None:
if top:
candidate = top
else:
# No constraints for type variable
return None
elif top is None:
candidate = bottom
elif is_subtype(bottom, top):
candidate = bottom
else:
candidate = None
return candidate
def choose_free(
scc: list[TypeVarLikeType], original_vars: list[TypeVarId]
) -> TypeVarLikeType | None:
"""Choose the best solution for an SCC containing only type variables.
This is needed to preserve e.g. the upper bound in a situation like this:
def dec(f: Callable[[T], S]) -> Callable[[T], S]: ...
@dec
def test(x: U) -> U: ...
where U <: A.
"""
if len(scc) == 1:
# Fast path, choice is trivial.
return scc[0]
common_upper_bound = meet_type_list([t.upper_bound for t in scc])
common_upper_bound_p = get_proper_type(common_upper_bound)
# We include None for when strict-optional is disabled.
if isinstance(common_upper_bound_p, (UninhabitedType, NoneType)):
# This will cause Never to be inferred, which is better than a free TypeVar
# whose upper bound is Never.
return None
values: list[Type] = []
for tv in scc:
if isinstance(tv, TypeVarType) and tv.values:
if values:
# It is too tricky to support multiple TypeVars with values
# within the same SCC.
return None
values = tv.values.copy()
if values and not is_trivial_bound(common_upper_bound_p):
# If there are both values and upper bound present, we give up,
# since type variables having both are not supported.
return None
# For convenience with current type application machinery, we use a stable
# choice that prefers the original type variables (not polymorphic ones) in SCC.
best = sorted(scc, key=lambda x: (x.id not in original_vars, x.id.raw_id))[0]
if isinstance(best, TypeVarType):
return best.copy_modified(values=values, upper_bound=common_upper_bound)
if is_trivial_bound(common_upper_bound_p, allow_tuple=True):
# TODO: support more cases for ParamSpecs/TypeVarTuples
return best
return None
def is_trivial_bound(tp: ProperType, allow_tuple: bool = False) -> bool:
if isinstance(tp, Instance) and tp.type.fullname == "builtins.tuple":
return allow_tuple and is_trivial_bound(get_proper_type(tp.args[0]))
return isinstance(tp, Instance) and tp.type.fullname == "builtins.object"
def find_linear(c: Constraint) -> tuple[bool, TypeVarId | None]:
"""Find out if this constraint represent a linear relationship, return target id if yes."""
if isinstance(c.origin_type_var, TypeVarType):
if isinstance(c.target, TypeVarType):
return True, c.target.id
if isinstance(c.origin_type_var, ParamSpecType):
if isinstance(c.target, ParamSpecType) and not c.target.prefix.arg_types:
return True, c.target.id
if isinstance(c.origin_type_var, TypeVarTupleType):
target = get_proper_type(c.target)
if isinstance(target, TupleType) and len(target.items) == 1:
item = target.items[0]
if isinstance(item, UnpackType) and isinstance(item.type, TypeVarTupleType):
return True, item.type.id
return False, None
def transitive_closure(
tvars: list[TypeVarId], constraints: list[Constraint]
) -> tuple[Graph, Bounds, Bounds]:
"""Find transitive closure for given constraints on type variables.
Transitive closure gives the maximal set of lower/upper bounds for each type variable,
such that we cannot deduce any further bounds by chaining other existing bounds.
The transitive closure is represented by:
* A set of lower and upper bounds for each type variable, where only constant and
non-linear terms are included in the bounds.
* A graph of linear constraints between type variables (represented as a set of pairs)
Such separation simplifies reasoning, and allows an efficient and simple incremental
transitive closure algorithm that we use here.
For example if we have initial constraints [T <: S, S <: U, U <: int], the transitive
closure is given by:
* {} <: T <: {int}
* {} <: S <: {int}
* {} <: U <: {int}
* {T <: S, S <: U, T <: U}
"""
uppers: Bounds = defaultdict(set)
lowers: Bounds = defaultdict(set)
graph: Graph = {(tv, tv) for tv in tvars}
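# Seed the graph with the reflexive pairs (tv, tv): every variable trivially bounds
# itself, which keeps the closure update rules below uniform.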
remaining = set(constraints)
while remaining:
c = remaining.pop()
# Note that a ParamSpec constraint P <: Q may be considered linear only if Q has no prefix;
# for cases like P <: Concatenate[T, Q] we should consider this non-linear and put {P} and
# {T, Q} into separate SCCs. Similarly, Ts <: Tuple[*Us] is considered linear, while
# Ts <: Tuple[*Us, U] is non-linear.
is_linear, target_id = find_linear(c)
if is_linear and target_id in tvars:
assert target_id is not None
if c.op == SUBTYPE_OF:
lower, upper = c.type_var, target_id
else:
lower, upper = target_id, c.type_var
if (lower, upper) in graph:
continue
graph |= {
(l, u) for l in tvars for u in tvars if (l, lower) in graph and (upper, u) in graph
}
for u in tvars:
if (upper, u) in graph:
lowers[u] |= lowers[lower]
for l in tvars:
if (l, lower) in graph:
uppers[l] |= uppers[upper]
for lt in lowers[lower]:
for ut in uppers[upper]:
add_secondary_constraints(remaining, lt, ut)
elif c.op == SUBTYPE_OF:
if c.target in uppers[c.type_var]:
continue
for l in tvars:
if (l, c.type_var) in graph:
uppers[l].add(c.target)
for lt in lowers[c.type_var]:
add_secondary_constraints(remaining, lt, c.target)
else:
assert c.op == SUPERTYPE_OF
if c.target in lowers[c.type_var]:
continue
for u in tvars:
if (c.type_var, u) in graph:
lowers[u].add(c.target)
for ut in uppers[c.type_var]:
add_secondary_constraints(remaining, c.target, ut)
return graph, lowers, uppers
def add_secondary_constraints(cs: set[Constraint], lower: Type, upper: Type) -> None:
"""Add secondary constraints inferred between lower and upper (in place)."""
if isinstance(get_proper_type(upper), UnionType) and isinstance(
get_proper_type(lower), UnionType
):
# When both types are unions, this can lead to inferring spurious constraints,
# for example Union[T, int] <: S <: Union[T, int] may infer T <: int.
# To avoid this, just skip them for now.
return
# TODO: what if secondary constraints result in inference against polymorphic actual?
cs.update(set(infer_constraints(lower, upper, SUBTYPE_OF)))
cs.update(set(infer_constraints(upper, lower, SUPERTYPE_OF)))
def compute_dependencies(
tvars: list[TypeVarId], graph: Graph, lowers: Bounds, uppers: Bounds
) -> dict[TypeVarId, list[TypeVarId]]:
"""Compute dependencies between type variables induced by constraints.
If we have a constraint like T <: List[S], we say that T depends on S, since
we will need to solve for S first before we can solve for T.
"""
res = {}
for tv in tvars:
deps = set()
for lt in lowers[tv]:
deps |= get_vars(lt, tvars)
for ut in uppers[tv]:
deps |= get_vars(ut, tvars)
for other in tvars:
if other == tv:
continue
if (tv, other) in graph or (other, tv) in graph:
deps.add(other)
res[tv] = list(deps)
return res
def check_linear(scc: set[TypeVarId], lowers: Bounds, uppers: Bounds) -> bool:
"""Check there are only linear constraints between type variables in SCC.
Linear are constraints like T <: S (while T <: F[S] are non-linear).
"""
for tv in scc:
if any(get_vars(lt, list(scc)) for lt in lowers[tv]):
return False
if any(get_vars(ut, list(scc)) for ut in uppers[tv]):
return False
return True
def skip_reverse_union_constraints(cs: list[Constraint]) -> list[Constraint]:
"""Avoid ambiguities for constraints inferred from unions during polymorphic inference.
Polymorphic inference implicitly relies on the assumption that the reverse of a linear constraint
is a linear constraint. This is however not true in the presence of union types, for example
T :> Union[S, int] vs S <: T. Trying to solve such constraints would be detected as ambiguous,
since (T, S) form a non-linear SCC. However, simply removing the linear part results in a valid
solution T = Union[S, int], S = <free>. A similar scenario is when we get T <: Union[T, int];
such constraints carry no information and will equally confuse the linearity check.
TODO: a cleaner solution may be to avoid inferring such constraints in the first place, but
this would require passing around a flag through all infer_constraints() calls.
"""
reverse_union_cs = set()
for c in cs:
p_target = get_proper_type(c.target)
if isinstance(p_target, UnionType):
for item in p_target.items:
if isinstance(item, TypeVarType):
if item == c.origin_type_var and c.op == SUBTYPE_OF:
reverse_union_cs.add(c)
continue
# These two forms are semantically identical, but are different from
# the point of view of Constraint.__eq__().
reverse_union_cs.add(Constraint(item, neg_op(c.op), c.origin_type_var))
reverse_union_cs.add(Constraint(c.origin_type_var, c.op, item))
return [c for c in cs if c not in reverse_union_cs]
def get_vars(target: Type, vars: list[TypeVarId]) -> set[TypeVarId]:
"""Find type variables for which we are solving in a target type."""
return {tv.id for tv in get_all_type_vars(target)} & set(vars)
def pre_validate_solutions(
solutions: list[Type | None],
original_vars: Sequence[TypeVarLikeType],
constraints: list[Constraint],
) -> list[Type | None]:
"""Check is each solution satisfies the upper bound of the corresponding type variable.
If it doesn't satisfy the bound, check if bound itself satisfies all constraints, and
if yes, use it instead as a fallback solution.
"""
new_solutions: list[Type | None] = []
for t, s in zip(original_vars, solutions):
if is_callable_protocol(t.upper_bound):
# This is really ad-hoc, but a proper fix would be much more complex,
# and otherwise this may cause crash in a relatively common scenario.
new_solutions.append(s)
continue
if s is not None and not is_subtype(s, t.upper_bound):
bound_satisfies_all = True
for c in constraints:
if c.op == SUBTYPE_OF and not is_subtype(t.upper_bound, c.target):
bound_satisfies_all = False
break
if c.op == SUPERTYPE_OF and not is_subtype(c.target, t.upper_bound):
bound_satisfies_all = False
break
if bound_satisfies_all:
new_solutions.append(t.upper_bound)
continue
new_solutions.append(s)
return new_solutions
def is_callable_protocol(t: Type) -> bool:
proper_t = get_proper_type(t)
if isinstance(proper_t, Instance) and proper_t.type.is_protocol:
return "__call__" in proper_t.type.protocol_members
return False
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/solve.py
|
Python
|
NOASSERTION
| 23,820 |
"""Split namespace for argparse to allow separating options by prefix.
We use this to direct some options to an Options object and some to a
regular namespace.
"""
# In its own file largely because mypyc doesn't support its use of
# __getattr__/__setattr__ and has some issues with __dict__
from __future__ import annotations
import argparse
from typing import Any
class SplitNamespace(argparse.Namespace):
def __init__(self, standard_namespace: object, alt_namespace: object, alt_prefix: str) -> None:
self.__dict__["_standard_namespace"] = standard_namespace
self.__dict__["_alt_namespace"] = alt_namespace
self.__dict__["_alt_prefix"] = alt_prefix
def _get(self) -> tuple[Any, Any]:
return (self._standard_namespace, self._alt_namespace)
def __setattr__(self, name: str, value: Any) -> None:
if name.startswith(self._alt_prefix):
setattr(self._alt_namespace, name[len(self._alt_prefix) :], value)
else:
setattr(self._standard_namespace, name, value)
def __getattr__(self, name: str) -> Any:
if name.startswith(self._alt_prefix):
return getattr(self._alt_namespace, name[len(self._alt_prefix) :])
else:
return getattr(self._standard_namespace, name)
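# Illustrative sketch (not part of the vendored module): with alt_prefix="special:",
#     ns = SplitNamespace(standard, alt, "special:")
#     ns.verbose = True            # stored on standard.verbose
#     setattr(ns, "special:x", 1)  # stored on alt.x (prefix stripped)
#     getattr(ns, "special:x")     # read back from alt.x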
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/split_namespace.py
|
Python
|
NOASSERTION
| 1,289 |
from __future__ import annotations
from contextlib import contextmanager
from typing import Final, Iterator
# These are global mutable state. Don't add anything here unless there's a very
# good reason.
class StrictOptionalState:
# Wrap this in a class since it's faster than using a module-level attribute.
def __init__(self, strict_optional: bool) -> None:
# Value varies by file being processed
self.strict_optional = strict_optional
@contextmanager
def strict_optional_set(self, value: bool) -> Iterator[None]:
saved = self.strict_optional
self.strict_optional = value
try:
yield
finally:
self.strict_optional = saved
state: Final = StrictOptionalState(strict_optional=False)
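# Typical use: 'with state.strict_optional_set(True): ...' temporarily flips the flag
# and restores the previous value when the block exits.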
find_occurrences: tuple[str, str] | None = None
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/state.py
|
Python
|
NOASSERTION
| 824 |
"""Utilities for calculating and reporting statistics about types."""
from __future__ import annotations
import os
from collections import Counter
from contextlib import contextmanager
from typing import Final, Iterator
from mypy import nodes
from mypy.argmap import map_formals_to_actuals
from mypy.nodes import (
AssignmentExpr,
AssignmentStmt,
BreakStmt,
BytesExpr,
CallExpr,
ClassDef,
ComparisonExpr,
ComplexExpr,
ContinueStmt,
EllipsisExpr,
Expression,
ExpressionStmt,
FloatExpr,
FuncDef,
Import,
ImportAll,
ImportFrom,
IndexExpr,
IntExpr,
MemberExpr,
MypyFile,
NameExpr,
Node,
OpExpr,
PassStmt,
RefExpr,
StrExpr,
TypeApplication,
UnaryExpr,
YieldFromExpr,
)
from mypy.traverser import TraverserVisitor
from mypy.typeanal import collect_all_inner_types
from mypy.types import (
AnyType,
CallableType,
FunctionLike,
Instance,
TupleType,
Type,
TypeOfAny,
TypeQuery,
TypeVarType,
get_proper_type,
get_proper_types,
)
from mypy.util import correct_relative_import
TYPE_EMPTY: Final = 0
TYPE_UNANALYZED: Final = 1 # type of non-typechecked code
TYPE_PRECISE: Final = 2
TYPE_IMPRECISE: Final = 3
TYPE_ANY: Final = 4
precision_names: Final = ["empty", "unanalyzed", "precise", "imprecise", "any"]
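# record_line() below keeps the maximum code seen for a line, so e.g. TYPE_ANY (4)
# overrides TYPE_PRECISE (2) when both occur on the same line.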
class StatisticsVisitor(TraverserVisitor):
def __init__(
self,
inferred: bool,
filename: str,
modules: dict[str, MypyFile],
typemap: dict[Expression, Type] | None = None,
all_nodes: bool = False,
visit_untyped_defs: bool = True,
) -> None:
self.inferred = inferred
self.filename = filename
self.modules = modules
self.typemap = typemap
self.all_nodes = all_nodes
self.visit_untyped_defs = visit_untyped_defs
self.num_precise_exprs = 0
self.num_imprecise_exprs = 0
self.num_any_exprs = 0
self.num_simple_types = 0
self.num_generic_types = 0
self.num_tuple_types = 0
self.num_function_types = 0
self.num_typevar_types = 0
self.num_complex_types = 0
self.num_any_types = 0
self.line = -1
self.line_map: dict[int, int] = {}
self.type_of_any_counter: Counter[int] = Counter()
self.any_line_map: dict[int, list[AnyType]] = {}
# For each scope (top level/function), whether the scope was type checked
# (annotated function).
#
# TODO: Handle --check-untyped-defs
self.checked_scopes = [True]
self.output: list[str] = []
TraverserVisitor.__init__(self)
def visit_mypy_file(self, o: MypyFile) -> None:
self.cur_mod_node = o
self.cur_mod_id = o.fullname
super().visit_mypy_file(o)
def visit_import_from(self, imp: ImportFrom) -> None:
self.process_import(imp)
def visit_import_all(self, imp: ImportAll) -> None:
self.process_import(imp)
def process_import(self, imp: ImportFrom | ImportAll) -> None:
import_id, ok = correct_relative_import(
self.cur_mod_id, imp.relative, imp.id, self.cur_mod_node.is_package_init_file()
)
if ok and import_id in self.modules:
kind = TYPE_PRECISE
else:
kind = TYPE_ANY
self.record_line(imp.line, kind)
def visit_import(self, imp: Import) -> None:
if all(id in self.modules for id, _ in imp.ids):
kind = TYPE_PRECISE
else:
kind = TYPE_ANY
self.record_line(imp.line, kind)
def visit_func_def(self, o: FuncDef) -> None:
with self.enter_scope(o):
self.line = o.line
if len(o.expanded) > 1 and o.expanded != [o] * len(o.expanded):
if o in o.expanded:
print(
"{}:{}: ERROR: cycle in function expansion; skipping".format(
self.filename, o.line
)
)
return
for defn in o.expanded:
assert isinstance(defn, FuncDef)
self.visit_func_def(defn)
else:
if o.type:
assert isinstance(o.type, CallableType)
sig = o.type
arg_types = sig.arg_types
if sig.arg_names and sig.arg_names[0] == "self" and not self.inferred:
arg_types = arg_types[1:]
for arg in arg_types:
self.type(arg)
self.type(sig.ret_type)
elif self.all_nodes:
self.record_line(self.line, TYPE_ANY)
if not o.is_dynamic() or self.visit_untyped_defs:
super().visit_func_def(o)
@contextmanager
def enter_scope(self, o: FuncDef) -> Iterator[None]:
self.checked_scopes.append(o.type is not None and self.checked_scopes[-1])
yield None
self.checked_scopes.pop()
def is_checked_scope(self) -> bool:
return self.checked_scopes[-1]
def visit_class_def(self, o: ClassDef) -> None:
self.record_line(o.line, TYPE_PRECISE) # TODO: Look at base classes
# Override this method because we don't want to analyze base_type_exprs (base_type_exprs
# are base classes in a class declaration).
# While base_type_exprs are technically expressions, the type analyzer does not visit them and
# they are not in the typemap.
for d in o.decorators:
d.accept(self)
o.defs.accept(self)
def visit_type_application(self, o: TypeApplication) -> None:
self.line = o.line
for t in o.types:
self.type(t)
super().visit_type_application(o)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
self.line = o.line
if isinstance(o.rvalue, nodes.CallExpr) and isinstance(
o.rvalue.analyzed, nodes.TypeVarExpr
):
# Type variable definition -- not a real assignment.
return
if o.type:
# If there is an explicit type, don't visit the l.h.s. as an expression
# to avoid double-counting and mishandling special forms.
self.type(o.type)
o.rvalue.accept(self)
return
elif self.inferred and not self.all_nodes:
# if self.all_nodes is set, lvalues will be visited later
for lvalue in o.lvalues:
if isinstance(lvalue, nodes.TupleExpr):
items = lvalue.items
else:
items = [lvalue]
for item in items:
if isinstance(item, RefExpr) and item.is_inferred_def:
if self.typemap is not None:
self.type(self.typemap.get(item))
super().visit_assignment_stmt(o)
def visit_expression_stmt(self, o: ExpressionStmt) -> None:
if isinstance(o.expr, (StrExpr, BytesExpr)):
# Docstring
self.record_line(o.line, TYPE_EMPTY)
else:
super().visit_expression_stmt(o)
def visit_pass_stmt(self, o: PassStmt) -> None:
self.record_precise_if_checked_scope(o)
def visit_break_stmt(self, o: BreakStmt) -> None:
self.record_precise_if_checked_scope(o)
def visit_continue_stmt(self, o: ContinueStmt) -> None:
self.record_precise_if_checked_scope(o)
def visit_name_expr(self, o: NameExpr) -> None:
if o.fullname in ("builtins.None", "builtins.True", "builtins.False", "builtins.Ellipsis"):
self.record_precise_if_checked_scope(o)
else:
self.process_node(o)
super().visit_name_expr(o)
def visit_yield_from_expr(self, o: YieldFromExpr) -> None:
if o.expr:
o.expr.accept(self)
def visit_call_expr(self, o: CallExpr) -> None:
self.process_node(o)
if o.analyzed:
o.analyzed.accept(self)
else:
o.callee.accept(self)
for a in o.args:
a.accept(self)
self.record_call_target_precision(o)
def record_call_target_precision(self, o: CallExpr) -> None:
"""Record precision of formal argument types used in a call."""
if not self.typemap or o.callee not in self.typemap:
# Type not available.
return
callee_type = get_proper_type(self.typemap[o.callee])
if isinstance(callee_type, CallableType):
self.record_callable_target_precision(o, callee_type)
else:
pass # TODO: Handle overloaded functions, etc.
def record_callable_target_precision(self, o: CallExpr, callee: CallableType) -> None:
"""Record imprecision caused by callee argument types.
This only considers arguments passed in a call expression. Arguments
with default values that aren't provided in a call arguably don't
contribute to typing imprecision at the *call site* (but they
contribute at the function definition).
"""
assert self.typemap
typemap = self.typemap
actual_to_formal = map_formals_to_actuals(
o.arg_kinds,
o.arg_names,
callee.arg_kinds,
callee.arg_names,
lambda n: typemap[o.args[n]],
)
for formals in actual_to_formal:
for n in formals:
formal = get_proper_type(callee.arg_types[n])
if isinstance(formal, AnyType):
self.record_line(o.line, TYPE_ANY)
elif is_imprecise(formal):
self.record_line(o.line, TYPE_IMPRECISE)
def visit_member_expr(self, o: MemberExpr) -> None:
self.process_node(o)
super().visit_member_expr(o)
def visit_op_expr(self, o: OpExpr) -> None:
self.process_node(o)
super().visit_op_expr(o)
def visit_comparison_expr(self, o: ComparisonExpr) -> None:
self.process_node(o)
super().visit_comparison_expr(o)
def visit_index_expr(self, o: IndexExpr) -> None:
self.process_node(o)
super().visit_index_expr(o)
def visit_assignment_expr(self, o: AssignmentExpr) -> None:
self.process_node(o)
super().visit_assignment_expr(o)
def visit_unary_expr(self, o: UnaryExpr) -> None:
self.process_node(o)
super().visit_unary_expr(o)
def visit_str_expr(self, o: StrExpr) -> None:
self.record_precise_if_checked_scope(o)
def visit_bytes_expr(self, o: BytesExpr) -> None:
self.record_precise_if_checked_scope(o)
def visit_int_expr(self, o: IntExpr) -> None:
self.record_precise_if_checked_scope(o)
def visit_float_expr(self, o: FloatExpr) -> None:
self.record_precise_if_checked_scope(o)
def visit_complex_expr(self, o: ComplexExpr) -> None:
self.record_precise_if_checked_scope(o)
def visit_ellipsis(self, o: EllipsisExpr) -> None:
self.record_precise_if_checked_scope(o)
# Helpers
def process_node(self, node: Expression) -> None:
if self.all_nodes:
if self.typemap is not None:
self.line = node.line
self.type(self.typemap.get(node))
def record_precise_if_checked_scope(self, node: Node) -> None:
if isinstance(node, Expression) and self.typemap and node not in self.typemap:
kind = TYPE_UNANALYZED
elif self.is_checked_scope():
kind = TYPE_PRECISE
else:
kind = TYPE_ANY
self.record_line(node.line, kind)
def type(self, t: Type | None) -> None:
t = get_proper_type(t)
if not t:
# If an expression does not have a type, it is often due to dead code.
# Don't count these because there can be an unanalyzed value on a line with other
# analyzed expressions, which overwrite the TYPE_UNANALYZED.
self.record_line(self.line, TYPE_UNANALYZED)
return
if isinstance(t, AnyType) and is_special_form_any(t):
# TODO: What if there is an error in special form definition?
self.record_line(self.line, TYPE_PRECISE)
return
if isinstance(t, AnyType):
self.log(" !! Any type around line %d" % self.line)
self.num_any_exprs += 1
self.record_line(self.line, TYPE_ANY)
elif (not self.all_nodes and is_imprecise(t)) or (self.all_nodes and is_imprecise2(t)):
self.log(" !! Imprecise type around line %d" % self.line)
self.num_imprecise_exprs += 1
self.record_line(self.line, TYPE_IMPRECISE)
else:
self.num_precise_exprs += 1
self.record_line(self.line, TYPE_PRECISE)
for typ in get_proper_types(collect_all_inner_types(t)) + [t]:
if isinstance(typ, AnyType):
typ = get_original_any(typ)
if is_special_form_any(typ):
continue
self.type_of_any_counter[typ.type_of_any] += 1
self.num_any_types += 1
if self.line in self.any_line_map:
self.any_line_map[self.line].append(typ)
else:
self.any_line_map[self.line] = [typ]
elif isinstance(typ, Instance):
if typ.args:
if any(is_complex(arg) for arg in typ.args):
self.num_complex_types += 1
else:
self.num_generic_types += 1
else:
self.num_simple_types += 1
elif isinstance(typ, FunctionLike):
self.num_function_types += 1
elif isinstance(typ, TupleType):
if any(is_complex(item) for item in typ.items):
self.num_complex_types += 1
else:
self.num_tuple_types += 1
elif isinstance(typ, TypeVarType):
self.num_typevar_types += 1
def log(self, string: str) -> None:
self.output.append(string)
def record_line(self, line: int, precision: int) -> None:
self.line_map[line] = max(precision, self.line_map.get(line, TYPE_EMPTY))
def dump_type_stats(
tree: MypyFile,
path: str,
modules: dict[str, MypyFile],
inferred: bool = False,
typemap: dict[Expression, Type] | None = None,
) -> None:
if is_special_module(path):
return
print(path)
visitor = StatisticsVisitor(inferred, filename=tree.fullname, modules=modules, typemap=typemap)
tree.accept(visitor)
for line in visitor.output:
print(line)
print(" ** precision **")
print(" precise ", visitor.num_precise_exprs)
print(" imprecise", visitor.num_imprecise_exprs)
print(" any ", visitor.num_any_exprs)
print(" ** kinds **")
print(" simple ", visitor.num_simple_types)
print(" generic ", visitor.num_generic_types)
print(" function ", visitor.num_function_types)
print(" tuple ", visitor.num_tuple_types)
print(" TypeVar ", visitor.num_typevar_types)
print(" complex ", visitor.num_complex_types)
print(" any ", visitor.num_any_types)
def is_special_module(path: str) -> bool:
return os.path.basename(path) in ("abc.pyi", "typing.pyi", "builtins.pyi")
def is_imprecise(t: Type) -> bool:
return t.accept(HasAnyQuery())
class HasAnyQuery(TypeQuery[bool]):
def __init__(self) -> None:
super().__init__(any)
def visit_any(self, t: AnyType) -> bool:
return not is_special_form_any(t)
def is_imprecise2(t: Type) -> bool:
return t.accept(HasAnyQuery2())
class HasAnyQuery2(HasAnyQuery):
def visit_callable_type(self, t: CallableType) -> bool:
# We don't want to flag references to functions with some Any
# argument types (etc.) since they generally don't mean trouble.
return False
def is_generic(t: Type) -> bool:
t = get_proper_type(t)
return isinstance(t, Instance) and bool(t.args)
def is_complex(t: Type) -> bool:
t = get_proper_type(t)
return is_generic(t) or isinstance(t, (FunctionLike, TupleType, TypeVarType))
def is_special_form_any(t: AnyType) -> bool:
return get_original_any(t).type_of_any == TypeOfAny.special_form
def get_original_any(t: AnyType) -> AnyType:
if t.type_of_any == TypeOfAny.from_another_any:
assert t.source_any
assert t.source_any.type_of_any != TypeOfAny.from_another_any
t = t.source_any
return t
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/stats.py
|
Python
|
NOASSERTION
| 16,769 |
"""Conversion of parse tree nodes to strings."""
from __future__ import annotations
import os
import re
from typing import TYPE_CHECKING, Any, Sequence
import mypy.nodes
from mypy.options import Options
from mypy.util import IdMapper, short_type
from mypy.visitor import NodeVisitor
if TYPE_CHECKING:
import mypy.patterns
import mypy.types
class StrConv(NodeVisitor[str]):
"""Visitor for converting a node to a human-readable string.
For example, a MypyFile node from program '1' is converted into
something like this:
MypyFile:1(
fnam
ExpressionStmt:1(
IntExpr(1)))
"""
__slots__ = ["options", "show_ids", "id_mapper"]
def __init__(self, *, show_ids: bool = False, options: Options) -> None:
self.options = options
self.show_ids = show_ids
self.id_mapper: IdMapper | None = None
if show_ids:
self.id_mapper = IdMapper()
def stringify_type(self, t: mypy.types.Type) -> str:
import mypy.types
return t.accept(mypy.types.TypeStrVisitor(id_mapper=self.id_mapper, options=self.options))
def get_id(self, o: object) -> int | None:
if self.id_mapper:
return self.id_mapper.id(o)
return None
def format_id(self, o: object) -> str:
if self.id_mapper:
return f"<{self.get_id(o)}>"
else:
return ""
def dump(self, nodes: Sequence[object], obj: mypy.nodes.Context) -> str:
"""Convert a list of items to a multiline pretty-printed string.
The tag is produced from the type name of obj and its line
number. See mypy.util.dump_tagged for a description of the nodes
argument.
"""
tag = short_type(obj) + ":" + str(obj.line)
if self.show_ids:
assert self.id_mapper is not None
tag += f"<{self.get_id(obj)}>"
return dump_tagged(nodes, tag, self)
def func_helper(self, o: mypy.nodes.FuncItem) -> list[object]:
"""Return a list in a format suitable for dump() that represents the
arguments and the body of a function. The caller can then decorate the
array with information specific to methods, global functions or
anonymous functions.
"""
args: list[mypy.nodes.Var | tuple[str, list[mypy.nodes.Node]]] = []
extra: list[tuple[str, list[mypy.nodes.Var]]] = []
for arg in o.arguments:
kind: mypy.nodes.ArgKind = arg.kind
if kind.is_required():
args.append(arg.variable)
elif kind.is_optional():
assert arg.initializer is not None
args.append(("default", [arg.variable, arg.initializer]))
elif kind == mypy.nodes.ARG_STAR:
extra.append(("VarArg", [arg.variable]))
elif kind == mypy.nodes.ARG_STAR2:
extra.append(("DictVarArg", [arg.variable]))
a: list[Any] = []
if o.type_args:
for p in o.type_args:
a.append(self.type_param(p))
if args:
a.append(("Args", args))
if o.type:
a.append(o.type)
if o.is_generator:
a.append("Generator")
a.extend(extra)
a.append(o.body)
return a
# Top-level structures
def visit_mypy_file(self, o: mypy.nodes.MypyFile) -> str:
# Skip implicit definitions.
a: list[Any] = [o.defs]
if o.is_bom:
a.insert(0, "BOM")
# Omit path to special file with name "main". This is used to simplify
# test case descriptions; the file "main" is used by default in many
# test cases.
if o.path != "main":
# Insert path. Normalize directory separators to / to unify test
# case output on all platforms.
a.insert(0, o.path.replace(os.sep, "/"))
if o.ignored_lines:
a.append("IgnoredLines(%s)" % ", ".join(str(line) for line in sorted(o.ignored_lines)))
return self.dump(a, o)
def visit_import(self, o: mypy.nodes.Import) -> str:
a = []
for id, as_id in o.ids:
if as_id is not None:
a.append(f"{id} : {as_id}")
else:
a.append(id)
return f"Import:{o.line}({', '.join(a)})"
def visit_import_from(self, o: mypy.nodes.ImportFrom) -> str:
a = []
for name, as_name in o.names:
if as_name is not None:
a.append(f"{name} : {as_name}")
else:
a.append(name)
return f"ImportFrom:{o.line}({'.' * o.relative + o.id}, [{', '.join(a)}])"
def visit_import_all(self, o: mypy.nodes.ImportAll) -> str:
return f"ImportAll:{o.line}({'.' * o.relative + o.id})"
# Definitions
def visit_func_def(self, o: mypy.nodes.FuncDef) -> str:
a = self.func_helper(o)
a.insert(0, o.name)
arg_kinds = {arg.kind for arg in o.arguments}
if len(arg_kinds & {mypy.nodes.ARG_NAMED, mypy.nodes.ARG_NAMED_OPT}) > 0:
a.insert(1, f"MaxPos({o.max_pos})")
if o.abstract_status in (mypy.nodes.IS_ABSTRACT, mypy.nodes.IMPLICITLY_ABSTRACT):
a.insert(-1, "Abstract")
if o.is_static:
a.insert(-1, "Static")
if o.is_class:
a.insert(-1, "Class")
if o.is_property:
a.insert(-1, "Property")
return self.dump(a, o)
def visit_overloaded_func_def(self, o: mypy.nodes.OverloadedFuncDef) -> str:
a: Any = o.items.copy()
if o.type:
a.insert(0, o.type)
if o.impl:
a.insert(0, o.impl)
if o.is_static:
a.insert(-1, "Static")
if o.is_class:
a.insert(-1, "Class")
return self.dump(a, o)
def visit_class_def(self, o: mypy.nodes.ClassDef) -> str:
a = [o.name, o.defs.body]
# Display base types unless they are implicitly just builtins.object
# (in this case base_type_exprs is empty).
if o.base_type_exprs:
if o.info and o.info.bases:
if len(o.info.bases) != 1 or o.info.bases[0].type.fullname != "builtins.object":
a.insert(1, ("BaseType", o.info.bases))
else:
a.insert(1, ("BaseTypeExpr", o.base_type_exprs))
if o.type_vars:
a.insert(1, ("TypeVars", o.type_vars))
if o.metaclass:
a.insert(1, f"Metaclass({o.metaclass.accept(self)})")
if o.decorators:
a.insert(1, ("Decorators", o.decorators))
if o.info and o.info._promote:
a.insert(1, f"Promote([{','.join(self.stringify_type(p) for p in o.info._promote)}])")
if o.info and o.info.tuple_type:
a.insert(1, ("TupleType", [o.info.tuple_type]))
if o.info and o.info.fallback_to_any:
a.insert(1, "FallbackToAny")
if o.type_args:
for p in reversed(o.type_args):
a.insert(1, self.type_param(p))
return self.dump(a, o)
def visit_var(self, o: mypy.nodes.Var) -> str:
lst = ""
# Add :nil line number tag if no line number is specified to remain
# compatible with old test case descriptions that assume this.
if o.line < 0:
lst = ":nil"
return "Var" + lst + "(" + o.name + ")"
def visit_global_decl(self, o: mypy.nodes.GlobalDecl) -> str:
return self.dump([o.names], o)
def visit_nonlocal_decl(self, o: mypy.nodes.NonlocalDecl) -> str:
return self.dump([o.names], o)
def visit_decorator(self, o: mypy.nodes.Decorator) -> str:
return self.dump([o.var, o.decorators, o.func], o)
# Statements
def visit_block(self, o: mypy.nodes.Block) -> str:
return self.dump(o.body, o)
def visit_expression_stmt(self, o: mypy.nodes.ExpressionStmt) -> str:
return self.dump([o.expr], o)
def visit_assignment_stmt(self, o: mypy.nodes.AssignmentStmt) -> str:
a: list[Any] = []
if len(o.lvalues) > 1:
a = [("Lvalues", o.lvalues)]
else:
a = [o.lvalues[0]]
a.append(o.rvalue)
if o.type:
a.append(o.type)
return self.dump(a, o)
def visit_operator_assignment_stmt(self, o: mypy.nodes.OperatorAssignmentStmt) -> str:
return self.dump([o.op, o.lvalue, o.rvalue], o)
def visit_while_stmt(self, o: mypy.nodes.WhileStmt) -> str:
a: list[Any] = [o.expr, o.body]
if o.else_body:
a.append(("Else", o.else_body.body))
return self.dump(a, o)
def visit_for_stmt(self, o: mypy.nodes.ForStmt) -> str:
a: list[Any] = []
if o.is_async:
a.append(("Async", ""))
a.append(o.index)
if o.index_type:
a.append(o.index_type)
a.extend([o.expr, o.body])
if o.else_body:
a.append(("Else", o.else_body.body))
return self.dump(a, o)
def visit_return_stmt(self, o: mypy.nodes.ReturnStmt) -> str:
return self.dump([o.expr], o)
def visit_if_stmt(self, o: mypy.nodes.IfStmt) -> str:
a: list[Any] = []
for i in range(len(o.expr)):
a.append(("If", [o.expr[i]]))
a.append(("Then", o.body[i].body))
if not o.else_body:
return self.dump(a, o)
else:
return self.dump([a, ("Else", o.else_body.body)], o)
def visit_break_stmt(self, o: mypy.nodes.BreakStmt) -> str:
return self.dump([], o)
def visit_continue_stmt(self, o: mypy.nodes.ContinueStmt) -> str:
return self.dump([], o)
def visit_pass_stmt(self, o: mypy.nodes.PassStmt) -> str:
return self.dump([], o)
def visit_raise_stmt(self, o: mypy.nodes.RaiseStmt) -> str:
return self.dump([o.expr, o.from_expr], o)
def visit_assert_stmt(self, o: mypy.nodes.AssertStmt) -> str:
if o.msg is not None:
return self.dump([o.expr, o.msg], o)
else:
return self.dump([o.expr], o)
def visit_await_expr(self, o: mypy.nodes.AwaitExpr) -> str:
return self.dump([o.expr], o)
def visit_del_stmt(self, o: mypy.nodes.DelStmt) -> str:
return self.dump([o.expr], o)
def visit_try_stmt(self, o: mypy.nodes.TryStmt) -> str:
a: list[Any] = [o.body]
if o.is_star:
a.append("*")
for i in range(len(o.vars)):
a.append(o.types[i])
if o.vars[i]:
a.append(o.vars[i])
a.append(o.handlers[i])
if o.else_body:
a.append(("Else", o.else_body.body))
if o.finally_body:
a.append(("Finally", o.finally_body.body))
return self.dump(a, o)
def visit_with_stmt(self, o: mypy.nodes.WithStmt) -> str:
a: list[Any] = []
if o.is_async:
a.append(("Async", ""))
for i in range(len(o.expr)):
a.append(("Expr", [o.expr[i]]))
if o.target[i]:
a.append(("Target", [o.target[i]]))
if o.unanalyzed_type:
a.append(o.unanalyzed_type)
return self.dump(a + [o.body], o)
def visit_match_stmt(self, o: mypy.nodes.MatchStmt) -> str:
a: list[Any] = [o.subject]
for i in range(len(o.patterns)):
a.append(("Pattern", [o.patterns[i]]))
if o.guards[i] is not None:
a.append(("Guard", [o.guards[i]]))
a.append(("Body", o.bodies[i].body))
return self.dump(a, o)
def visit_type_alias_stmt(self, o: mypy.nodes.TypeAliasStmt) -> str:
a: list[Any] = [o.name]
for p in o.type_args:
a.append(self.type_param(p))
a.append(o.value)
return self.dump(a, o)
def type_param(self, p: mypy.nodes.TypeParam) -> list[Any]:
a: list[Any] = []
if p.kind == mypy.nodes.PARAM_SPEC_KIND:
prefix = "**"
elif p.kind == mypy.nodes.TYPE_VAR_TUPLE_KIND:
prefix = "*"
else:
prefix = ""
a.append(prefix + p.name)
if p.upper_bound:
a.append(p.upper_bound)
if p.values:
a.append(("Values", p.values))
return [("TypeParam", a)]
# Expressions
# Simple expressions
def visit_int_expr(self, o: mypy.nodes.IntExpr) -> str:
return f"IntExpr({o.value})"
def visit_str_expr(self, o: mypy.nodes.StrExpr) -> str:
return f"StrExpr({self.str_repr(o.value)})"
def visit_bytes_expr(self, o: mypy.nodes.BytesExpr) -> str:
return f"BytesExpr({self.str_repr(o.value)})"
def str_repr(self, s: str) -> str:
s = re.sub(r"\\u[0-9a-fA-F]{4}", lambda m: "\\" + m.group(0), s)
return re.sub("[^\\x20-\\x7e]", lambda m: r"\u%.4x" % ord(m.group(0)), s)
def visit_float_expr(self, o: mypy.nodes.FloatExpr) -> str:
return f"FloatExpr({o.value})"
def visit_complex_expr(self, o: mypy.nodes.ComplexExpr) -> str:
return f"ComplexExpr({o.value})"
def visit_ellipsis(self, o: mypy.nodes.EllipsisExpr) -> str:
return "Ellipsis"
def visit_star_expr(self, o: mypy.nodes.StarExpr) -> str:
return self.dump([o.expr], o)
def visit_name_expr(self, o: mypy.nodes.NameExpr) -> str:
pretty = self.pretty_name(
o.name, o.kind, o.fullname, o.is_inferred_def or o.is_special_form, o.node
)
if isinstance(o.node, mypy.nodes.Var) and o.node.is_final:
pretty += f" = {o.node.final_value}"
return short_type(o) + "(" + pretty + ")"
def pretty_name(
self,
name: str,
kind: int | None,
fullname: str | None,
is_inferred_def: bool,
target_node: mypy.nodes.Node | None = None,
) -> str:
n = name
if is_inferred_def:
n += "*"
if target_node:
id = self.format_id(target_node)
else:
id = ""
if isinstance(target_node, mypy.nodes.MypyFile) and name == fullname:
n += id
elif kind == mypy.nodes.GDEF or (fullname != name and fullname):
# Append fully qualified name for global references.
n += f" [{fullname}{id}]"
elif kind == mypy.nodes.LDEF:
# Add tag to signify a local reference.
n += f" [l{id}]"
elif kind == mypy.nodes.MDEF:
# Add tag to signify a member reference.
n += f" [m{id}]"
else:
n += id
return n
def visit_member_expr(self, o: mypy.nodes.MemberExpr) -> str:
pretty = self.pretty_name(o.name, o.kind, o.fullname, o.is_inferred_def, o.node)
return self.dump([o.expr, pretty], o)
def visit_yield_expr(self, o: mypy.nodes.YieldExpr) -> str:
return self.dump([o.expr], o)
def visit_yield_from_expr(self, o: mypy.nodes.YieldFromExpr) -> str:
if o.expr:
return self.dump([o.expr.accept(self)], o)
else:
return self.dump([], o)
def visit_call_expr(self, o: mypy.nodes.CallExpr) -> str:
if o.analyzed:
return o.analyzed.accept(self)
args: list[mypy.nodes.Expression] = []
extra: list[str | tuple[str, list[Any]]] = []
for i, kind in enumerate(o.arg_kinds):
if kind in [mypy.nodes.ARG_POS, mypy.nodes.ARG_STAR]:
args.append(o.args[i])
if kind == mypy.nodes.ARG_STAR:
extra.append("VarArg")
elif kind == mypy.nodes.ARG_NAMED:
extra.append(("KwArgs", [o.arg_names[i], o.args[i]]))
elif kind == mypy.nodes.ARG_STAR2:
extra.append(("DictVarArg", [o.args[i]]))
else:
raise RuntimeError(f"unknown kind {kind}")
a: list[Any] = [o.callee, ("Args", args)]
return self.dump(a + extra, o)
def visit_op_expr(self, o: mypy.nodes.OpExpr) -> str:
if o.analyzed:
return o.analyzed.accept(self)
return self.dump([o.op, o.left, o.right], o)
def visit_comparison_expr(self, o: mypy.nodes.ComparisonExpr) -> str:
return self.dump([o.operators, o.operands], o)
def visit_cast_expr(self, o: mypy.nodes.CastExpr) -> str:
return self.dump([o.expr, o.type], o)
def visit_assert_type_expr(self, o: mypy.nodes.AssertTypeExpr) -> str:
return self.dump([o.expr, o.type], o)
def visit_reveal_expr(self, o: mypy.nodes.RevealExpr) -> str:
if o.kind == mypy.nodes.REVEAL_TYPE:
return self.dump([o.expr], o)
else:
# REVEAL_LOCALS
return self.dump([o.local_nodes], o)
def visit_assignment_expr(self, o: mypy.nodes.AssignmentExpr) -> str:
return self.dump([o.target, o.value], o)
def visit_unary_expr(self, o: mypy.nodes.UnaryExpr) -> str:
return self.dump([o.op, o.expr], o)
def visit_list_expr(self, o: mypy.nodes.ListExpr) -> str:
return self.dump(o.items, o)
def visit_dict_expr(self, o: mypy.nodes.DictExpr) -> str:
return self.dump([[k, v] for k, v in o.items], o)
def visit_set_expr(self, o: mypy.nodes.SetExpr) -> str:
return self.dump(o.items, o)
def visit_tuple_expr(self, o: mypy.nodes.TupleExpr) -> str:
return self.dump(o.items, o)
def visit_index_expr(self, o: mypy.nodes.IndexExpr) -> str:
if o.analyzed:
return o.analyzed.accept(self)
return self.dump([o.base, o.index], o)
def visit_super_expr(self, o: mypy.nodes.SuperExpr) -> str:
return self.dump([o.name, o.call], o)
def visit_type_application(self, o: mypy.nodes.TypeApplication) -> str:
return self.dump([o.expr, ("Types", o.types)], o)
def visit_type_var_expr(self, o: mypy.nodes.TypeVarExpr) -> str:
import mypy.types
a: list[Any] = []
if o.variance == mypy.nodes.COVARIANT:
a += ["Variance(COVARIANT)"]
if o.variance == mypy.nodes.CONTRAVARIANT:
a += ["Variance(CONTRAVARIANT)"]
if o.values:
a += [("Values", o.values)]
if not mypy.types.is_named_instance(o.upper_bound, "builtins.object"):
a += [f"UpperBound({self.stringify_type(o.upper_bound)})"]
return self.dump(a, o)
def visit_paramspec_expr(self, o: mypy.nodes.ParamSpecExpr) -> str:
import mypy.types
a: list[Any] = []
if o.variance == mypy.nodes.COVARIANT:
a += ["Variance(COVARIANT)"]
if o.variance == mypy.nodes.CONTRAVARIANT:
a += ["Variance(CONTRAVARIANT)"]
if not mypy.types.is_named_instance(o.upper_bound, "builtins.object"):
a += [f"UpperBound({self.stringify_type(o.upper_bound)})"]
return self.dump(a, o)
def visit_type_var_tuple_expr(self, o: mypy.nodes.TypeVarTupleExpr) -> str:
import mypy.types
a: list[Any] = []
if o.variance == mypy.nodes.COVARIANT:
a += ["Variance(COVARIANT)"]
if o.variance == mypy.nodes.CONTRAVARIANT:
a += ["Variance(CONTRAVARIANT)"]
if not mypy.types.is_named_instance(o.upper_bound, "builtins.object"):
a += [f"UpperBound({self.stringify_type(o.upper_bound)})"]
return self.dump(a, o)
def visit_type_alias_expr(self, o: mypy.nodes.TypeAliasExpr) -> str:
return f"TypeAliasExpr({self.stringify_type(o.node.target)})"
def visit_namedtuple_expr(self, o: mypy.nodes.NamedTupleExpr) -> str:
return f"NamedTupleExpr:{o.line}({o.info.name}, {self.stringify_type(o.info.tuple_type) if o.info.tuple_type is not None else None})"
def visit_enum_call_expr(self, o: mypy.nodes.EnumCallExpr) -> str:
return f"EnumCallExpr:{o.line}({o.info.name}, {o.items})"
def visit_typeddict_expr(self, o: mypy.nodes.TypedDictExpr) -> str:
return f"TypedDictExpr:{o.line}({o.info.name})"
def visit__promote_expr(self, o: mypy.nodes.PromoteExpr) -> str:
return f"PromoteExpr:{o.line}({self.stringify_type(o.type)})"
def visit_newtype_expr(self, o: mypy.nodes.NewTypeExpr) -> str:
return f"NewTypeExpr:{o.line}({o.name}, {self.dump([o.old_type], o)})"
def visit_lambda_expr(self, o: mypy.nodes.LambdaExpr) -> str:
a = self.func_helper(o)
return self.dump(a, o)
def visit_generator_expr(self, o: mypy.nodes.GeneratorExpr) -> str:
condlists = o.condlists if any(o.condlists) else None
return self.dump([o.left_expr, o.indices, o.sequences, condlists], o)
def visit_list_comprehension(self, o: mypy.nodes.ListComprehension) -> str:
return self.dump([o.generator], o)
def visit_set_comprehension(self, o: mypy.nodes.SetComprehension) -> str:
return self.dump([o.generator], o)
def visit_dictionary_comprehension(self, o: mypy.nodes.DictionaryComprehension) -> str:
condlists = o.condlists if any(o.condlists) else None
return self.dump([o.key, o.value, o.indices, o.sequences, condlists], o)
def visit_conditional_expr(self, o: mypy.nodes.ConditionalExpr) -> str:
return self.dump([("Condition", [o.cond]), o.if_expr, o.else_expr], o)
def visit_slice_expr(self, o: mypy.nodes.SliceExpr) -> str:
a: list[Any] = [o.begin_index, o.end_index, o.stride]
if not a[0]:
a[0] = "<empty>"
if not a[1]:
a[1] = "<empty>"
return self.dump(a, o)
def visit_temp_node(self, o: mypy.nodes.TempNode) -> str:
return self.dump([o.type], o)
def visit_as_pattern(self, o: mypy.patterns.AsPattern) -> str:
return self.dump([o.pattern, o.name], o)
def visit_or_pattern(self, o: mypy.patterns.OrPattern) -> str:
return self.dump(o.patterns, o)
def visit_value_pattern(self, o: mypy.patterns.ValuePattern) -> str:
return self.dump([o.expr], o)
def visit_singleton_pattern(self, o: mypy.patterns.SingletonPattern) -> str:
return self.dump([o.value], o)
def visit_sequence_pattern(self, o: mypy.patterns.SequencePattern) -> str:
return self.dump(o.patterns, o)
def visit_starred_pattern(self, o: mypy.patterns.StarredPattern) -> str:
return self.dump([o.capture], o)
def visit_mapping_pattern(self, o: mypy.patterns.MappingPattern) -> str:
a: list[Any] = []
for i in range(len(o.keys)):
a.append(("Key", [o.keys[i]]))
a.append(("Value", [o.values[i]]))
if o.rest is not None:
a.append(("Rest", [o.rest]))
return self.dump(a, o)
def visit_class_pattern(self, o: mypy.patterns.ClassPattern) -> str:
a: list[Any] = [o.class_ref]
if len(o.positionals) > 0:
a.append(("Positionals", o.positionals))
for i in range(len(o.keyword_keys)):
a.append(("Keyword", [o.keyword_keys[i], o.keyword_values[i]]))
return self.dump(a, o)
def dump_tagged(nodes: Sequence[object], tag: str | None, str_conv: StrConv) -> str:
"""Convert an array into a pretty-printed multiline string representation.
The format is
tag(
item1..
itemN)
Individual items are formatted like this:
- arrays are flattened
- pairs (str, array) are converted recursively, so that str is the tag
- other items are converted to strings and indented
"""
from mypy.types import Type, TypeStrVisitor
a: list[str] = []
if tag:
a.append(tag + "(")
for n in nodes:
if isinstance(n, list):
if n:
a.append(dump_tagged(n, None, str_conv))
elif isinstance(n, tuple):
s = dump_tagged(n[1], n[0], str_conv)
a.append(indent(s, 2))
elif isinstance(n, mypy.nodes.Node):
a.append(indent(n.accept(str_conv), 2))
elif isinstance(n, Type):
a.append(
indent(n.accept(TypeStrVisitor(str_conv.id_mapper, options=str_conv.options)), 2)
)
elif n is not None:
a.append(indent(str(n), 2))
if tag:
a[-1] += ")"
return "\n".join(a)
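# For example, dumping a module whose only statement is the expression `1`
# yields roughly:
#   MypyFile:1(
#     ExpressionStmt:1(
#       IntExpr(1)))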
def indent(s: str, n: int) -> str:
"""Indent all the lines in s (separated by newlines) by n spaces."""
s = " " * n + s
s = s.replace("\n", "\n" + " " * n)
return s
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/strconv.py
|
Python
|
NOASSERTION
| 24,325 |
"""Parsing/inferring signatures from documentation.
This module provides several functions to generate better stubs using
docstrings and Sphinx docs (.rst files).
"""
from __future__ import annotations
import contextlib
import io
import keyword
import re
import tokenize
from typing import Any, Final, MutableMapping, MutableSequence, NamedTuple, Sequence, Tuple
from typing_extensions import TypeAlias as _TypeAlias
import mypy.util
# Type alias for signature strings in the format ('func_name', '(arg, opt_arg=False)').
Sig: _TypeAlias = Tuple[str, str]
_TYPE_RE: Final = re.compile(r"^[a-zA-Z_][\w\[\], .\"\']*(\.[a-zA-Z_][\w\[\], ]*)*$")
_ARG_NAME_RE: Final = re.compile(r"\**[A-Za-z_][A-Za-z0-9_]*$")
def is_valid_type(s: str) -> bool:
"""Try to determine whether a string might be a valid type annotation."""
if s in ("True", "False", "retval"):
return False
if "," in s and "[" not in s:
return False
return _TYPE_RE.match(s) is not None
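# For instance, is_valid_type("List[int]") and is_valid_type("foo.Bar") return
# True, while is_valid_type("True") and is_valid_type("a, b") return False.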
class ArgSig:
"""Signature info for a single argument."""
def __init__(
self,
name: str,
type: str | None = None,
*,
default: bool = False,
default_value: str = "...",
) -> None:
self.name = name
self.type = type
# Does this argument have a default value?
self.default = default
self.default_value = default_value
def is_star_arg(self) -> bool:
return self.name.startswith("*") and not self.name.startswith("**")
def is_star_kwarg(self) -> bool:
return self.name.startswith("**")
def __repr__(self) -> str:
return "ArgSig(name={}, type={}, default={})".format(
repr(self.name), repr(self.type), repr(self.default)
)
def __eq__(self, other: Any) -> bool:
if isinstance(other, ArgSig):
return (
self.name == other.name
and self.type == other.type
and self.default == other.default
and self.default_value == other.default_value
)
return False
class FunctionSig(NamedTuple):
name: str
args: list[ArgSig]
ret_type: str | None
def is_special_method(self) -> bool:
return bool(
self.name.startswith("__")
and self.name.endswith("__")
and self.args
and self.args[0].name in ("self", "cls")
)
def has_catchall_args(self) -> bool:
"""Return if this signature has catchall args: (*args, **kwargs)"""
if self.args and self.args[0].name in ("self", "cls"):
args = self.args[1:]
else:
args = self.args
return (
len(args) == 2
and all(a.type in (None, "object", "Any", "typing.Any") for a in args)
and args[0].is_star_arg()
and args[1].is_star_kwarg()
)
def is_catchall_signature(self) -> bool:
"""Return if this signature is the catchall identity: (*args, **kwargs) -> Any"""
return self.has_catchall_args() and self.ret_type in (None, "Any", "typing.Any")
def format_sig(
self,
indent: str = "",
is_async: bool = False,
any_val: str | None = None,
docstring: str | None = None,
) -> str:
args: list[str] = []
for arg in self.args:
arg_def = arg.name
if arg_def in keyword.kwlist:
arg_def = "_" + arg_def
if (
arg.type is None
and any_val is not None
and arg.name not in ("self", "cls")
and not arg.name.startswith("*")
):
arg_type: str | None = any_val
else:
arg_type = arg.type
if arg_type:
arg_def += ": " + arg_type
if arg.default:
arg_def += f" = {arg.default_value}"
elif arg.default:
arg_def += f"={arg.default_value}"
args.append(arg_def)
retfield = ""
ret_type = self.ret_type if self.ret_type else any_val
if ret_type is not None:
retfield = " -> " + ret_type
prefix = "async " if is_async else ""
sig = "{indent}{prefix}def {name}({args}){ret}:".format(
indent=indent, prefix=prefix, name=self.name, args=", ".join(args), ret=retfield
)
if docstring:
suffix = f"\n{indent} {mypy.util.quote_docstring(docstring)}"
else:
suffix = " ..."
return f"{sig}{suffix}"
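# As an illustration, FunctionSig("f", [ArgSig("x", "int")], "str").format_sig()
# produces 'def f(x: int) -> str: ...'.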
# States of the docstring parser.
STATE_INIT: Final = 1
STATE_FUNCTION_NAME: Final = 2
STATE_ARGUMENT_LIST: Final = 3
STATE_ARGUMENT_TYPE: Final = 4
STATE_ARGUMENT_DEFAULT: Final = 5
STATE_RETURN_VALUE: Final = 6
STATE_OPEN_BRACKET: Final = 7 # For generic types.
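# The parser below scans tokenized docstring text for signatures such as
# "func(arg0: int, arg1=True) -> str", pushing and popping the states above
# while it accumulates argument names, types, defaults and the return type.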
class DocStringParser:
"""Parse function signatures in documentation."""
def __init__(self, function_name: str) -> None:
        # Only search for signatures of the function with this name.
self.function_name = function_name
self.state = [STATE_INIT]
self.accumulator = ""
self.arg_type: str | None = None
self.arg_name = ""
self.arg_default: str | None = None
self.ret_type = "Any"
self.found = False
self.args: list[ArgSig] = []
# Valid signatures found so far.
self.signatures: list[FunctionSig] = []
def add_token(self, token: tokenize.TokenInfo) -> None:
"""Process next token from the token stream."""
if (
token.type == tokenize.NAME
and token.string == self.function_name
and self.state[-1] == STATE_INIT
):
self.state.append(STATE_FUNCTION_NAME)
elif (
token.type == tokenize.OP
and token.string == "("
and self.state[-1] == STATE_FUNCTION_NAME
):
self.state.pop()
self.accumulator = ""
self.found = True
self.state.append(STATE_ARGUMENT_LIST)
elif self.state[-1] == STATE_FUNCTION_NAME:
# Reset state, function name not followed by '('.
self.state.pop()
elif (
token.type == tokenize.OP
and token.string in ("[", "(", "{")
and self.state[-1] != STATE_INIT
):
self.accumulator += token.string
self.state.append(STATE_OPEN_BRACKET)
elif (
token.type == tokenize.OP
and token.string in ("]", ")", "}")
and self.state[-1] == STATE_OPEN_BRACKET
):
self.accumulator += token.string
self.state.pop()
elif (
token.type == tokenize.OP
and token.string == ":"
and self.state[-1] == STATE_ARGUMENT_LIST
):
self.arg_name = self.accumulator
self.accumulator = ""
self.state.append(STATE_ARGUMENT_TYPE)
elif (
token.type == tokenize.OP
and token.string == "="
and self.state[-1] in (STATE_ARGUMENT_LIST, STATE_ARGUMENT_TYPE)
):
if self.state[-1] == STATE_ARGUMENT_TYPE:
self.arg_type = self.accumulator
self.state.pop()
else:
self.arg_name = self.accumulator
self.accumulator = ""
self.state.append(STATE_ARGUMENT_DEFAULT)
elif (
token.type == tokenize.OP
and token.string in (",", ")")
and self.state[-1]
in (STATE_ARGUMENT_LIST, STATE_ARGUMENT_DEFAULT, STATE_ARGUMENT_TYPE)
):
if self.state[-1] == STATE_ARGUMENT_DEFAULT:
self.arg_default = self.accumulator
self.state.pop()
elif self.state[-1] == STATE_ARGUMENT_TYPE:
self.arg_type = self.accumulator
self.state.pop()
elif self.state[-1] == STATE_ARGUMENT_LIST:
self.arg_name = self.accumulator
if not (
token.string == ")" and self.accumulator.strip() == ""
) and not _ARG_NAME_RE.match(self.arg_name):
# Invalid argument name.
self.reset()
return
if token.string == ")":
self.state.pop()
# arg_name is empty when there are no args. e.g. func()
if self.arg_name:
if self.arg_type and not is_valid_type(self.arg_type):
# wrong type, use Any
self.args.append(
ArgSig(name=self.arg_name, type=None, default=bool(self.arg_default))
)
else:
self.args.append(
ArgSig(
name=self.arg_name, type=self.arg_type, default=bool(self.arg_default)
)
)
self.arg_name = ""
self.arg_type = None
self.arg_default = None
self.accumulator = ""
elif token.type == tokenize.OP and token.string == "->" and self.state[-1] == STATE_INIT:
self.accumulator = ""
self.state.append(STATE_RETURN_VALUE)
        # ENDMARKER is necessary for Python 3.4 and 3.5.
elif token.type in (tokenize.NEWLINE, tokenize.ENDMARKER) and self.state[-1] in (
STATE_INIT,
STATE_RETURN_VALUE,
):
if self.state[-1] == STATE_RETURN_VALUE:
if not is_valid_type(self.accumulator):
self.reset()
return
self.ret_type = self.accumulator
self.accumulator = ""
self.state.pop()
if self.found:
self.signatures.append(
FunctionSig(name=self.function_name, args=self.args, ret_type=self.ret_type)
)
self.found = False
self.args = []
self.ret_type = "Any"
# Leave state as INIT.
else:
self.accumulator += token.string
def reset(self) -> None:
self.state = [STATE_INIT]
self.args = []
self.found = False
self.accumulator = ""
def get_signatures(self) -> list[FunctionSig]:
"""Return sorted copy of the list of signatures found so far."""
def has_arg(name: str, signature: FunctionSig) -> bool:
return any(x.name == name for x in signature.args)
def args_kwargs(signature: FunctionSig) -> bool:
return has_arg("*args", signature) and has_arg("**kwargs", signature)
# Move functions with (*args, **kwargs) in their signature to last place.
return sorted(self.signatures, key=lambda x: 1 if args_kwargs(x) else 0)
def infer_sig_from_docstring(docstr: str | None, name: str) -> list[FunctionSig] | None:
"""Convert function signature to list of FunctionSig
    Look for signatures of the named function in the docstring. A signature is a string of
    the format <function_name>(<signature>) -> <return type>, possibly without
    the return type.
    Returns an empty list when no signature is found, one signature in the typical case, or
    multiple signatures if the docstring specifies several signatures for an overloaded function.
    Returns None if the docstring is empty.
Arguments:
* docstr: docstring
* name: name of function for which signatures are to be found
"""
if not (isinstance(docstr, str) and docstr):
return None
state = DocStringParser(name)
# Return all found signatures, even if there is a parse error after some are found.
with contextlib.suppress(tokenize.TokenError):
try:
tokens = tokenize.tokenize(io.BytesIO(docstr.encode("utf-8")).readline)
for token in tokens:
state.add_token(token)
except IndentationError:
return None
sigs = state.get_signatures()
def is_unique_args(sig: FunctionSig) -> bool:
"""return true if function argument names are unique"""
return len(sig.args) == len({arg.name for arg in sig.args})
# Return only signatures that have unique argument names. Mypy fails on non-unique arg names.
return [sig for sig in sigs if is_unique_args(sig)]
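# For example, infer_sig_from_docstring("func(x: int) -> str", "func") returns
# [FunctionSig(name="func", args=[ArgSig(name="x", type="int")], ret_type="str")].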
def infer_arg_sig_from_anon_docstring(docstr: str) -> list[ArgSig]:
    """Convert a signature in the form "(self: TestClass, arg0: str='ada')" to a list of ArgSig."""
ret = infer_sig_from_docstring("stub" + docstr, "stub")
if ret:
return ret[0].args
return []
def infer_ret_type_sig_from_docstring(docstr: str, name: str) -> str | None:
    """Convert a signature in the form "func(self: TestClass, arg0) -> int" to its return type."""
ret = infer_sig_from_docstring(docstr, name)
if ret:
return ret[0].ret_type
return None
def infer_ret_type_sig_from_anon_docstring(docstr: str) -> str | None:
    """Convert a signature in the form "(self: TestClass, arg0) -> int" to its return type."""
lines = ["stub" + line.strip() for line in docstr.splitlines() if line.strip().startswith("(")]
return infer_ret_type_sig_from_docstring("".join(lines), "stub")
def parse_signature(sig: str) -> tuple[str, list[str], list[str]] | None:
    """Split a function signature into its name, positional and optional arguments.
    The expected format is "func_name(arg, opt_arg=False)". Return the name of the function
and lists of positional and optional argument names.
"""
m = re.match(r"([.a-zA-Z0-9_]+)\(([^)]*)\)", sig)
if not m:
return None
name = m.group(1)
name = name.split(".")[-1]
arg_string = m.group(2)
if not arg_string.strip():
# Simple case -- no arguments.
return name, [], []
args = [arg.strip() for arg in arg_string.split(",")]
positional = []
optional = []
i = 0
while i < len(args):
# Accept optional arguments as in both formats: x=None and [x].
if args[i].startswith("[") or "=" in args[i]:
break
positional.append(args[i].rstrip("["))
i += 1
if args[i - 1].endswith("["):
break
while i < len(args):
arg = args[i]
arg = arg.strip("[]")
arg = arg.split("=")[0]
optional.append(arg)
i += 1
return name, positional, optional
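# For example, parse_signature("func(x, y=False)") returns ("func", ["x"], ["y"]).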
def build_signature(positional: Sequence[str], optional: Sequence[str]) -> str:
"""Build function signature from lists of positional and optional argument names."""
args: MutableSequence[str] = []
args.extend(positional)
for arg in optional:
if arg.startswith("*"):
args.append(arg)
else:
args.append(f"{arg}=...")
sig = f"({', '.join(args)})"
# Ad-hoc fixes.
sig = sig.replace("(self)", "")
return sig
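# For example, build_signature(["x", "y"], ["z"]) returns "(x, y, z=...)".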
def parse_all_signatures(lines: Sequence[str]) -> tuple[list[Sig], list[Sig]]:
"""Parse all signatures in a given reST document.
Return lists of found signatures for functions and classes.
"""
sigs = []
class_sigs = []
for line in lines:
line = line.strip()
m = re.match(r"\.\. *(function|method|class) *:: *[a-zA-Z_]", line)
if m:
sig = line.split("::")[1].strip()
parsed = parse_signature(sig)
if parsed:
name, fixed, optional = parsed
if m.group(1) != "class":
sigs.append((name, build_signature(fixed, optional)))
else:
class_sigs.append((name, build_signature(fixed, optional)))
return sorted(sigs), sorted(class_sigs)
def find_unique_signatures(sigs: Sequence[Sig]) -> list[Sig]:
    """Remove names whose found signatures conflict; keep one signature per remaining name."""
sig_map: MutableMapping[str, list[str]] = {}
for name, sig in sigs:
sig_map.setdefault(name, []).append(sig)
result = []
for name, name_sigs in sig_map.items():
if len(set(name_sigs)) == 1:
result.append((name, name_sigs[0]))
return sorted(result)
def infer_prop_type_from_docstring(docstr: str | None) -> str | None:
"""Check for Google/Numpy style docstring type annotation for a property.
The docstring has the format "<type>: <descriptions>".
In the type string, we allow the following characters:
* dot: because sometimes classes are annotated using full path
* brackets: to allow type hints like List[int]
* comma/space: things like Tuple[int, int]
"""
if not docstr:
return None
test_str = r"^([a-zA-Z0-9_, \.\[\]]*): "
m = re.match(test_str, docstr)
return m.group(1) if m else None
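# For example, infer_prop_type_from_docstring("int: number of widgets") returns "int".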
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/stubdoc.py
|
Python
|
NOASSERTION
| 16,804 |
#!/usr/bin/env python3
"""Generator of dynamically typed draft stubs for arbitrary modules.
The logic of this script can be split in three steps:
* parsing options and finding sources:
  - use runtime imports by default (to also find C modules)
- or use mypy's mechanisms, if importing is prohibited
* (optionally) semantically analysing the sources using mypy (as a single set)
* emitting the stubs text:
- for Python modules: from ASTs using ASTStubGenerator
- for C modules using runtime introspection and (optionally) Sphinx docs
During the first and third steps some problematic files can be skipped, but any
blocking error during the second step will cause the whole program to stop.
Basic usage:
$ stubgen foo.py bar.py some_directory
=> Generate out/foo.pyi, out/bar.pyi, and stubs for some_directory (recursively).
$ stubgen -m urllib.parse
=> Generate out/urllib/parse.pyi.
$ stubgen -p urllib
=> Generate stubs for whole urllib package (recursively).
For C modules, you can get more precise function signatures by parsing .rst (Sphinx)
documentation for extra information. For this, use the --doc-dir option:
$ stubgen --doc-dir <DIR>/Python-3.4.2/Doc/library -m curses
Note: The generated stubs should be verified manually.
TODO:
- maybe use .rst docs also for Python modules
- maybe export more imported names if there is no __all__ (this affects ssl.SSLError, for example)
- a quick and dirty heuristic would be to turn this on if a module has something like
'from x import y as _y'
- we don't seem to always detect properties ('closed' in 'io', for example)
"""
from __future__ import annotations
import argparse
import keyword
import os
import os.path
import sys
import traceback
from typing import Final, Iterable, Iterator
import mypy.build
import mypy.mixedtraverser
import mypy.parse
import mypy.traverser
import mypy.util
import mypy.version
from mypy.build import build
from mypy.errors import CompileError, Errors
from mypy.find_sources import InvalidSourceList, create_source_list
from mypy.modulefinder import (
BuildSource,
FindModuleCache,
ModuleNotFoundReason,
SearchPaths,
default_lib_path,
)
from mypy.moduleinspect import ModuleInspect, is_pyc_only
from mypy.nodes import (
ARG_NAMED,
ARG_POS,
ARG_STAR,
ARG_STAR2,
IS_ABSTRACT,
NOT_ABSTRACT,
AssignmentStmt,
Block,
BytesExpr,
CallExpr,
ClassDef,
ComparisonExpr,
ComplexExpr,
Decorator,
DictExpr,
EllipsisExpr,
Expression,
ExpressionStmt,
FloatExpr,
FuncBase,
FuncDef,
IfStmt,
Import,
ImportAll,
ImportFrom,
IndexExpr,
IntExpr,
ListExpr,
MemberExpr,
MypyFile,
NameExpr,
OpExpr,
OverloadedFuncDef,
SetExpr,
StarExpr,
Statement,
StrExpr,
TempNode,
TupleExpr,
TypeInfo,
UnaryExpr,
Var,
)
from mypy.options import Options as MypyOptions
from mypy.sharedparse import MAGIC_METHODS_POS_ARGS_ONLY
from mypy.stubdoc import ArgSig, FunctionSig
from mypy.stubgenc import InspectionStubGenerator, generate_stub_for_c_module
from mypy.stubutil import (
TYPING_BUILTIN_REPLACEMENTS,
BaseStubGenerator,
CantImport,
ClassInfo,
FunctionContext,
common_dir_prefix,
fail_missing,
find_module_path_and_all_py3,
generate_guarded,
infer_method_arg_types,
infer_method_ret_type,
remove_misplaced_type_comments,
report_missing,
walk_packages,
)
from mypy.traverser import (
all_yield_expressions,
has_return_statement,
has_yield_expression,
has_yield_from_expression,
)
from mypy.types import (
OVERLOAD_NAMES,
TPDICT_NAMES,
TYPED_NAMEDTUPLE_NAMES,
AnyType,
CallableType,
Instance,
TupleType,
Type,
UnboundType,
get_proper_type,
)
from mypy.visitor import NodeVisitor
# Common ways of naming package containing vendored modules.
VENDOR_PACKAGES: Final = ["packages", "vendor", "vendored", "_vendor", "_vendored_packages"]
# Avoid some file names that are unnecessary or likely to cause trouble (\n for end of path).
BLACKLIST: Final = [
"/six.py\n", # Likely vendored six; too dynamic for us to handle
"/vendored/", # Vendored packages
"/vendor/", # Vendored packages
"/_vendor/",
"/_vendored_packages/",
]
# These methods are expected to always return a non-trivial value.
METHODS_WITH_RETURN_VALUE: Final = {
"__ne__",
"__eq__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__hash__",
"__iter__",
}
class Options:
"""Represents stubgen options.
This class is mutable to simplify testing.
"""
def __init__(
self,
pyversion: tuple[int, int],
no_import: bool,
inspect: bool,
doc_dir: str,
search_path: list[str],
interpreter: str,
parse_only: bool,
ignore_errors: bool,
include_private: bool,
output_dir: str,
modules: list[str],
packages: list[str],
files: list[str],
verbose: bool,
quiet: bool,
export_less: bool,
include_docstrings: bool,
) -> None:
# See parse_options for descriptions of the flags.
self.pyversion = pyversion
self.no_import = no_import
self.inspect = inspect
self.doc_dir = doc_dir
self.search_path = search_path
self.interpreter = interpreter
self.decointerpreter = interpreter
self.parse_only = parse_only
self.ignore_errors = ignore_errors
self.include_private = include_private
self.output_dir = output_dir
self.modules = modules
self.packages = packages
self.files = files
self.verbose = verbose
self.quiet = quiet
self.export_less = export_less
self.include_docstrings = include_docstrings
class StubSource:
"""A single source for stub: can be a Python or C module.
A simple extension of BuildSource that also carries the AST and
the value of __all__ detected at runtime.
"""
def __init__(
self, module: str, path: str | None = None, runtime_all: list[str] | None = None
) -> None:
self.source = BuildSource(path, module, None)
self.runtime_all = runtime_all
self.ast: MypyFile | None = None
def __repr__(self) -> str:
return f"StubSource({self.source})"
@property
def module(self) -> str:
return self.source.module
@property
def path(self) -> str | None:
return self.source.path
# What was generated previously in the stub file. We keep track of these to generate
# nicely formatted output (add empty line between non-empty classes, for example).
EMPTY: Final = "EMPTY"
FUNC: Final = "FUNC"
CLASS: Final = "CLASS"
EMPTY_CLASS: Final = "EMPTY_CLASS"
VAR: Final = "VAR"
NOT_IN_ALL: Final = "NOT_IN_ALL"
# Indicates that we failed to generate a reasonable output
# for a given node. These should be manually replaced by a user.
ERROR_MARKER: Final = "<ERROR>"
class AliasPrinter(NodeVisitor[str]):
"""Visitor used to collect type aliases _and_ type variable definitions.
    Visit the r.h.s. of a definition to get the string representation of the type alias.
"""
def __init__(self, stubgen: ASTStubGenerator) -> None:
self.stubgen = stubgen
super().__init__()
def visit_call_expr(self, node: CallExpr) -> str:
# Call expressions are not usually types, but we also treat `X = TypeVar(...)` as a
# type alias that has to be preserved (even if TypeVar is not the same as an alias)
callee = node.callee.accept(self)
args = []
for name, arg, kind in zip(node.arg_names, node.args, node.arg_kinds):
if kind == ARG_POS:
args.append(arg.accept(self))
elif kind == ARG_STAR:
args.append("*" + arg.accept(self))
elif kind == ARG_STAR2:
args.append("**" + arg.accept(self))
elif kind == ARG_NAMED:
args.append(f"{name}={arg.accept(self)}")
else:
raise ValueError(f"Unknown argument kind {kind} in call")
return f"{callee}({', '.join(args)})"
def _visit_ref_expr(self, node: NameExpr | MemberExpr) -> str:
fullname = self.stubgen.get_fullname(node)
if fullname in TYPING_BUILTIN_REPLACEMENTS:
return self.stubgen.add_name(TYPING_BUILTIN_REPLACEMENTS[fullname], require=False)
qualname = get_qualified_name(node)
self.stubgen.import_tracker.require_name(qualname)
return qualname
def visit_name_expr(self, node: NameExpr) -> str:
return self._visit_ref_expr(node)
def visit_member_expr(self, o: MemberExpr) -> str:
return self._visit_ref_expr(o)
def _visit_literal_node(
self, node: StrExpr | BytesExpr | IntExpr | FloatExpr | ComplexExpr
) -> str:
return repr(node.value)
def visit_str_expr(self, node: StrExpr) -> str:
return self._visit_literal_node(node)
def visit_bytes_expr(self, node: BytesExpr) -> str:
return f"b{self._visit_literal_node(node)}"
def visit_int_expr(self, node: IntExpr) -> str:
return self._visit_literal_node(node)
def visit_float_expr(self, node: FloatExpr) -> str:
return self._visit_literal_node(node)
def visit_complex_expr(self, node: ComplexExpr) -> str:
return self._visit_literal_node(node)
def visit_index_expr(self, node: IndexExpr) -> str:
base_fullname = self.stubgen.get_fullname(node.base)
if base_fullname == "typing.Union":
if isinstance(node.index, TupleExpr):
return " | ".join([item.accept(self) for item in node.index.items])
return node.index.accept(self)
if base_fullname == "typing.Optional":
if isinstance(node.index, TupleExpr):
return self.stubgen.add_name("_typeshed.Incomplete")
return f"{node.index.accept(self)} | None"
base = node.base.accept(self)
index = node.index.accept(self)
if len(index) > 2 and index.startswith("(") and index.endswith(")"):
index = index[1:-1]
return f"{base}[{index}]"
def visit_tuple_expr(self, node: TupleExpr) -> str:
return f"({', '.join(n.accept(self) for n in node.items)})"
def visit_list_expr(self, node: ListExpr) -> str:
return f"[{', '.join(n.accept(self) for n in node.items)}]"
def visit_dict_expr(self, o: DictExpr) -> str:
dict_items = []
for key, value in o.items:
# This is currently only used for TypedDict where all keys are strings.
assert isinstance(key, StrExpr)
dict_items.append(f"{key.accept(self)}: {value.accept(self)}")
return f"{{{', '.join(dict_items)}}}"
def visit_ellipsis(self, node: EllipsisExpr) -> str:
return "..."
def visit_op_expr(self, o: OpExpr) -> str:
return f"{o.left.accept(self)} {o.op} {o.right.accept(self)}"
def visit_star_expr(self, o: StarExpr) -> str:
return f"*{o.expr.accept(self)}"
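# As a rough illustration, printing the r.h.s. of `X = Optional[int]` through
# AliasPrinter yields "int | None", and `X = Union[int, str]` yields
# "int | str" (assuming the names resolve to their typing fullnames).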
def find_defined_names(file: MypyFile) -> set[str]:
finder = DefinitionFinder()
file.accept(finder)
return finder.names
def get_assigned_names(lvalues: Iterable[Expression]) -> Iterator[str]:
for lvalue in lvalues:
if isinstance(lvalue, NameExpr):
yield lvalue.name
elif isinstance(lvalue, TupleExpr):
yield from get_assigned_names(lvalue.items)
class DefinitionFinder(mypy.traverser.TraverserVisitor):
"""Find names of things defined at the top level of a module."""
def __init__(self) -> None:
# Short names of things defined at the top level.
self.names: set[str] = set()
def visit_class_def(self, o: ClassDef) -> None:
# Don't recurse into classes, as we only keep track of top-level definitions.
self.names.add(o.name)
def visit_func_def(self, o: FuncDef) -> None:
# Don't recurse, as we only keep track of top-level definitions.
self.names.add(o.name)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
for name in get_assigned_names(o.lvalues):
self.names.add(name)
def find_referenced_names(file: MypyFile) -> set[str]:
finder = ReferenceFinder()
file.accept(finder)
return finder.refs
def is_none_expr(expr: Expression) -> bool:
return isinstance(expr, NameExpr) and expr.name == "None"
class ReferenceFinder(mypy.mixedtraverser.MixedTraverserVisitor):
"""Find all name references (both local and global)."""
# TODO: Filter out local variable and class attribute references
def __init__(self) -> None:
# Short names of things defined at the top level.
self.refs: set[str] = set()
def visit_block(self, block: Block) -> None:
if not block.is_unreachable:
super().visit_block(block)
def visit_name_expr(self, e: NameExpr) -> None:
self.refs.add(e.name)
def visit_instance(self, t: Instance) -> None:
self.add_ref(t.type.name)
super().visit_instance(t)
def visit_unbound_type(self, t: UnboundType) -> None:
if t.name:
self.add_ref(t.name)
def visit_tuple_type(self, t: TupleType) -> None:
# Ignore fallback
for item in t.items:
item.accept(self)
def visit_callable_type(self, t: CallableType) -> None:
# Ignore fallback
for arg in t.arg_types:
arg.accept(self)
t.ret_type.accept(self)
def add_ref(self, fullname: str) -> None:
self.refs.add(fullname)
while "." in fullname:
fullname = fullname.rsplit(".", 1)[0]
self.refs.add(fullname)
class ASTStubGenerator(BaseStubGenerator, mypy.traverser.TraverserVisitor):
"""Generate stub text from a mypy AST."""
def __init__(
self,
_all_: list[str] | None = None,
include_private: bool = False,
analyzed: bool = False,
export_less: bool = False,
include_docstrings: bool = False,
) -> None:
super().__init__(_all_, include_private, export_less, include_docstrings)
self._decorators: list[str] = []
# Stack of defined variables (per scope).
self._vars: list[list[str]] = [[]]
# What was generated previously in the stub file.
self._state = EMPTY
self._current_class: ClassDef | None = None
# Was the tree semantically analysed before?
self.analyzed = analyzed
# Short names of methods defined in the body of the current class
self.method_names: set[str] = set()
self.processing_enum = False
self.processing_dataclass = False
def visit_mypy_file(self, o: MypyFile) -> None:
self.module_name = o.fullname # Current module being processed
self.path = o.path
self.set_defined_names(find_defined_names(o))
self.referenced_names = find_referenced_names(o)
super().visit_mypy_file(o)
self.check_undefined_names()
def visit_overloaded_func_def(self, o: OverloadedFuncDef) -> None:
"""@property with setters and getters, @overload chain and some others."""
overload_chain = False
for item in o.items:
if not isinstance(item, Decorator):
continue
if self.is_private_name(item.func.name, item.func.fullname):
continue
self.process_decorator(item)
if not overload_chain:
self.visit_func_def(item.func)
if item.func.is_overload:
overload_chain = True
elif item.func.is_overload:
self.visit_func_def(item.func)
else:
# skip the overload implementation and clear the decorator we just processed
self.clear_decorators()
def get_default_function_sig(self, func_def: FuncDef, ctx: FunctionContext) -> FunctionSig:
args = self._get_func_args(func_def, ctx)
retname = self._get_func_return(func_def, ctx)
return FunctionSig(func_def.name, args, retname)
def _get_func_args(self, o: FuncDef, ctx: FunctionContext) -> list[ArgSig]:
args: list[ArgSig] = []
        # Ignore pos-only status of magic methods whose arg names are elided by mypy at parse time
actually_pos_only_args = o.name not in MAGIC_METHODS_POS_ARGS_ONLY
pos_only_marker_position = 0 # Where to insert "/", if any
for i, arg_ in enumerate(o.arguments):
var = arg_.variable
kind = arg_.kind
name = var.name
annotated_type = (
o.unanalyzed_type.arg_types[i]
if isinstance(o.unanalyzed_type, CallableType)
else None
)
# I think the name check is incorrect: there are libraries which
# name their 0th argument other than self/cls
is_self_arg = i == 0 and name == "self"
is_cls_arg = i == 0 and name == "cls"
typename: str | None = None
if annotated_type and not is_self_arg and not is_cls_arg:
# Luckily, an argument explicitly annotated with "Any" has
# type "UnboundType" and will not match.
if not isinstance(get_proper_type(annotated_type), AnyType):
typename = self.print_annotation(annotated_type)
if actually_pos_only_args and arg_.pos_only:
pos_only_marker_position += 1
if kind.is_named() and not any(arg.name.startswith("*") for arg in args):
args.append(ArgSig("*"))
default = "..."
if arg_.initializer:
if not typename:
typename = self.get_str_type_of_node(arg_.initializer, True, False)
potential_default, valid = self.get_str_default_of_node(arg_.initializer)
if valid and len(potential_default) <= 200:
default = potential_default
elif kind == ARG_STAR:
name = f"*{name}"
elif kind == ARG_STAR2:
name = f"**{name}"
args.append(
ArgSig(name, typename, default=bool(arg_.initializer), default_value=default)
)
if pos_only_marker_position:
args.insert(pos_only_marker_position, ArgSig("/"))
if ctx.class_info is not None and all(
arg.type is None and arg.default is False for arg in args
):
new_args = infer_method_arg_types(
ctx.name, ctx.class_info.self_var, [arg.name for arg in args]
)
if new_args is not None:
args = new_args
return args
def _get_func_return(self, o: FuncDef, ctx: FunctionContext) -> str | None:
if o.name != "__init__" and isinstance(o.unanalyzed_type, CallableType):
if isinstance(get_proper_type(o.unanalyzed_type.ret_type), AnyType):
# Luckily, a return type explicitly annotated with "Any" has
# type "UnboundType" and will enter the else branch.
return None # implicit Any
else:
return self.print_annotation(o.unanalyzed_type.ret_type)
if o.abstract_status == IS_ABSTRACT or o.name in METHODS_WITH_RETURN_VALUE:
# Always assume abstract methods return Any unless explicitly annotated. Also
# some dunder methods should not have a None return type.
return None # implicit Any
retname = infer_method_ret_type(o.name)
if retname is not None:
return retname
if has_yield_expression(o) or has_yield_from_expression(o):
generator_name = self.add_name("collections.abc.Generator")
yield_name = "None"
send_name: str | None = None
return_name: str | None = None
if has_yield_from_expression(o):
yield_name = send_name = self.add_name("_typeshed.Incomplete")
else:
for expr, in_assignment in all_yield_expressions(o):
if expr.expr is not None and not is_none_expr(expr.expr):
yield_name = self.add_name("_typeshed.Incomplete")
if in_assignment:
send_name = self.add_name("_typeshed.Incomplete")
if has_return_statement(o):
return_name = self.add_name("_typeshed.Incomplete")
if return_name is not None:
if send_name is None:
send_name = "None"
return f"{generator_name}[{yield_name}, {send_name}, {return_name}]"
elif send_name is not None:
return f"{generator_name}[{yield_name}, {send_name}]"
else:
return f"{generator_name}[{yield_name}]"
if not has_return_statement(o) and o.abstract_status == NOT_ABSTRACT:
return "None"
return None
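    # For instance, a function body containing a bare `yield 1` and no return
    # statement is inferred above to return roughly Generator[Incomplete],
    # while a `yield from` also fills in the send type.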
def _get_func_docstring(self, node: FuncDef) -> str | None:
if not node.body.body:
return None
expr = node.body.body[0]
if isinstance(expr, ExpressionStmt) and isinstance(expr.expr, StrExpr):
return expr.expr.value
return None
def visit_func_def(self, o: FuncDef) -> None:
is_dataclass_generated = (
self.analyzed and self.processing_dataclass and o.info.names[o.name].plugin_generated
)
if is_dataclass_generated and o.name != "__init__":
# Skip methods generated by the @dataclass decorator (except for __init__)
return
if (
self.is_private_name(o.name, o.fullname)
or self.is_not_in_all(o.name)
or (self.is_recorded_name(o.name) and not o.is_overload)
):
self.clear_decorators()
return
if self.is_top_level() and self._state not in (EMPTY, FUNC):
self.add("\n")
if not self.is_top_level():
self_inits = find_self_initializers(o)
for init, value in self_inits:
if init in self.method_names:
# Can't have both an attribute and a method/property with the same name.
continue
init_code = self.get_init(init, value)
if init_code:
self.add(init_code)
if self._current_class is not None:
if len(o.arguments):
self_var = o.arguments[0].variable.name
else:
self_var = "self"
class_info = ClassInfo(self._current_class.name, self_var)
else:
class_info = None
ctx = FunctionContext(
module_name=self.module_name,
name=o.name,
docstring=self._get_func_docstring(o),
is_abstract=o.abstract_status != NOT_ABSTRACT,
class_info=class_info,
)
self.record_name(o.name)
default_sig = self.get_default_function_sig(o, ctx)
sigs = self.get_signatures(default_sig, self.sig_generators, ctx)
for output in self.format_func_def(
sigs, is_coroutine=o.is_coroutine, decorators=self._decorators, docstring=ctx.docstring
):
self.add(output + "\n")
self.clear_decorators()
self._state = FUNC
def visit_decorator(self, o: Decorator) -> None:
if self.is_private_name(o.func.name, o.func.fullname):
return
self.process_decorator(o)
self.visit_func_def(o.func)
def process_decorator(self, o: Decorator) -> None:
"""Process a series of decorators.
Only preserve certain special decorators such as @abstractmethod.
"""
o.func.is_overload = False
for decorator in o.original_decorators:
if not isinstance(decorator, (NameExpr, MemberExpr)):
continue
qualname = get_qualified_name(decorator)
fullname = self.get_fullname(decorator)
if fullname in (
"builtins.property",
"builtins.staticmethod",
"builtins.classmethod",
"functools.cached_property",
):
self.add_decorator(qualname, require_name=True)
elif fullname in (
"asyncio.coroutine",
"asyncio.coroutines.coroutine",
"types.coroutine",
):
o.func.is_awaitable_coroutine = True
self.add_decorator(qualname, require_name=True)
elif fullname == "abc.abstractmethod":
self.add_decorator(qualname, require_name=True)
o.func.abstract_status = IS_ABSTRACT
elif fullname in (
"abc.abstractproperty",
"abc.abstractstaticmethod",
"abc.abstractclassmethod",
):
abc_module = qualname.rpartition(".")[0]
if not abc_module:
self.import_tracker.add_import("abc")
builtin_decorator_replacement = fullname[len("abc.abstract") :]
self.add_decorator(builtin_decorator_replacement, require_name=False)
self.add_decorator(f"{abc_module or 'abc'}.abstractmethod", require_name=True)
o.func.abstract_status = IS_ABSTRACT
elif fullname in OVERLOAD_NAMES:
self.add_decorator(qualname, require_name=True)
o.func.is_overload = True
elif qualname.endswith((".setter", ".deleter")):
self.add_decorator(qualname, require_name=False)
def get_fullname(self, expr: Expression) -> str:
"""Return the expression's full name."""
if (
self.analyzed
and isinstance(expr, (NameExpr, MemberExpr))
and expr.fullname
and not (isinstance(expr.node, Var) and expr.node.is_suppressed_import)
):
return expr.fullname
name = get_qualified_name(expr)
return self.resolve_name(name)
def visit_class_def(self, o: ClassDef) -> None:
self._current_class = o
self.method_names = find_method_names(o.defs.body)
sep: int | None = None
if self.is_top_level() and self._state != EMPTY:
sep = len(self._output)
self.add("\n")
decorators = self.get_class_decorators(o)
for d in decorators:
self.add(f"{self._indent}@{d}\n")
self.record_name(o.name)
base_types = self.get_base_types(o)
if base_types:
for base in base_types:
self.import_tracker.require_name(base)
if self.analyzed and o.info.is_enum:
self.processing_enum = True
if isinstance(o.metaclass, (NameExpr, MemberExpr)):
meta = o.metaclass.accept(AliasPrinter(self))
base_types.append("metaclass=" + meta)
elif self.analyzed and o.info.is_abstract and not o.info.is_protocol:
base_types.append("metaclass=abc.ABCMeta")
self.import_tracker.add_import("abc")
self.import_tracker.require_name("abc")
bases = f"({', '.join(base_types)})" if base_types else ""
self.add(f"{self._indent}class {o.name}{bases}:\n")
self.indent()
if self._include_docstrings and o.docstring:
docstring = mypy.util.quote_docstring(o.docstring)
self.add(f"{self._indent}{docstring}\n")
n = len(self._output)
self._vars.append([])
super().visit_class_def(o)
self.dedent()
self._vars.pop()
self._vars[-1].append(o.name)
if len(self._output) == n:
if self._state == EMPTY_CLASS and sep is not None:
self._output[sep] = ""
if not (self._include_docstrings and o.docstring):
self._output[-1] = self._output[-1][:-1] + " ...\n"
self._state = EMPTY_CLASS
else:
self._state = CLASS
self.method_names = set()
self.processing_dataclass = False
self.processing_enum = False
self._current_class = None
def get_base_types(self, cdef: ClassDef) -> list[str]:
"""Get list of base classes for a class."""
base_types: list[str] = []
p = AliasPrinter(self)
for base in cdef.base_type_exprs + cdef.removed_base_type_exprs:
if isinstance(base, (NameExpr, MemberExpr)):
if self.get_fullname(base) != "builtins.object":
base_types.append(get_qualified_name(base))
elif isinstance(base, IndexExpr):
base_types.append(base.accept(p))
elif isinstance(base, CallExpr):
# namedtuple(typename, fields), NamedTuple(typename, fields) calls can
# be used as a base class. The first argument is a string literal that
# is usually the same as the class name.
#
# Note:
# A call-based named tuple as a base class cannot be safely converted to
# a class-based NamedTuple definition because class attributes defined
# in the body of the class inheriting from the named tuple call are not
# namedtuple fields at runtime.
if self.is_namedtuple(base):
nt_fields = self._get_namedtuple_fields(base)
assert isinstance(base.args[0], StrExpr)
typename = base.args[0].value
if nt_fields is None:
# Invalid namedtuple() call, cannot determine fields
base_types.append(self.add_name("_typeshed.Incomplete"))
continue
fields_str = ", ".join(f"({f!r}, {t})" for f, t in nt_fields)
namedtuple_name = self.add_name("typing.NamedTuple")
base_types.append(f"{namedtuple_name}({typename!r}, [{fields_str}])")
elif self.is_typed_namedtuple(base):
base_types.append(base.accept(p))
else:
# At this point, we don't know what the base class is, so we
# just use Incomplete as the base class.
base_types.append(self.add_name("_typeshed.Incomplete"))
for name, value in cdef.keywords.items():
if name == "metaclass":
continue # handled separately
processed_value = value.accept(p) or "..." # at least, don't crash
base_types.append(f"{name}={processed_value}")
return base_types
def get_class_decorators(self, cdef: ClassDef) -> list[str]:
decorators: list[str] = []
p = AliasPrinter(self)
for d in cdef.decorators:
if self.is_dataclass(d):
decorators.append(d.accept(p))
self.import_tracker.require_name(get_qualified_name(d))
self.processing_dataclass = True
return decorators
def is_dataclass(self, expr: Expression) -> bool:
if isinstance(expr, CallExpr):
expr = expr.callee
return self.get_fullname(expr) == "dataclasses.dataclass"
def visit_block(self, o: Block) -> None:
# Unreachable statements may be partially uninitialized and that may
# cause trouble.
if not o.is_unreachable:
super().visit_block(o)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
foundl = []
for lvalue in o.lvalues:
if isinstance(lvalue, NameExpr) and isinstance(o.rvalue, CallExpr):
if self.is_namedtuple(o.rvalue) or self.is_typed_namedtuple(o.rvalue):
self.process_namedtuple(lvalue, o.rvalue)
foundl.append(False) # state is updated in process_namedtuple
continue
if self.is_typeddict(o.rvalue):
self.process_typeddict(lvalue, o.rvalue)
foundl.append(False) # state is updated in process_typeddict
continue
if (
isinstance(lvalue, NameExpr)
and not self.is_private_name(lvalue.name)
# it is never an alias with explicit annotation
and not o.unanalyzed_type
and self.is_alias_expression(o.rvalue)
):
self.process_typealias(lvalue, o.rvalue)
continue
if isinstance(lvalue, (TupleExpr, ListExpr)):
items = lvalue.items
if isinstance(o.unanalyzed_type, TupleType): # type: ignore[misc]
annotations: Iterable[Type | None] = o.unanalyzed_type.items
else:
annotations = [None] * len(items)
else:
items = [lvalue]
annotations = [o.unanalyzed_type]
sep = False
found = False
for item, annotation in zip(items, annotations):
if isinstance(item, NameExpr):
init = self.get_init(item.name, o.rvalue, annotation)
if init:
found = True
if not sep and self.is_top_level() and self._state not in (EMPTY, VAR):
init = "\n" + init
sep = True
self.add(init)
self.record_name(item.name)
foundl.append(found)
if all(foundl):
self._state = VAR
def is_namedtuple(self, expr: CallExpr) -> bool:
return self.get_fullname(expr.callee) == "collections.namedtuple"
def is_typed_namedtuple(self, expr: CallExpr) -> bool:
return self.get_fullname(expr.callee) in TYPED_NAMEDTUPLE_NAMES
def _get_namedtuple_fields(self, call: CallExpr) -> list[tuple[str, str]] | None:
if self.is_namedtuple(call):
fields_arg = call.args[1]
if isinstance(fields_arg, StrExpr):
field_names = fields_arg.value.replace(",", " ").split()
elif isinstance(fields_arg, (ListExpr, TupleExpr)):
field_names = []
for field in fields_arg.items:
if not isinstance(field, StrExpr):
return None
field_names.append(field.value)
else:
return None # Invalid namedtuple fields type
if field_names:
incomplete = self.add_name("_typeshed.Incomplete")
return [(field_name, incomplete) for field_name in field_names]
else:
return []
elif self.is_typed_namedtuple(call):
fields_arg = call.args[1]
if not isinstance(fields_arg, (ListExpr, TupleExpr)):
return None
fields: list[tuple[str, str]] = []
p = AliasPrinter(self)
for field in fields_arg.items:
if not (isinstance(field, TupleExpr) and len(field.items) == 2):
return None
field_name, field_type = field.items
if not isinstance(field_name, StrExpr):
return None
fields.append((field_name.value, field_type.accept(p)))
return fields
else:
return None # Not a named tuple call
def process_namedtuple(self, lvalue: NameExpr, rvalue: CallExpr) -> None:
if self._state == CLASS:
self.add("\n")
if not isinstance(rvalue.args[0], StrExpr):
self.annotate_as_incomplete(lvalue)
return
fields = self._get_namedtuple_fields(rvalue)
if fields is None:
self.annotate_as_incomplete(lvalue)
return
bases = self.add_name("typing.NamedTuple")
# TODO: Add support for generic NamedTuples. Requires `Generic` as base class.
class_def = f"{self._indent}class {lvalue.name}({bases}):"
if len(fields) == 0:
self.add(f"{class_def} ...\n")
self._state = EMPTY_CLASS
else:
if self._state not in (EMPTY, CLASS):
self.add("\n")
self.add(f"{class_def}\n")
for f_name, f_type in fields:
self.add(f"{self._indent} {f_name}: {f_type}\n")
self._state = CLASS
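    # Illustrative example (assumed input): a call such as
    #     Point = namedtuple("Point", "x y")
    # is rendered by process_namedtuple as roughly
    #     class Point(NamedTuple):
    #         x: Incomplete
    #         y: Incomplete
    # where the NamedTuple and Incomplete names are whatever add_name() tracked
    # for typing.NamedTuple and _typeshed.Incomplete.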
def is_typeddict(self, expr: CallExpr) -> bool:
return self.get_fullname(expr.callee) in TPDICT_NAMES
def process_typeddict(self, lvalue: NameExpr, rvalue: CallExpr) -> None:
if self._state == CLASS:
self.add("\n")
if not isinstance(rvalue.args[0], StrExpr):
self.annotate_as_incomplete(lvalue)
return
items: list[tuple[str, Expression]] = []
total: Expression | None = None
if len(rvalue.args) > 1 and rvalue.arg_kinds[1] == ARG_POS:
if not isinstance(rvalue.args[1], DictExpr):
self.annotate_as_incomplete(lvalue)
return
for attr_name, attr_type in rvalue.args[1].items:
if not isinstance(attr_name, StrExpr):
self.annotate_as_incomplete(lvalue)
return
items.append((attr_name.value, attr_type))
if len(rvalue.args) > 2:
if rvalue.arg_kinds[2] != ARG_NAMED or rvalue.arg_names[2] != "total":
self.annotate_as_incomplete(lvalue)
return
total = rvalue.args[2]
else:
for arg_name, arg in zip(rvalue.arg_names[1:], rvalue.args[1:]):
if not isinstance(arg_name, str):
self.annotate_as_incomplete(lvalue)
return
if arg_name == "total":
total = arg
else:
items.append((arg_name, arg))
p = AliasPrinter(self)
if any(not key.isidentifier() or keyword.iskeyword(key) for key, _ in items):
# Keep the call syntax if there are non-identifier or reserved keyword keys.
self.add(f"{self._indent}{lvalue.name} = {rvalue.accept(p)}\n")
self._state = VAR
else:
bases = self.add_name("typing_extensions.TypedDict")
# TODO: Add support for generic TypedDicts. Requires `Generic` as base class.
if total is not None:
bases += f", total={total.accept(p)}"
class_def = f"{self._indent}class {lvalue.name}({bases}):"
if len(items) == 0:
self.add(f"{class_def} ...\n")
self._state = EMPTY_CLASS
else:
if self._state not in (EMPTY, CLASS):
self.add("\n")
self.add(f"{class_def}\n")
for key, key_type in items:
self.add(f"{self._indent} {key}: {key_type.accept(p)}\n")
self._state = CLASS
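    # Illustrative example (assumed input): a call such as
    #     Movie = TypedDict("Movie", {"title": str, "year": int}, total=False)
    # is rewritten by process_typeddict into a class form, roughly
    #     class Movie(TypedDict, total=False):
    #         title: str
    #         year: int
    # while keys that are not valid identifiers (or are keywords) keep the
    # original call syntax.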
def annotate_as_incomplete(self, lvalue: NameExpr) -> None:
incomplete = self.add_name("_typeshed.Incomplete")
self.add(f"{self._indent}{lvalue.name}: {incomplete}\n")
self._state = VAR
def is_alias_expression(self, expr: Expression, top_level: bool = True) -> bool:
"""Return True for things that look like target for an alias.
Used to know if assignments look like type aliases, function alias,
or module alias.
"""
# Assignment of TypeVar(...) and other typevar-likes are passed through
if isinstance(expr, CallExpr) and self.get_fullname(expr.callee) in (
"typing.TypeVar",
"typing_extensions.TypeVar",
"typing.ParamSpec",
"typing_extensions.ParamSpec",
"typing.TypeVarTuple",
"typing_extensions.TypeVarTuple",
):
return True
elif isinstance(expr, EllipsisExpr):
return not top_level
elif isinstance(expr, NameExpr):
if expr.name in ("True", "False"):
return False
elif expr.name == "None":
return not top_level
else:
return not self.is_private_name(expr.name)
elif isinstance(expr, MemberExpr) and self.analyzed:
# Also add function and module aliases.
return (
top_level
and isinstance(expr.node, (FuncDef, Decorator, MypyFile))
or isinstance(expr.node, TypeInfo)
) and not self.is_private_member(expr.node.fullname)
elif isinstance(expr, IndexExpr) and (
(isinstance(expr.base, NameExpr) and not self.is_private_name(expr.base.name))
or ( # Also some known aliases that could be member expression
isinstance(expr.base, MemberExpr)
and not self.is_private_member(get_qualified_name(expr.base))
and self.get_fullname(expr.base).startswith(
("builtins.", "typing.", "typing_extensions.", "collections.abc.")
)
)
):
if isinstance(expr.index, TupleExpr):
indices = expr.index.items
else:
indices = [expr.index]
if expr.base.name == "Callable" and len(indices) == 2:
args, ret = indices
if isinstance(args, EllipsisExpr):
indices = [ret]
elif isinstance(args, ListExpr):
indices = args.items + [ret]
else:
return False
return all(self.is_alias_expression(i, top_level=False) for i in indices)
elif isinstance(expr, OpExpr) and expr.op == "|":
return self.is_alias_expression(
expr.left, top_level=False
) and self.is_alias_expression(expr.right, top_level=False)
else:
return False
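    # Illustrative examples (assumed): right-hand sides such as `List[int]`,
    # `int | None`, and `Callable[..., int]` are treated as alias targets, while
    # `True` is not; assignments to private names are filtered out by the caller
    # (visit_assignment_stmt).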
def process_typealias(self, lvalue: NameExpr, rvalue: Expression) -> None:
p = AliasPrinter(self)
self.add(f"{self._indent}{lvalue.name} = {rvalue.accept(p)}\n")
self.record_name(lvalue.name)
self._vars[-1].append(lvalue.name)
def visit_if_stmt(self, o: IfStmt) -> None:
# Ignore if __name__ == '__main__'.
expr = o.expr[0]
if (
isinstance(expr, ComparisonExpr)
and isinstance(expr.operands[0], NameExpr)
and isinstance(expr.operands[1], StrExpr)
and expr.operands[0].name == "__name__"
and "__main__" in expr.operands[1].value
):
return
super().visit_if_stmt(o)
def visit_import_all(self, o: ImportAll) -> None:
self.add_import_line(f"from {'.' * o.relative}{o.id} import *\n")
def visit_import_from(self, o: ImportFrom) -> None:
exported_names: set[str] = set()
import_names = []
module, relative = translate_module_name(o.id, o.relative)
if self.module_name:
full_module, ok = mypy.util.correct_relative_import(
self.module_name, relative, module, self.path.endswith(".__init__.py")
)
if not ok:
full_module = module
else:
full_module = module
if module == "__future__":
return # Not preserved
for name, as_name in o.names:
if name == "six":
# Vendored six -- translate into plain 'import six'.
self.visit_import(Import([("six", None)]))
continue
if self.should_reexport(name, full_module, as_name is not None):
self.import_tracker.reexport(name)
as_name = name
import_names.append((name, as_name))
self.import_tracker.add_import_from("." * relative + module, import_names)
self._vars[-1].extend(alias or name for name, alias in import_names)
for name, alias in import_names:
self.record_name(alias or name)
if self._all_:
# Include "import from"s that import names defined in __all__.
names = [
name
for name, alias in o.names
if name in self._all_ and alias is None and name not in self.IGNORED_DUNDERS
]
exported_names.update(names)
def visit_import(self, o: Import) -> None:
for id, as_id in o.ids:
self.import_tracker.add_import(id, as_id)
if as_id is None:
target_name = id.split(".")[0]
else:
target_name = as_id
self._vars[-1].append(target_name)
self.record_name(target_name)
def get_init(
self, lvalue: str, rvalue: Expression, annotation: Type | None = None
) -> str | None:
"""Return initializer for a variable.
Return None if we've generated one already or if the variable is internal.
"""
if lvalue in self._vars[-1]:
# We've generated an initializer already for this variable.
return None
# TODO: Only do this at module top level.
if self.is_private_name(lvalue) or self.is_not_in_all(lvalue):
return None
self._vars[-1].append(lvalue)
if annotation is not None:
typename = self.print_annotation(annotation)
if (
isinstance(annotation, UnboundType)
and not annotation.args
and annotation.name == "Final"
and self.import_tracker.module_for.get("Final") in self.TYPING_MODULE_NAMES
):
# Final without type argument is invalid in stubs.
final_arg = self.get_str_type_of_node(rvalue)
typename += f"[{final_arg}]"
elif self.processing_enum:
initializer, _ = self.get_str_default_of_node(rvalue)
return f"{self._indent}{lvalue} = {initializer}\n"
elif self.processing_dataclass:
# attribute without annotation is not a dataclass field, don't add annotation.
return f"{self._indent}{lvalue} = ...\n"
else:
typename = self.get_str_type_of_node(rvalue)
initializer = self.get_assign_initializer(rvalue)
return f"{self._indent}{lvalue}: {typename}{initializer}\n"
def get_assign_initializer(self, rvalue: Expression) -> str:
"""Does this rvalue need some special initializer value?"""
if not self._current_class:
return ""
# Current rules
# 1. Return `...` if we are dealing with `NamedTuple` or `dataclass` field and
# it has an existing default value
if (
self._current_class.info
and self._current_class.info.is_named_tuple
and not isinstance(rvalue, TempNode)
):
return " = ..."
if self.processing_dataclass and not (isinstance(rvalue, TempNode) and rvalue.no_rhs):
return " = ..."
# TODO: support other possible cases, where initializer is important
# By default, no initializer is required:
return ""
def add_decorator(self, name: str, require_name: bool = False) -> None:
if require_name:
self.import_tracker.require_name(name)
self._decorators.append(f"@{name}")
def clear_decorators(self) -> None:
self._decorators.clear()
def is_private_member(self, fullname: str) -> bool:
parts = fullname.split(".")
return any(self.is_private_name(part) for part in parts)
def get_str_type_of_node(
self, rvalue: Expression, can_infer_optional: bool = False, can_be_any: bool = True
) -> str:
rvalue = self.maybe_unwrap_unary_expr(rvalue)
if isinstance(rvalue, IntExpr):
return "int"
if isinstance(rvalue, StrExpr):
return "str"
if isinstance(rvalue, BytesExpr):
return "bytes"
if isinstance(rvalue, FloatExpr):
return "float"
if isinstance(rvalue, ComplexExpr): # 1j
return "complex"
if isinstance(rvalue, OpExpr) and rvalue.op in ("-", "+"): # -1j + 1
if isinstance(self.maybe_unwrap_unary_expr(rvalue.left), ComplexExpr) or isinstance(
self.maybe_unwrap_unary_expr(rvalue.right), ComplexExpr
):
return "complex"
if isinstance(rvalue, NameExpr) and rvalue.name in ("True", "False"):
return "bool"
if can_infer_optional and isinstance(rvalue, NameExpr) and rvalue.name == "None":
return f"{self.add_name('_typeshed.Incomplete')} | None"
if can_be_any:
return self.add_name("_typeshed.Incomplete")
else:
return ""
def maybe_unwrap_unary_expr(self, expr: Expression) -> Expression:
"""Unwrap (possibly nested) unary expressions.
        Some unary expressions can change the type of the expression, and we
        want to preserve it: for example, `~True` is `int`. So we only allow
        a subset of unary expressions to be unwrapped.
"""
if not isinstance(expr, UnaryExpr):
return expr
# First, try to unwrap `[+-]+ (int|float|complex)` expr:
math_ops = ("+", "-")
if expr.op in math_ops:
while isinstance(expr, UnaryExpr):
if expr.op not in math_ops or not isinstance(
expr.expr, (IntExpr, FloatExpr, ComplexExpr, UnaryExpr)
):
break
expr = expr.expr
return expr
# Next, try `not bool` expr:
if expr.op == "not":
while isinstance(expr, UnaryExpr):
if expr.op != "not" or not isinstance(expr.expr, (NameExpr, UnaryExpr)):
break
if isinstance(expr.expr, NameExpr) and expr.expr.name not in ("True", "False"):
break
expr = expr.expr
return expr
# This is some other unary expr, we cannot do anything with it (yet?).
return expr
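    # Illustrative behaviour (assumed): `--1` unwraps to the inner IntExpr `1` and
    # `not True` unwraps to the NameExpr `True`, but `~True` is returned unchanged
    # because inversion turns a bool into an int.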
def get_str_default_of_node(self, rvalue: Expression) -> tuple[str, bool]:
"""Get a string representation of the default value of a node.
Returns a 2-tuple of the default and whether or not it is valid.
"""
if isinstance(rvalue, NameExpr):
if rvalue.name in ("None", "True", "False"):
return rvalue.name, True
elif isinstance(rvalue, (IntExpr, FloatExpr)):
return f"{rvalue.value}", True
elif isinstance(rvalue, UnaryExpr):
if isinstance(rvalue.expr, (IntExpr, FloatExpr)):
return f"{rvalue.op}{rvalue.expr.value}", True
elif isinstance(rvalue, StrExpr):
return repr(rvalue.value), True
elif isinstance(rvalue, BytesExpr):
return "b" + repr(rvalue.value).replace("\\\\", "\\"), True
elif isinstance(rvalue, TupleExpr):
items_defaults = []
for e in rvalue.items:
e_default, valid = self.get_str_default_of_node(e)
if not valid:
break
items_defaults.append(e_default)
else:
closing = ",)" if len(items_defaults) == 1 else ")"
default = "(" + ", ".join(items_defaults) + closing
return default, True
elif isinstance(rvalue, ListExpr):
items_defaults = []
for e in rvalue.items:
e_default, valid = self.get_str_default_of_node(e)
if not valid:
break
items_defaults.append(e_default)
else:
default = "[" + ", ".join(items_defaults) + "]"
return default, True
elif isinstance(rvalue, SetExpr):
items_defaults = []
for e in rvalue.items:
e_default, valid = self.get_str_default_of_node(e)
if not valid:
break
items_defaults.append(e_default)
else:
if items_defaults:
default = "{" + ", ".join(items_defaults) + "}"
return default, True
elif isinstance(rvalue, DictExpr):
items_defaults = []
for k, v in rvalue.items:
if k is None:
break
k_default, k_valid = self.get_str_default_of_node(k)
v_default, v_valid = self.get_str_default_of_node(v)
if not (k_valid and v_valid):
break
items_defaults.append(f"{k_default}: {v_default}")
else:
default = "{" + ", ".join(items_defaults) + "}"
return default, True
return "...", False
def should_reexport(self, name: str, full_module: str, name_is_alias: bool) -> bool:
is_private = self.is_private_name(name, full_module + "." + name)
if (
not name_is_alias
and name not in self.referenced_names
and (not self._all_ or name in self.IGNORED_DUNDERS)
and not is_private
and full_module not in ("abc", "asyncio") + self.TYPING_MODULE_NAMES
):
# An imported name that is never referenced in the module is assumed to be
# exported, unless there is an explicit __all__. Note that we need to special
# case 'abc' since some references are deleted during semantic analysis.
return True
return super().should_reexport(name, full_module, name_is_alias)
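    # Illustrative example (assumed): in a module without __all__, an import like
    # `from helpers import util` is re-exported when `util` is never referenced in
    # the module body and is not private; imports from typing/abc/asyncio are not
    # implicitly re-exported by this branch.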
def find_method_names(defs: list[Statement]) -> set[str]:
# TODO: Traverse into nested definitions
result = set()
for defn in defs:
if isinstance(defn, FuncDef):
result.add(defn.name)
elif isinstance(defn, Decorator):
result.add(defn.func.name)
elif isinstance(defn, OverloadedFuncDef):
for item in defn.items:
result.update(find_method_names([item]))
return result
class SelfTraverser(mypy.traverser.TraverserVisitor):
def __init__(self) -> None:
self.results: list[tuple[str, Expression]] = []
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
lvalue = o.lvalues[0]
if (
isinstance(lvalue, MemberExpr)
and isinstance(lvalue.expr, NameExpr)
and lvalue.expr.name == "self"
):
self.results.append((lvalue.name, o.rvalue))
def find_self_initializers(fdef: FuncBase) -> list[tuple[str, Expression]]:
"""Find attribute initializers in a method.
Return a list of pairs (attribute name, r.h.s. expression).
"""
traverser = SelfTraverser()
fdef.accept(traverser)
return traverser.results
def get_qualified_name(o: Expression) -> str:
if isinstance(o, NameExpr):
return o.name
elif isinstance(o, MemberExpr):
return f"{get_qualified_name(o.expr)}.{o.name}"
else:
return ERROR_MARKER
def remove_blacklisted_modules(modules: list[StubSource]) -> list[StubSource]:
return [
module for module in modules if module.path is None or not is_blacklisted_path(module.path)
]
def split_pyc_from_py(modules: list[StubSource]) -> tuple[list[StubSource], list[StubSource]]:
py_modules = []
pyc_modules = []
for mod in modules:
if is_pyc_only(mod.path):
pyc_modules.append(mod)
else:
py_modules.append(mod)
return pyc_modules, py_modules
def is_blacklisted_path(path: str) -> bool:
return any(substr in (normalize_path_separators(path) + "\n") for substr in BLACKLIST)
def normalize_path_separators(path: str) -> str:
if sys.platform == "win32":
return path.replace("\\", "/")
return path
def collect_build_targets(
options: Options, mypy_opts: MypyOptions
) -> tuple[list[StubSource], list[StubSource], list[StubSource]]:
"""Collect files for which we need to generate stubs.
Return list of py modules, pyc modules, and C modules.
"""
if options.packages or options.modules:
if options.no_import:
py_modules = find_module_paths_using_search(
options.modules, options.packages, options.search_path, options.pyversion
)
c_modules: list[StubSource] = []
else:
# Using imports is the default, since we can also find C modules.
py_modules, c_modules = find_module_paths_using_imports(
options.modules, options.packages, options.verbose, options.quiet
)
else:
# Use mypy native source collection for files and directories.
try:
source_list = create_source_list(options.files, mypy_opts)
except InvalidSourceList as e:
raise SystemExit(str(e)) from e
py_modules = [StubSource(m.module, m.path) for m in source_list]
c_modules = []
py_modules = remove_blacklisted_modules(py_modules)
pyc_mod, py_mod = split_pyc_from_py(py_modules)
return py_mod, pyc_mod, c_modules
def find_module_paths_using_imports(
modules: list[str], packages: list[str], verbose: bool, quiet: bool
) -> tuple[list[StubSource], list[StubSource]]:
"""Find path and runtime value of __all__ (if possible) for modules and packages.
This function uses runtime Python imports to get the information.
"""
with ModuleInspect() as inspect:
py_modules: list[StubSource] = []
c_modules: list[StubSource] = []
found = list(walk_packages(inspect, packages, verbose))
modules = modules + found
modules = [
mod for mod in modules if not is_non_library_module(mod)
] # We don't want to run any tests or scripts
for mod in modules:
try:
result = find_module_path_and_all_py3(inspect, mod, verbose)
except CantImport as e:
tb = traceback.format_exc()
if verbose:
sys.stdout.write(tb)
if not quiet:
report_missing(mod, e.message, tb)
continue
if not result:
c_modules.append(StubSource(mod))
else:
path, runtime_all = result
py_modules.append(StubSource(mod, path, runtime_all))
return py_modules, c_modules
def is_non_library_module(module: str) -> bool:
"""Does module look like a test module or a script?"""
if module.endswith(
(
".tests",
".test",
".testing",
"_tests",
"_test_suite",
"test_util",
"test_utils",
"test_base",
".__main__",
".conftest", # Used by pytest
".setup", # Typically an install script
)
):
return True
if module.split(".")[-1].startswith("test_"):
return True
if (
".tests." in module
or ".test." in module
or ".testing." in module
or ".SelfTest." in module
):
return True
return False
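# Illustrative classifications (assumed): "pkg.tests.helpers" and "pkg.test_foo"
# are treated as non-library modules and skipped, while "pkg.testing_tools" is
# kept because only the listed suffixes and dotted segments are matched.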
def translate_module_name(module: str, relative: int) -> tuple[str, int]:
for pkg in VENDOR_PACKAGES:
for alt in "six.moves", "six":
substr = f"{pkg}.{alt}"
if module.endswith("." + substr) or (module == substr and relative):
return alt, 0
if "." + substr + "." in module:
return alt + "." + module.partition("." + substr + ".")[2], 0
return module, relative
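# Illustrative example (assumed): a module path ending in a vendored six location,
# e.g. "<pkg>.<vendor_dir>.six.moves", is rewritten to plain "six.moves" with the
# relative-import level reset to 0; any other module is returned unchanged.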
def find_module_paths_using_search(
modules: list[str], packages: list[str], search_path: list[str], pyversion: tuple[int, int]
) -> list[StubSource]:
"""Find sources for modules and packages requested.
This function just looks for source files at the file system level.
    This is used if the user passes --no-import, and will not find C modules.
Exit if some of the modules or packages can't be found.
"""
result: list[StubSource] = []
typeshed_path = default_lib_path(mypy.build.default_data_dir(), pyversion, None)
search_paths = SearchPaths((".",) + tuple(search_path), (), (), tuple(typeshed_path))
cache = FindModuleCache(search_paths, fscache=None, options=None)
for module in modules:
m_result = cache.find_module(module)
if isinstance(m_result, ModuleNotFoundReason):
fail_missing(module, m_result)
module_path = None
else:
module_path = m_result
result.append(StubSource(module, module_path))
for package in packages:
p_result = cache.find_modules_recursive(package)
if p_result:
fail_missing(package, ModuleNotFoundReason.NOT_FOUND)
sources = [StubSource(m.module, m.path) for m in p_result]
result.extend(sources)
result = [m for m in result if not is_non_library_module(m.module)]
return result
def mypy_options(stubgen_options: Options) -> MypyOptions:
"""Generate mypy options using the flag passed by user."""
options = MypyOptions()
options.follow_imports = "skip"
options.incremental = False
options.ignore_errors = True
options.semantic_analysis_only = True
options.python_version = stubgen_options.pyversion
options.show_traceback = True
options.transform_source = remove_misplaced_type_comments
options.preserve_asts = True
options.include_docstrings = stubgen_options.include_docstrings
# Override cache_dir if provided in the environment
environ_cache_dir = os.getenv("MYPY_CACHE_DIR", "")
if environ_cache_dir.strip():
options.cache_dir = environ_cache_dir
options.cache_dir = os.path.expanduser(options.cache_dir)
return options
def parse_source_file(mod: StubSource, mypy_options: MypyOptions) -> None:
"""Parse a source file.
On success, store AST in the corresponding attribute of the stub source.
If there are syntax errors, print them and exit.
"""
assert mod.path is not None, "Not found module was not skipped"
with open(mod.path, "rb") as f:
data = f.read()
source = mypy.util.decode_python_encoding(data)
errors = Errors(mypy_options)
mod.ast = mypy.parse.parse(
source, fnam=mod.path, module=mod.module, errors=errors, options=mypy_options
)
mod.ast._fullname = mod.module
if errors.is_blockers():
# Syntax error!
for m in errors.new_messages():
sys.stderr.write(f"{m}\n")
sys.exit(1)
def generate_asts_for_modules(
py_modules: list[StubSource], parse_only: bool, mypy_options: MypyOptions, verbose: bool
) -> None:
"""Use mypy to parse (and optionally analyze) source files."""
if not py_modules:
return # Nothing to do here, but there may be C modules
if verbose:
print(f"Processing {len(py_modules)} files...")
if parse_only:
for mod in py_modules:
parse_source_file(mod, mypy_options)
return
# Perform full semantic analysis of the source set.
try:
res = build([module.source for module in py_modules], mypy_options)
except CompileError as e:
raise SystemExit(f"Critical error during semantic analysis: {e}") from e
for mod in py_modules:
mod.ast = res.graph[mod.module].tree
# Use statically inferred __all__ if there is no runtime one.
if mod.runtime_all is None:
mod.runtime_all = res.manager.semantic_analyzer.export_map[mod.module]
def generate_stub_for_py_module(
mod: StubSource,
target: str,
*,
parse_only: bool = False,
inspect: bool = False,
include_private: bool = False,
export_less: bool = False,
include_docstrings: bool = False,
doc_dir: str = "",
all_modules: list[str],
) -> None:
"""Use analysed (or just parsed) AST to generate type stub for single file.
If directory for target doesn't exist it will created. Existing stub
will be overwritten.
"""
if inspect:
ngen = InspectionStubGenerator(
module_name=mod.module,
known_modules=all_modules,
_all_=mod.runtime_all,
doc_dir=doc_dir,
include_private=include_private,
export_less=export_less,
include_docstrings=include_docstrings,
)
ngen.generate_module()
output = ngen.output()
else:
gen = ASTStubGenerator(
mod.runtime_all,
include_private=include_private,
analyzed=not parse_only,
export_less=export_less,
include_docstrings=include_docstrings,
)
assert mod.ast is not None, "This function must be used only with analyzed modules"
mod.ast.accept(gen)
output = gen.output()
# Write output to file.
subdir = os.path.dirname(target)
if subdir and not os.path.isdir(subdir):
os.makedirs(subdir)
with open(target, "w", encoding="utf-8") as file:
file.write(output)
def generate_stubs(options: Options) -> None:
"""Main entry point for the program."""
mypy_opts = mypy_options(options)
py_modules, pyc_modules, c_modules = collect_build_targets(options, mypy_opts)
all_modules = py_modules + pyc_modules + c_modules
all_module_names = sorted(m.module for m in all_modules)
# Use parsed sources to generate stubs for Python modules.
generate_asts_for_modules(py_modules, options.parse_only, mypy_opts, options.verbose)
files = []
for mod in py_modules + pyc_modules:
assert mod.path is not None, "Not found module was not skipped"
target = mod.module.replace(".", "/")
if os.path.basename(mod.path) in ["__init__.py", "__init__.pyc"]:
target += "/__init__.pyi"
else:
target += ".pyi"
target = os.path.join(options.output_dir, target)
files.append(target)
with generate_guarded(mod.module, target, options.ignore_errors, options.verbose):
generate_stub_for_py_module(
mod,
target,
parse_only=options.parse_only,
inspect=options.inspect or mod in pyc_modules,
include_private=options.include_private,
export_less=options.export_less,
include_docstrings=options.include_docstrings,
doc_dir=options.doc_dir,
all_modules=all_module_names,
)
# Separately analyse C modules using different logic.
for mod in c_modules:
if any(py_mod.module.startswith(mod.module + ".") for py_mod in all_modules):
target = mod.module.replace(".", "/") + "/__init__.pyi"
else:
target = mod.module.replace(".", "/") + ".pyi"
target = os.path.join(options.output_dir, target)
files.append(target)
with generate_guarded(mod.module, target, options.ignore_errors, options.verbose):
generate_stub_for_c_module(
mod.module,
target,
known_modules=all_module_names,
doc_dir=options.doc_dir,
include_private=options.include_private,
export_less=options.export_less,
include_docstrings=options.include_docstrings,
)
num_modules = len(all_modules)
if not options.quiet and num_modules > 0:
print("Processed %d modules" % num_modules)
if len(files) == 1:
print(f"Generated {files[0]}")
else:
print(f"Generated files under {common_dir_prefix(files)}" + os.sep)
HEADER = """%(prog)s [-h] [more options, see -h]
[-m MODULE] [-p PACKAGE] [files ...]"""
DESCRIPTION = """
Generate draft stubs for modules.
Stubs are generated in directory ./out, to avoid overwriting files that
contain manual changes. This directory is assumed to exist.
"""
def parse_options(args: list[str]) -> Options:
parser = argparse.ArgumentParser(prog="stubgen", usage=HEADER, description=DESCRIPTION)
parser.add_argument(
"--ignore-errors",
action="store_true",
help="ignore errors when trying to generate stubs for modules",
)
parser.add_argument(
"--no-import",
action="store_true",
help="don't import the modules, just parse and analyze them "
"(doesn't work with C extension modules and might not "
"respect __all__)",
)
parser.add_argument(
"--no-analysis",
"--parse-only",
dest="parse_only",
action="store_true",
help="don't perform semantic analysis of sources, just parse them "
"(only applies to Python modules, might affect quality of stubs. "
"Not compatible with --inspect-mode)",
)
parser.add_argument(
"--inspect-mode",
dest="inspect",
action="store_true",
help="import and inspect modules instead of parsing source code."
"This is the default behavior for c modules and pyc-only packages, but "
"it is also useful for pure python modules with dynamically generated members.",
)
parser.add_argument(
"--include-private",
action="store_true",
help="generate stubs for objects and members considered private "
"(single leading underscore and no trailing underscores)",
)
parser.add_argument(
"--export-less",
action="store_true",
help="don't implicitly export all names imported from other modules in the same package",
)
parser.add_argument(
"--include-docstrings",
action="store_true",
help="include existing docstrings with the stubs",
)
parser.add_argument("-v", "--verbose", action="store_true", help="show more verbose messages")
parser.add_argument("-q", "--quiet", action="store_true", help="show fewer messages")
parser.add_argument(
"--doc-dir",
metavar="PATH",
default="",
help="use .rst documentation in PATH (this may result in "
"better stubs in some cases; consider setting this to "
"DIR/Python-X.Y.Z/Doc/library)",
)
parser.add_argument(
"--search-path",
metavar="PATH",
default="",
help="specify module search directories, separated by ':' "
"(currently only used if --no-import is given)",
)
parser.add_argument(
"-o",
"--output",
metavar="PATH",
dest="output_dir",
default="out",
help="change the output directory [default: %(default)s]",
)
parser.add_argument(
"-m",
"--module",
action="append",
metavar="MODULE",
dest="modules",
default=[],
help="generate stub for module; can repeat for more modules",
)
parser.add_argument(
"-p",
"--package",
action="append",
metavar="PACKAGE",
dest="packages",
default=[],
help="generate stubs for package recursively; can be repeated",
)
parser.add_argument(
metavar="files",
nargs="*",
dest="files",
help="generate stubs for given files or directories",
)
parser.add_argument(
"--version", action="version", version="%(prog)s " + mypy.version.__version__
)
ns = parser.parse_args(args)
pyversion = sys.version_info[:2]
ns.interpreter = sys.executable
if ns.modules + ns.packages and ns.files:
parser.error("May only specify one of: modules/packages or files.")
if ns.quiet and ns.verbose:
parser.error("Cannot specify both quiet and verbose messages")
if ns.inspect and ns.parse_only:
parser.error("Cannot specify both --parse-only/--no-analysis and --inspect-mode")
# Create the output folder if it doesn't already exist.
os.makedirs(ns.output_dir, exist_ok=True)
return Options(
pyversion=pyversion,
no_import=ns.no_import,
inspect=ns.inspect,
doc_dir=ns.doc_dir,
search_path=ns.search_path.split(":"),
interpreter=ns.interpreter,
ignore_errors=ns.ignore_errors,
parse_only=ns.parse_only,
include_private=ns.include_private,
output_dir=ns.output_dir,
modules=ns.modules,
packages=ns.packages,
files=ns.files,
verbose=ns.verbose,
quiet=ns.quiet,
export_less=ns.export_less,
include_docstrings=ns.include_docstrings,
)
def main(args: list[str] | None = None) -> None:
mypy.util.check_python_version("stubgen")
# Make sure that the current directory is in sys.path so that
# stubgen can be run on packages in the current directory.
if not ("" in sys.path or "." in sys.path):
sys.path.insert(0, "")
options = parse_options(sys.argv[1:] if args is None else args)
generate_stubs(options)
if __name__ == "__main__":
main()
| algorandfoundation/puya | src/puyapy/_vendor/mypy/stubgen.py | Python | NOASSERTION | 73,497 |
#!/usr/bin/env python3
"""Stub generator for C modules.
The public interface is via the mypy.stubgen module.
"""
from __future__ import annotations
import glob
import importlib
import inspect
import keyword
import os.path
from types import FunctionType, ModuleType
from typing import Any, Callable, Mapping
from mypy.fastparse import parse_type_comment
from mypy.moduleinspect import is_c_module
from mypy.stubdoc import (
ArgSig,
FunctionSig,
Sig,
find_unique_signatures,
infer_arg_sig_from_anon_docstring,
infer_prop_type_from_docstring,
infer_ret_type_sig_from_anon_docstring,
infer_ret_type_sig_from_docstring,
infer_sig_from_docstring,
parse_all_signatures,
)
from mypy.stubutil import (
BaseStubGenerator,
ClassInfo,
FunctionContext,
SignatureGenerator,
infer_method_arg_types,
infer_method_ret_type,
)
class ExternalSignatureGenerator(SignatureGenerator):
def __init__(
self, func_sigs: dict[str, str] | None = None, class_sigs: dict[str, str] | None = None
) -> None:
"""
        Takes a mapping of function/method names to signatures and a mapping of
        class names to class signatures (usually corresponding to __init__).
"""
self.func_sigs = func_sigs or {}
self.class_sigs = class_sigs or {}
@classmethod
def from_doc_dir(cls, doc_dir: str) -> ExternalSignatureGenerator:
"""Instantiate from a directory of .rst files."""
all_sigs: list[Sig] = []
all_class_sigs: list[Sig] = []
for path in glob.glob(f"{doc_dir}/*.rst"):
with open(path) as f:
loc_sigs, loc_class_sigs = parse_all_signatures(f.readlines())
all_sigs += loc_sigs
all_class_sigs += loc_class_sigs
sigs = dict(find_unique_signatures(all_sigs))
class_sigs = dict(find_unique_signatures(all_class_sigs))
return ExternalSignatureGenerator(sigs, class_sigs)
def get_function_sig(
self, default_sig: FunctionSig, ctx: FunctionContext
) -> list[FunctionSig] | None:
# method:
if (
ctx.class_info
and ctx.name in ("__new__", "__init__")
and ctx.name not in self.func_sigs
and ctx.class_info.name in self.class_sigs
):
return [
FunctionSig(
name=ctx.name,
args=infer_arg_sig_from_anon_docstring(self.class_sigs[ctx.class_info.name]),
ret_type=infer_method_ret_type(ctx.name),
)
]
# function:
if ctx.name not in self.func_sigs:
return None
inferred = [
FunctionSig(
name=ctx.name,
args=infer_arg_sig_from_anon_docstring(self.func_sigs[ctx.name]),
ret_type=None,
)
]
if ctx.class_info:
return self.remove_self_type(inferred, ctx.class_info.self_var)
else:
return inferred
def get_property_type(self, default_type: str | None, ctx: FunctionContext) -> str | None:
return None
class DocstringSignatureGenerator(SignatureGenerator):
def get_function_sig(
self, default_sig: FunctionSig, ctx: FunctionContext
) -> list[FunctionSig] | None:
inferred = infer_sig_from_docstring(ctx.docstring, ctx.name)
if inferred:
assert ctx.docstring is not None
if is_pybind11_overloaded_function_docstring(ctx.docstring, ctx.name):
# Remove pybind11 umbrella (*args, **kwargs) for overloaded functions
del inferred[-1]
if ctx.class_info:
if not inferred and ctx.name == "__init__":
# look for class-level constructor signatures of the form <class_name>(<signature>)
inferred = infer_sig_from_docstring(ctx.class_info.docstring, ctx.class_info.name)
if inferred:
inferred = [sig._replace(name="__init__") for sig in inferred]
return self.remove_self_type(inferred, ctx.class_info.self_var)
else:
return inferred
def get_property_type(self, default_type: str | None, ctx: FunctionContext) -> str | None:
"""Infer property type from docstring or docstring signature."""
if ctx.docstring is not None:
inferred = infer_ret_type_sig_from_anon_docstring(ctx.docstring)
if inferred:
return inferred
inferred = infer_ret_type_sig_from_docstring(ctx.docstring, ctx.name)
if inferred:
return inferred
inferred = infer_prop_type_from_docstring(ctx.docstring)
return inferred
else:
return None
def is_pybind11_overloaded_function_docstring(docstring: str, name: str) -> bool:
return docstring.startswith(f"{name}(*args, **kwargs)\nOverloaded function.\n\n")
def generate_stub_for_c_module(
module_name: str,
target: str,
known_modules: list[str],
doc_dir: str = "",
*,
include_private: bool = False,
export_less: bool = False,
include_docstrings: bool = False,
) -> None:
"""Generate stub for C module.
Signature generators are called in order until a list of signatures is returned. The order
is:
- signatures inferred from .rst documentation (if given)
- simple runtime introspection (looking for docstrings and attributes
with simple builtin types)
    - a fallback based on special method names or "(*args, **kwargs)"
If directory for target doesn't exist it will be created. Existing stub
will be overwritten.
"""
subdir = os.path.dirname(target)
if subdir and not os.path.isdir(subdir):
os.makedirs(subdir)
gen = InspectionStubGenerator(
module_name,
known_modules,
doc_dir,
include_private=include_private,
export_less=export_less,
include_docstrings=include_docstrings,
)
gen.generate_module()
output = gen.output()
with open(target, "w", encoding="utf-8") as file:
file.write(output)
class CFunctionStub:
"""
Class that mimics a C function in order to provide parseable docstrings.
"""
def __init__(self, name: str, doc: str, is_abstract: bool = False) -> None:
self.__name__ = name
self.__doc__ = doc
self.__abstractmethod__ = is_abstract
@classmethod
def _from_sig(cls, sig: FunctionSig, is_abstract: bool = False) -> CFunctionStub:
return CFunctionStub(sig.name, sig.format_sig()[:-4], is_abstract)
@classmethod
def _from_sigs(cls, sigs: list[FunctionSig], is_abstract: bool = False) -> CFunctionStub:
return CFunctionStub(
sigs[0].name, "\n".join(sig.format_sig()[:-4] for sig in sigs), is_abstract
)
def __get__(self) -> None:
"""
This exists to make this object look like a method descriptor and thus
        return true for InspectionStubGenerator.is_method().
"""
pass
class InspectionStubGenerator(BaseStubGenerator):
"""Stub generator that does not parse code.
Generation is performed by inspecting the module's contents, and thus works
    for highly dynamic modules, pyc files, and C modules.
"""
def __init__(
self,
module_name: str,
known_modules: list[str],
doc_dir: str = "",
_all_: list[str] | None = None,
include_private: bool = False,
export_less: bool = False,
include_docstrings: bool = False,
module: ModuleType | None = None,
) -> None:
self.doc_dir = doc_dir
if module is None:
self.module = importlib.import_module(module_name)
else:
self.module = module
self.is_c_module = is_c_module(self.module)
self.known_modules = known_modules
self.resort_members = self.is_c_module
super().__init__(_all_, include_private, export_less, include_docstrings)
self.module_name = module_name
if self.is_c_module:
# Add additional implicit imports.
            # C-extensions are given more latitude since they do not import the typing module.
self.known_imports.update(
{
"typing": [
"Any",
"Callable",
"ClassVar",
"Dict",
"Iterable",
"Iterator",
"List",
"Literal",
"NamedTuple",
"Optional",
"Tuple",
"Union",
]
}
)
def get_default_function_sig(self, func: object, ctx: FunctionContext) -> FunctionSig:
argspec = None
if not self.is_c_module:
# Get the full argument specification of the function
try:
argspec = inspect.getfullargspec(func)
except TypeError:
# some callables cannot be inspected, e.g. functools.partial
pass
if argspec is None:
if ctx.class_info is not None:
# method:
return FunctionSig(
name=ctx.name,
args=infer_c_method_args(ctx.name, ctx.class_info.self_var),
ret_type=infer_method_ret_type(ctx.name),
)
else:
# function:
return FunctionSig(
name=ctx.name,
args=[ArgSig(name="*args"), ArgSig(name="**kwargs")],
ret_type=None,
)
# Extract the function arguments, defaults, and varargs
args = argspec.args
defaults = argspec.defaults
varargs = argspec.varargs
kwargs = argspec.varkw
annotations = argspec.annotations
kwonlyargs = argspec.kwonlyargs
kwonlydefaults = argspec.kwonlydefaults
def get_annotation(key: str) -> str | None:
if key not in annotations:
return None
argtype = annotations[key]
if argtype is None:
return "None"
if not isinstance(argtype, str):
return self.get_type_fullname(argtype)
return argtype
arglist: list[ArgSig] = []
# Add the arguments to the signature
def add_args(
args: list[str], get_default_value: Callable[[int, str], object | None]
) -> None:
for i, arg in enumerate(args):
# Check if the argument has a default value
default_value = get_default_value(i, arg)
if default_value is not None:
if arg in annotations:
argtype = annotations[arg]
else:
argtype = self.get_type_annotation(default_value)
if argtype == "None":
# None is not a useful annotation, but we can infer that the arg
# is optional
incomplete = self.add_name("_typeshed.Incomplete")
argtype = f"{incomplete} | None"
arglist.append(ArgSig(arg, argtype, default=True))
else:
arglist.append(ArgSig(arg, get_annotation(arg), default=False))
def get_pos_default(i: int, _arg: str) -> Any | None:
if defaults and i >= len(args) - len(defaults):
return defaults[i - (len(args) - len(defaults))]
else:
return None
add_args(args, get_pos_default)
# Add *args if present
if varargs:
arglist.append(ArgSig(f"*{varargs}", get_annotation(varargs)))
        # if we have keyword-only args, then we need to add "*"
elif kwonlyargs:
arglist.append(ArgSig("*"))
def get_kw_default(_i: int, arg: str) -> Any | None:
if kwonlydefaults:
return kwonlydefaults.get(arg)
else:
return None
add_args(kwonlyargs, get_kw_default)
# Add **kwargs if present
if kwargs:
arglist.append(ArgSig(f"**{kwargs}", get_annotation(kwargs)))
# add types for known special methods
if ctx.class_info is not None and all(
arg.type is None and arg.default is False for arg in arglist
):
new_args = infer_method_arg_types(
ctx.name, ctx.class_info.self_var, [arg.name for arg in arglist if arg.name]
)
if new_args is not None:
arglist = new_args
ret_type = get_annotation("return") or infer_method_ret_type(ctx.name)
return FunctionSig(ctx.name, arglist, ret_type)
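    # Illustrative example (assumed): for `def f(a, b=1, *args, c, **kw): ...` the
    # inspection above yields roughly
    #     FunctionSig("f", [ArgSig("a"), ArgSig("b", "int", default=True),
    #                       ArgSig("*args"), ArgSig("c"), ArgSig("**kw")], None)
    # with the type of `b` inferred from its default via get_type_annotation().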
def get_sig_generators(self) -> list[SignatureGenerator]:
if not self.is_c_module:
return []
else:
sig_generators: list[SignatureGenerator] = [DocstringSignatureGenerator()]
if self.doc_dir:
# Collect info from docs (if given). Always check these first.
sig_generators.insert(0, ExternalSignatureGenerator.from_doc_dir(self.doc_dir))
return sig_generators
def strip_or_import(self, type_name: str) -> str:
"""Strips unnecessary module names from typ.
If typ represents a type that is inside module or is a type coming from builtins, remove
module declaration from it. Return stripped name of the type.
Arguments:
typ: name of the type
"""
local_modules = ["builtins", self.module_name]
parsed_type = parse_type_comment(type_name, 0, 0, None)[1]
assert parsed_type is not None, type_name
return self.print_annotation(parsed_type, self.known_modules, local_modules)
def get_obj_module(self, obj: object) -> str | None:
"""Return module name of the object."""
return getattr(obj, "__module__", None)
def is_defined_in_module(self, obj: object) -> bool:
"""Check if object is considered defined in the current module."""
module = self.get_obj_module(obj)
return module is None or module == self.module_name
def generate_module(self) -> None:
all_items = self.get_members(self.module)
if self.resort_members:
all_items = sorted(all_items, key=lambda x: x[0])
items = []
for name, obj in all_items:
if inspect.ismodule(obj) and obj.__name__ in self.known_modules:
module_name = obj.__name__
if module_name.startswith(self.module_name + "."):
# from {.rel_name} import {mod_name} as {name}
pkg_name, mod_name = module_name.rsplit(".", 1)
rel_module = pkg_name[len(self.module_name) :] or "."
self.import_tracker.add_import_from(rel_module, [(mod_name, name)])
self.import_tracker.reexport(name)
else:
# import {module_name} as {name}
self.import_tracker.add_import(module_name, name)
self.import_tracker.reexport(name)
elif self.is_defined_in_module(obj) and not inspect.ismodule(obj):
# process this below
items.append((name, obj))
else:
# from {obj_module} import {obj_name}
obj_module_name = self.get_obj_module(obj)
if obj_module_name:
self.import_tracker.add_import_from(obj_module_name, [(name, None)])
if self.should_reexport(name, obj_module_name, name_is_alias=False):
self.import_tracker.reexport(name)
self.set_defined_names({name for name, obj in all_items if not inspect.ismodule(obj)})
if self.resort_members:
functions: list[str] = []
types: list[str] = []
variables: list[str] = []
else:
output: list[str] = []
functions = types = variables = output
for name, obj in items:
if self.is_function(obj):
self.generate_function_stub(name, obj, output=functions)
elif inspect.isclass(obj):
self.generate_class_stub(name, obj, output=types)
else:
self.generate_variable_stub(name, obj, output=variables)
self._output = []
if self.resort_members:
for line in variables:
self._output.append(line + "\n")
for line in types:
if line.startswith("class") and self._output and self._output[-1]:
self._output.append("\n")
self._output.append(line + "\n")
if self._output and functions:
self._output.append("\n")
for line in functions:
self._output.append(line + "\n")
else:
for i, line in enumerate(output):
if (
self._output
and line.startswith("class")
and (
not self._output[-1].startswith("class")
or (len(output) > i + 1 and output[i + 1].startswith(" "))
)
) or (
self._output
and self._output[-1].startswith("def")
and not line.startswith("def")
):
self._output.append("\n")
self._output.append(line + "\n")
self.check_undefined_names()
def is_skipped_attribute(self, attr: str) -> bool:
return (
attr
in (
"__class__",
"__getattribute__",
"__str__",
"__repr__",
"__doc__",
"__dict__",
"__module__",
"__weakref__",
"__annotations__",
"__firstlineno__",
"__static_attributes__",
"__annotate__",
)
or attr in self.IGNORED_DUNDERS
or is_pybind_skipped_attribute(attr) # For pickling
or keyword.iskeyword(attr)
)
def get_members(self, obj: object) -> list[tuple[str, Any]]:
obj_dict: Mapping[str, Any] = getattr(obj, "__dict__") # noqa: B009
results = []
for name in obj_dict:
if self.is_skipped_attribute(name):
continue
# Try to get the value via getattr
try:
value = getattr(obj, name)
except AttributeError:
continue
else:
results.append((name, value))
return results
def get_type_annotation(self, obj: object) -> str:
"""
Given an instance, return a string representation of its type that is valid
to use as a type annotation.
"""
if obj is None or obj is type(None):
return "None"
elif inspect.isclass(obj):
return f"type[{self.get_type_fullname(obj)}]"
elif isinstance(obj, FunctionType):
return self.add_name("typing.Callable")
elif isinstance(obj, ModuleType):
return self.add_name("types.ModuleType", require=False)
else:
return self.get_type_fullname(type(obj))
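    # Illustrative mappings (assumed): None -> "None", the class int -> "type[int]",
    # a plain function -> the tracked name for typing.Callable, and an instance
    # such as 3.14 -> "float".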
def is_function(self, obj: object) -> bool:
if self.is_c_module:
return inspect.isbuiltin(obj)
else:
return inspect.isfunction(obj)
def is_method(self, class_info: ClassInfo, name: str, obj: object) -> bool:
if self.is_c_module:
return inspect.ismethoddescriptor(obj) or type(obj) in (
type(str.index),
type(str.__add__),
type(str.__new__),
)
else:
# this is valid because it is only called on members of a class
return inspect.isfunction(obj)
def is_classmethod(self, class_info: ClassInfo, name: str, obj: object) -> bool:
if self.is_c_module:
return inspect.isbuiltin(obj) or type(obj).__name__ in (
"classmethod",
"classmethod_descriptor",
)
else:
return inspect.ismethod(obj)
def is_staticmethod(self, class_info: ClassInfo | None, name: str, obj: object) -> bool:
if class_info is None:
return False
elif self.is_c_module:
raw_lookup: Mapping[str, Any] = getattr(class_info.cls, "__dict__") # noqa: B009
raw_value = raw_lookup.get(name, obj)
return isinstance(raw_value, staticmethod)
else:
return isinstance(inspect.getattr_static(class_info.cls, name), staticmethod)
@staticmethod
def is_abstract_method(obj: object) -> bool:
return getattr(obj, "__abstractmethod__", False)
@staticmethod
def is_property(class_info: ClassInfo, name: str, obj: object) -> bool:
return inspect.isdatadescriptor(obj) or hasattr(obj, "fget")
@staticmethod
def is_property_readonly(prop: Any) -> bool:
return hasattr(prop, "fset") and prop.fset is None
def is_static_property(self, obj: object) -> bool:
"""For c-modules, whether the property behaves like an attribute"""
if self.is_c_module:
# StaticProperty is from boost-python
return type(obj).__name__ in ("pybind11_static_property", "StaticProperty")
else:
return False
def process_inferred_sigs(self, inferred: list[FunctionSig]) -> None:
for i, sig in enumerate(inferred):
for arg in sig.args:
if arg.type is not None:
arg.type = self.strip_or_import(arg.type)
if sig.ret_type is not None:
inferred[i] = sig._replace(ret_type=self.strip_or_import(sig.ret_type))
def generate_function_stub(
self, name: str, obj: object, *, output: list[str], class_info: ClassInfo | None = None
) -> None:
"""Generate stub for a single function or method.
        The resulting lines will be appended to 'output'. Any names that are
        needed are registered with the import tracker, and 'class_info' (when
        given) is used to infer the signature of methods such as __init__ and
        __new__.
"""
docstring: Any = getattr(obj, "__doc__", None)
if not isinstance(docstring, str):
docstring = None
ctx = FunctionContext(
self.module_name,
name,
docstring=docstring,
is_abstract=self.is_abstract_method(obj),
class_info=class_info,
)
if self.is_private_name(name, ctx.fullname) or self.is_not_in_all(name):
return
self.record_name(ctx.name)
default_sig = self.get_default_function_sig(obj, ctx)
inferred = self.get_signatures(default_sig, self.sig_generators, ctx)
self.process_inferred_sigs(inferred)
decorators = []
if len(inferred) > 1:
decorators.append("@{}".format(self.add_name("typing.overload")))
if ctx.is_abstract:
decorators.append("@{}".format(self.add_name("abc.abstractmethod")))
if class_info is not None:
if self.is_staticmethod(class_info, name, obj):
decorators.append("@staticmethod")
else:
for sig in inferred:
if not sig.args or sig.args[0].name not in ("self", "cls"):
sig.args.insert(0, ArgSig(name=class_info.self_var))
# a sig generator indicates @classmethod by specifying the cls arg.
if inferred[0].args and inferred[0].args[0].name == "cls":
decorators.append("@classmethod")
if docstring:
docstring = self._indent_docstring(docstring)
output.extend(self.format_func_def(inferred, decorators=decorators, docstring=docstring))
self._fix_iter(ctx, inferred, output)
def _indent_docstring(self, docstring: str) -> str:
"""Fix indentation of docstring extracted from pybind11 or other binding generators."""
lines = docstring.splitlines(keepends=True)
indent = self._indent + " "
if len(lines) > 1:
if not all(line.startswith(indent) or not line.strip() for line in lines):
# if the docstring is not indented, then indent all but the first line
for i, line in enumerate(lines[1:]):
if line.strip():
lines[i + 1] = indent + line
# if there's a trailing newline, add a final line to visually indent the quoted docstring
if lines[-1].endswith("\n"):
if len(lines) > 1:
lines.append(indent)
else:
lines[-1] = lines[-1][:-1]
return "".join(lines)
def _fix_iter(
self, ctx: FunctionContext, inferred: list[FunctionSig], output: list[str]
) -> None:
"""Ensure that objects which implement old-style iteration via __getitem__
are considered iterable.
"""
if (
ctx.class_info
and ctx.class_info.cls is not None
and ctx.name == "__getitem__"
and "__iter__" not in ctx.class_info.cls.__dict__
):
item_type: str | None = None
for sig in inferred:
if sig.args and sig.args[-1].type == "int":
item_type = sig.ret_type
break
if item_type is None:
return
obj = CFunctionStub(
"__iter__", f"def __iter__(self) -> typing.Iterator[{item_type}]\n"
)
self.generate_function_stub("__iter__", obj, output=output, class_info=ctx.class_info)
def generate_property_stub(
self,
name: str,
raw_obj: object,
obj: object,
static_properties: list[str],
rw_properties: list[str],
ro_properties: list[str],
class_info: ClassInfo | None = None,
) -> None:
"""Generate property stub using introspection of 'obj'.
        Try to infer the type from the docstring and append the resulting lines
        to the appropriate property list (static, read/write, or read-only).
raw_obj : object before evaluation of descriptor (if any)
obj : object after evaluation of descriptor
"""
docstring = getattr(raw_obj, "__doc__", None)
fget = getattr(raw_obj, "fget", None)
if fget:
alt_docstr = getattr(fget, "__doc__", None)
if alt_docstr and docstring:
docstring += "\n" + alt_docstr
elif alt_docstr:
docstring = alt_docstr
ctx = FunctionContext(
self.module_name, name, docstring=docstring, is_abstract=False, class_info=class_info
)
if self.is_private_name(name, ctx.fullname) or self.is_not_in_all(name):
return
self.record_name(ctx.name)
static = self.is_static_property(raw_obj)
readonly = self.is_property_readonly(raw_obj)
if static:
ret_type: str | None = self.strip_or_import(self.get_type_annotation(obj))
else:
default_sig = self.get_default_function_sig(raw_obj, ctx)
ret_type = default_sig.ret_type
inferred_type = self.get_property_type(ret_type, self.sig_generators, ctx)
if inferred_type is not None:
inferred_type = self.strip_or_import(inferred_type)
if static:
classvar = self.add_name("typing.ClassVar")
trailing_comment = " # read-only" if readonly else ""
if inferred_type is None:
inferred_type = self.add_name("_typeshed.Incomplete")
static_properties.append(
f"{self._indent}{name}: {classvar}[{inferred_type}] = ...{trailing_comment}"
)
else: # regular property
if readonly:
ro_properties.append(f"{self._indent}@property")
sig = FunctionSig(name, [ArgSig("self")], inferred_type)
ro_properties.append(sig.format_sig(indent=self._indent))
else:
if inferred_type is None:
inferred_type = self.add_name("_typeshed.Incomplete")
rw_properties.append(f"{self._indent}{name}: {inferred_type}")
def get_type_fullname(self, typ: type) -> str:
"""Given a type, return a string representation"""
if typ is Any: # type: ignore[comparison-overlap]
return "Any"
typename = getattr(typ, "__qualname__", typ.__name__)
module_name = self.get_obj_module(typ)
assert module_name is not None, typ
if module_name != "builtins":
typename = f"{module_name}.{typename}"
return typename
def get_base_types(self, obj: type) -> list[str]:
all_bases = type.mro(obj)
if all_bases[-1] is object:
# TODO: Is this always object?
del all_bases[-1]
        # Remove pybind11_object. All classes generated by pybind11 have pybind11_object in their
        # MRO, which only overrides a few functions of the object type.
if all_bases and all_bases[-1].__name__ == "pybind11_object":
del all_bases[-1]
# remove the class itself
all_bases = all_bases[1:]
# Remove base classes of other bases as redundant.
bases: list[type] = []
for base in all_bases:
if not any(issubclass(b, base) for b in bases):
bases.append(base)
return [self.strip_or_import(self.get_type_fullname(base)) for base in bases]
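    # Illustrative example (assumed): for `class C(B, A)` where B subclasses A, the
    # pruning above keeps only B, so the emitted header becomes `class C(B):`;
    # `object` (and pybind11_object, when present) is always dropped.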
def generate_class_stub(self, class_name: str, cls: type, output: list[str]) -> None:
"""Generate stub for a single class using runtime introspection.
The result lines will be appended to 'output'. If necessary, any
required names will be added to 'imports'.
"""
raw_lookup: Mapping[str, Any] = getattr(cls, "__dict__") # noqa: B009
items = self.get_members(cls)
if self.resort_members:
items = sorted(items, key=lambda x: method_name_sort_key(x[0]))
names = {x[0] for x in items}
methods: list[str] = []
types: list[str] = []
static_properties: list[str] = []
rw_properties: list[str] = []
ro_properties: list[str] = []
attrs: list[tuple[str, Any]] = []
self.record_name(class_name)
self.indent()
class_info = ClassInfo(class_name, "", getattr(cls, "__doc__", None), cls)
for attr, value in items:
# use unevaluated descriptors when dealing with property inspection
raw_value = raw_lookup.get(attr, value)
if self.is_method(class_info, attr, value) or self.is_classmethod(
class_info, attr, value
):
if attr == "__new__":
# TODO: We should support __new__.
if "__init__" in names:
# Avoid duplicate functions if both are present.
# But is there any case where .__new__() has a
# better signature than __init__() ?
continue
attr = "__init__"
# FIXME: make this nicer
if self.is_staticmethod(class_info, attr, value):
class_info.self_var = ""
elif self.is_classmethod(class_info, attr, value):
class_info.self_var = "cls"
else:
class_info.self_var = "self"
self.generate_function_stub(attr, value, output=methods, class_info=class_info)
elif self.is_property(class_info, attr, raw_value):
self.generate_property_stub(
attr,
raw_value,
value,
static_properties,
rw_properties,
ro_properties,
class_info,
)
elif inspect.isclass(value) and self.is_defined_in_module(value):
self.generate_class_stub(attr, value, types)
else:
attrs.append((attr, value))
for attr, value in attrs:
if attr == "__hash__" and value is None:
# special case for __hash__
continue
prop_type_name = self.strip_or_import(self.get_type_annotation(value))
classvar = self.add_name("typing.ClassVar")
static_properties.append(f"{self._indent}{attr}: {classvar}[{prop_type_name}] = ...")
self.dedent()
bases = self.get_base_types(cls)
if bases:
bases_str = "(%s)" % ", ".join(bases)
else:
bases_str = ""
if types or static_properties or rw_properties or methods or ro_properties:
output.append(f"{self._indent}class {class_name}{bases_str}:")
for line in types:
if (
output
and output[-1]
and not output[-1].strip().startswith("class")
and line.strip().startswith("class")
):
output.append("")
output.append(line)
for line in static_properties:
output.append(line)
for line in rw_properties:
output.append(line)
for line in methods:
output.append(line)
for line in ro_properties:
output.append(line)
else:
output.append(f"{self._indent}class {class_name}{bases_str}: ...")
def generate_variable_stub(self, name: str, obj: object, output: list[str]) -> None:
"""Generate stub for a single variable using runtime introspection.
The result lines will be appended to 'output'. If necessary, any
required names will be added to 'imports'.
"""
if self.is_private_name(name, f"{self.module_name}.{name}") or self.is_not_in_all(name):
return
self.record_name(name)
type_str = self.strip_or_import(self.get_type_annotation(obj))
output.append(f"{name}: {type_str}")
def method_name_sort_key(name: str) -> tuple[int, str]:
"""Sort methods in classes in a typical order.
I.e.: constructor, normal methods, special methods.
"""
if name in ("__new__", "__init__"):
return 0, name
if name.startswith("__") and name.endswith("__"):
return 2, name
return 1, name
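# Illustrative ordering (assumed): __init__ and __new__ sort first (key 0), regular
# methods such as `update` next (key 1), and other dunders such as __repr__ last
# (key 2).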
def is_pybind_skipped_attribute(attr: str) -> bool:
return attr.startswith("__pybind11_module_local_")
def infer_c_method_args(
name: str, self_var: str = "self", arg_names: list[str] | None = None
) -> list[ArgSig]:
args: list[ArgSig] | None = None
if name.startswith("__") and name.endswith("__"):
name = name[2:-2]
if name in (
"hash",
"iter",
"next",
"sizeof",
"copy",
"deepcopy",
"reduce",
"getinitargs",
"int",
"float",
"trunc",
"complex",
"bool",
"abs",
"bytes",
"dir",
"len",
"reversed",
"round",
"index",
"enter",
):
args = []
elif name == "getitem":
args = [ArgSig(name="index")]
elif name == "setitem":
args = [ArgSig(name="index"), ArgSig(name="object")]
elif name in ("delattr", "getattr"):
args = [ArgSig(name="name")]
elif name == "setattr":
args = [ArgSig(name="name"), ArgSig(name="value")]
elif name == "getstate":
args = []
elif name == "setstate":
args = [ArgSig(name="state")]
elif name in ("eq", "ne", "lt", "le", "gt", "ge"):
args = [ArgSig(name="other", type="object")]
elif name in (
"add",
"radd",
"sub",
"rsub",
"mul",
"rmul",
"mod",
"rmod",
"floordiv",
"rfloordiv",
"truediv",
"rtruediv",
"divmod",
"rdivmod",
"pow",
"rpow",
"xor",
"rxor",
"or",
"ror",
"and",
"rand",
"lshift",
"rlshift",
"rshift",
"rrshift",
"contains",
"delitem",
"iadd",
"iand",
"ifloordiv",
"ilshift",
"imod",
"imul",
"ior",
"ipow",
"irshift",
"isub",
"itruediv",
"ixor",
):
args = [ArgSig(name="other")]
elif name in ("neg", "pos", "invert"):
args = []
elif name == "get":
args = [ArgSig(name="instance"), ArgSig(name="owner")]
elif name == "set":
args = [ArgSig(name="instance"), ArgSig(name="value")]
elif name == "reduce_ex":
args = [ArgSig(name="protocol")]
elif name == "exit":
args = [
ArgSig(name="type", type="type[BaseException] | None"),
ArgSig(name="value", type="BaseException | None"),
ArgSig(name="traceback", type="types.TracebackType | None"),
]
if args is None:
args = infer_method_arg_types(name, self_var, arg_names)
else:
args = [ArgSig(name=self_var)] + args
if args is None:
args = [ArgSig(name="*args"), ArgSig(name="**kwargs")]
return args
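# Illustrative sketch (not part of the original module): for recognised dunders
# the inferred signature is the self parameter plus the conventional argument
# names; anything unrecognised falls back to infer_method_arg_types() and,
# failing that, to a generic (*args, **kwargs) signature.
#   >>> [a.name for a in infer_c_method_args("__getitem__")]
#   ['self', 'index']
#   >>> [a.name for a in infer_c_method_args("__setattr__")]
#   ['self', 'name', 'value']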
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/stubgenc.py
|
Python
|
NOASSERTION
| 38,314 |
from __future__ import annotations
def is_module_from_legacy_bundled_package(module: str) -> bool:
top_level = module.split(".", 1)[0]
return top_level in legacy_bundled_packages
def approved_stub_package_exists(module: str) -> bool:
top_level = module.split(".", 1)[0]
if top_level in legacy_bundled_packages:
return True
if top_level in non_bundled_packages_flat:
return True
if top_level in non_bundled_packages_namespace:
namespace = non_bundled_packages_namespace[top_level]
components = module.split(".")
for i in range(len(components), 0, -1):
module = ".".join(components[:i])
if module in namespace:
return True
return False
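# Illustrative sketch (not part of the original module): both flat and
# namespace packages are recognised by their top-level name.
#   >>> approved_stub_package_exists("requests.adapters")
#   True
#   >>> approved_stub_package_exists("google.protobuf.message")
#   True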
def stub_distribution_name(module: str) -> str | None:
top_level = module.split(".", 1)[0]
dist = legacy_bundled_packages.get(top_level)
if dist:
return dist
dist = non_bundled_packages_flat.get(top_level)
if dist:
return dist
if top_level in non_bundled_packages_namespace:
namespace = non_bundled_packages_namespace[top_level]
components = module.split(".")
for i in range(len(components), 0, -1):
module = ".".join(components[:i])
dist = namespace.get(module)
if dist:
return dist
return None
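# Illustrative sketch (not part of the original module): lookups fall back from
# the most specific submodule to the top-level package, so namespace packages
# resolve to the right stub distribution.
#   >>> stub_distribution_name("yaml.nodes")
#   'types-PyYAML'
#   >>> stub_distribution_name("google.cloud.ndb.client")
#   'types-google-cloud-ndb'
#   >>> stub_distribution_name("numpy") is None   # ships its own inline types
#   True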
# Stubs for these third-party packages used to be shipped with mypy.
#
# Map package name to PyPI stub distribution name.
legacy_bundled_packages: dict[str, str] = {
"aiofiles": "types-aiofiles",
"bleach": "types-bleach",
"boto": "types-boto",
"cachetools": "types-cachetools",
"click_spinner": "types-click-spinner",
"contextvars": "types-contextvars",
"croniter": "types-croniter",
"dataclasses": "types-dataclasses",
"dateparser": "types-dateparser",
"dateutil": "types-python-dateutil",
"decorator": "types-decorator",
"deprecated": "types-Deprecated",
"docutils": "types-docutils",
"first": "types-first",
"gflags": "types-python-gflags",
"markdown": "types-Markdown",
"mock": "types-mock",
"OpenSSL": "types-pyOpenSSL",
"paramiko": "types-paramiko",
"pkg_resources": "types-setuptools",
"polib": "types-polib",
"pycurl": "types-pycurl",
"pymysql": "types-PyMySQL",
"pyrfc3339": "types-pyRFC3339",
"python2": "types-six",
"pytz": "types-pytz",
"pyVmomi": "types-pyvmomi",
"redis": "types-redis",
"requests": "types-requests",
"retry": "types-retry",
"simplejson": "types-simplejson",
"singledispatch": "types-singledispatch",
"six": "types-six",
"slugify": "types-python-slugify",
"tabulate": "types-tabulate",
"toml": "types-toml",
"typed_ast": "types-typed-ast",
"tzlocal": "types-tzlocal",
"ujson": "types-ujson",
"waitress": "types-waitress",
"yaml": "types-PyYAML",
}
# Map package name to PyPI stub distribution name from typeshed.
# Stubs for these packages were never bundled with mypy. Don't
# include packages that have a release that includes PEP 561 type
# information.
#
# Note that these packages are omitted for now:
# pika: typeshed's stubs are on PyPI as types-pika-ts.
# types-pika already exists on PyPI, and is more complete in many ways,
# but is a non-typeshed stubs package.
non_bundled_packages_flat: dict[str, str] = {
"MySQLdb": "types-mysqlclient",
"PIL": "types-Pillow",
"PyInstaller": "types-pyinstaller",
"Xlib": "types-python-xlib",
"aws_xray_sdk": "types-aws-xray-sdk",
"babel": "types-babel",
"braintree": "types-braintree",
"bs4": "types-beautifulsoup4",
"bugbear": "types-flake8-bugbear",
"caldav": "types-caldav",
"cffi": "types-cffi",
"chevron": "types-chevron",
"colorama": "types-colorama",
"commonmark": "types-commonmark",
"consolemenu": "types-console-menu",
"crontab": "types-python-crontab",
"d3dshot": "types-D3DShot",
"dockerfile_parse": "types-dockerfile-parse",
"docopt": "types-docopt",
"editdistance": "types-editdistance",
"entrypoints": "types-entrypoints",
"farmhash": "types-pyfarmhash",
"flake8_2020": "types-flake8-2020",
"flake8_builtins": "types-flake8-builtins",
"flake8_docstrings": "types-flake8-docstrings",
"flake8_plugin_utils": "types-flake8-plugin-utils",
"flake8_rst_docstrings": "types-flake8-rst-docstrings",
"flake8_simplify": "types-flake8-simplify",
"flake8_typing_imports": "types-flake8-typing-imports",
"flask_cors": "types-Flask-Cors",
"flask_migrate": "types-Flask-Migrate",
"fpdf": "types-fpdf2",
"gdb": "types-gdb",
"hdbcli": "types-hdbcli",
"html5lib": "types-html5lib",
"httplib2": "types-httplib2",
"humanfriendly": "types-humanfriendly",
"invoke": "types-invoke",
"jack": "types-JACK-Client",
"jmespath": "types-jmespath",
"jose": "types-python-jose",
"jsonschema": "types-jsonschema",
"keyboard": "types-keyboard",
"ldap3": "types-ldap3",
"nmap": "types-python-nmap",
"oauthlib": "types-oauthlib",
"openpyxl": "types-openpyxl",
"opentracing": "types-opentracing",
"parsimonious": "types-parsimonious",
"passlib": "types-passlib",
"passpy": "types-passpy",
"peewee": "types-peewee",
"pep8ext_naming": "types-pep8-naming",
"playsound": "types-playsound",
"psutil": "types-psutil",
"psycopg2": "types-psycopg2",
"pyaudio": "types-pyaudio",
"pyautogui": "types-PyAutoGUI",
"pycocotools": "types-pycocotools",
"pyflakes": "types-pyflakes",
"pygments": "types-Pygments",
"pyi_splash": "types-pyinstaller",
"pynput": "types-pynput",
"pythoncom": "types-pywin32",
"pythonwin": "types-pywin32",
"pyscreeze": "types-PyScreeze",
"pysftp": "types-pysftp",
"pytest_lazyfixture": "types-pytest-lazy-fixture",
"pywintypes": "types-pywin32",
"regex": "types-regex",
"send2trash": "types-Send2Trash",
"slumber": "types-slumber",
"stdlib_list": "types-stdlib-list",
"stripe": "types-stripe",
"toposort": "types-toposort",
"tqdm": "types-tqdm",
"tree_sitter": "types-tree-sitter",
"tree_sitter_languages": "types-tree-sitter-languages",
"ttkthemes": "types-ttkthemes",
"vobject": "types-vobject",
"whatthepatch": "types-whatthepatch",
"win32": "types-pywin32",
"win32api": "types-pywin32",
"win32con": "types-pywin32",
"win32com": "types-pywin32",
"win32comext": "types-pywin32",
"win32gui": "types-pywin32",
"xmltodict": "types-xmltodict",
"zxcvbn": "types-zxcvbn",
# Stub packages that are not from typeshed
# Since these can be installed automatically via --install-types, we have a high trust bar
# for additions here
"pandas": "pandas-stubs", # https://github.com/pandas-dev/pandas-stubs
"lxml": "lxml-stubs", # https://github.com/lxml/lxml-stubs
}
non_bundled_packages_namespace: dict[str, dict[str, str]] = {
"backports": {"backports.ssl_match_hostname": "types-backports.ssl_match_hostname"},
"google": {"google.cloud.ndb": "types-google-cloud-ndb", "google.protobuf": "types-protobuf"},
"paho": {"paho.mqtt": "types-paho-mqtt"},
}
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/stubinfo.py
|
Python
|
NOASSERTION
| 7,272 |
"""Tests for stubs.
Verify that various things in stubs are consistent with how things behave at runtime.
"""
from __future__ import annotations
import argparse
import collections.abc
import copy
import enum
import functools
import importlib
import importlib.machinery
import inspect
import os
import pkgutil
import re
import symtable
import sys
import traceback
import types
import typing
import typing_extensions
import warnings
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout
from functools import singledispatch
from pathlib import Path
from typing import AbstractSet, Any, Generic, Iterator, TypeVar, Union
from typing_extensions import get_origin, is_typeddict
import mypy.build
import mypy.modulefinder
import mypy.nodes
import mypy.state
import mypy.types
import mypy.version
from mypy import nodes
from mypy.config_parser import parse_config_file
from mypy.evalexpr import UNKNOWN, evaluate_expression
from mypy.options import Options
from mypy.util import FancyFormatter, bytes_to_human_readable_repr, is_dunder, plural_s
class Missing:
"""Marker object for things that are missing (from a stub or the runtime)."""
def __repr__(self) -> str:
return "MISSING"
MISSING: typing_extensions.Final = Missing()
T = TypeVar("T")
MaybeMissing: typing_extensions.TypeAlias = Union[T, Missing]
class Unrepresentable:
"""Marker object for unrepresentable parameter defaults."""
def __repr__(self) -> str:
return "<unrepresentable>"
UNREPRESENTABLE: typing_extensions.Final = Unrepresentable()
_formatter: typing_extensions.Final = FancyFormatter(sys.stdout, sys.stderr, False)
def _style(message: str, **kwargs: Any) -> str:
"""Wrapper around mypy.util for fancy formatting."""
kwargs.setdefault("color", "none")
return _formatter.style(message, **kwargs)
def _truncate(message: str, length: int) -> str:
if len(message) > length:
return message[: length - 3] + "..."
return message
class StubtestFailure(Exception):
pass
class Error:
def __init__(
self,
object_path: list[str],
message: str,
stub_object: MaybeMissing[nodes.Node],
runtime_object: MaybeMissing[Any],
*,
stub_desc: str | None = None,
runtime_desc: str | None = None,
) -> None:
"""Represents an error found by stubtest.
:param object_path: Location of the object with the error,
e.g. ``["module", "Class", "method"]``
:param message: Error message
:param stub_object: The mypy node representing the stub
:param runtime_object: Actual object obtained from the runtime
:param stub_desc: Specialised description for the stub object, should you wish
:param runtime_desc: Specialised description for the runtime object, should you wish
"""
self.object_path = object_path
self.object_desc = ".".join(object_path)
self.message = message
self.stub_object = stub_object
self.runtime_object = runtime_object
self.stub_desc = stub_desc or str(getattr(stub_object, "type", stub_object))
if runtime_desc is None:
runtime_sig = safe_inspect_signature(runtime_object)
if runtime_sig is None:
self.runtime_desc = _truncate(repr(runtime_object), 100)
else:
runtime_is_async = inspect.iscoroutinefunction(runtime_object)
description = describe_runtime_callable(runtime_sig, is_async=runtime_is_async)
self.runtime_desc = _truncate(description, 100)
else:
self.runtime_desc = runtime_desc
def is_missing_stub(self) -> bool:
"""Whether or not the error is for something missing from the stub."""
return isinstance(self.stub_object, Missing)
def is_positional_only_related(self) -> bool:
"""Whether or not the error is for something being (or not being) positional-only."""
# TODO: This is hacky, use error codes or something more resilient
return "leading double underscore" in self.message
def get_description(self, concise: bool = False) -> str:
"""Returns a description of the error.
:param concise: Whether to return a concise, one-line description
"""
if concise:
return _style(self.object_desc, bold=True) + " " + self.message
stub_line = None
stub_file = None
if not isinstance(self.stub_object, Missing):
stub_line = self.stub_object.line
stub_node = get_stub(self.object_path[0])
if stub_node is not None:
stub_file = stub_node.path or None
stub_loc_str = ""
if stub_file:
stub_loc_str += f" in file {Path(stub_file)}"
if stub_line:
stub_loc_str += f"{':' if stub_file else ' at line '}{stub_line}"
runtime_line = None
runtime_file = None
if not isinstance(self.runtime_object, Missing):
try:
runtime_line = inspect.getsourcelines(self.runtime_object)[1]
except (OSError, TypeError, SyntaxError):
pass
try:
runtime_file = inspect.getsourcefile(self.runtime_object)
except TypeError:
pass
runtime_loc_str = ""
if runtime_file:
runtime_loc_str += f" in file {Path(runtime_file)}"
if runtime_line:
runtime_loc_str += f"{':' if runtime_file else ' at line '}{runtime_line}"
output = [
_style("error: ", color="red", bold=True),
_style(self.object_desc, bold=True),
" ",
self.message,
"\n",
"Stub:",
_style(stub_loc_str, dim=True),
"\n",
_style(self.stub_desc + "\n", color="blue", dim=True),
"Runtime:",
_style(runtime_loc_str, dim=True),
"\n",
_style(self.runtime_desc + "\n", color="blue", dim=True),
]
return "".join(output)
# ====================
# Core logic
# ====================
def silent_import_module(module_name: str) -> types.ModuleType:
with open(os.devnull, "w") as devnull:
with warnings.catch_warnings(), redirect_stdout(devnull), redirect_stderr(devnull):
warnings.simplefilter("ignore")
runtime = importlib.import_module(module_name)
# Also run the equivalent of `from module import *`
# This could have the additional effect of loading not-yet-loaded submodules
# mentioned in __all__
__import__(module_name, fromlist=["*"])
return runtime
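# Illustrative sketch (not part of the original module): importing through this
# helper suppresses anything the module prints or warns about at import time.
#   >>> mod = silent_import_module("this")   # the Zen of Python is not echoed
#   >>> mod.__name__
#   'this'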
def test_module(module_name: str) -> Iterator[Error]:
"""Tests a given module's stub against introspecting it at runtime.
Requires the stub to have been built already, accomplished by a call to ``build_stubs``.
:param module_name: The module to test
"""
stub = get_stub(module_name)
if stub is None:
if not is_probably_private(module_name.split(".")[-1]):
runtime_desc = repr(sys.modules[module_name]) if module_name in sys.modules else "N/A"
yield Error(
[module_name], "failed to find stubs", MISSING, None, runtime_desc=runtime_desc
)
return
try:
runtime = silent_import_module(module_name)
except KeyboardInterrupt:
raise
except BaseException as e:
note = ""
if isinstance(e, ModuleNotFoundError):
note = " Maybe install the runtime package or alter PYTHONPATH?"
yield Error(
[module_name], f"failed to import.{note} {type(e).__name__}: {e}", stub, MISSING
)
return
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
yield from verify(stub, runtime, [module_name])
except Exception as e:
bottom_frame = list(traceback.walk_tb(e.__traceback__))[-1][0]
bottom_module = bottom_frame.f_globals.get("__name__", "")
# Pass on any errors originating from stubtest or mypy
# These can occur expectedly, e.g. StubtestFailure
if bottom_module == "__main__" or bottom_module.split(".")[0] == "mypy":
raise
yield Error(
[module_name],
f"encountered unexpected error, {type(e).__name__}: {e}",
stub,
runtime,
stub_desc="N/A",
runtime_desc=(
"This is most likely the fault of something very dynamic in your library. "
"It's also possible this is a bug in stubtest.\nIf in doubt, please "
"open an issue at https://github.com/python/mypy\n\n"
+ traceback.format_exc().strip()
),
)
@singledispatch
def verify(
stub: MaybeMissing[nodes.Node], runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[Error]:
"""Entry point for comparing a stub to a runtime object.
We use single dispatch based on the type of ``stub``.
:param stub: The mypy node representing a part of the stub
:param runtime: The runtime object corresponding to ``stub``
"""
yield Error(object_path, "is an unknown mypy node", stub, runtime)
def _verify_exported_names(
object_path: list[str], stub: nodes.MypyFile, runtime_all_as_set: set[str]
) -> Iterator[Error]:
# note that this includes the case the stub simply defines `__all__: list[str]`
assert "__all__" in stub.names
public_names_in_stub = {m for m, o in stub.names.items() if o.module_public}
names_in_stub_not_runtime = sorted(public_names_in_stub - runtime_all_as_set)
names_in_runtime_not_stub = sorted(runtime_all_as_set - public_names_in_stub)
if not (names_in_runtime_not_stub or names_in_stub_not_runtime):
return
yield Error(
object_path + ["__all__"],
(
"names exported from the stub do not correspond to the names exported at runtime. "
"This is probably due to things being missing from the stub or an inaccurate `__all__` in the stub"
),
# Pass in MISSING instead of the stub and runtime objects, as the line numbers aren't very
# relevant here, and it makes for a prettier error message
# This means this error will be ignored when using `--ignore-missing-stub`, which is
# desirable in at least the `names_in_runtime_not_stub` case
stub_object=MISSING,
runtime_object=MISSING,
stub_desc=(f"Names exported in the stub but not at runtime: {names_in_stub_not_runtime}"),
runtime_desc=(
f"Names exported at runtime but not in the stub: {names_in_runtime_not_stub}"
),
)
@functools.lru_cache
def _module_symbol_table(runtime: types.ModuleType) -> symtable.SymbolTable | None:
"""Retrieve the symbol table for the module (or None on failure).
1) Use inspect to retrieve the source code of the module
2) Use symtable to parse the source (and use what symtable knows for its purposes)
"""
try:
source = inspect.getsource(runtime)
except (OSError, TypeError, SyntaxError):
return None
try:
return symtable.symtable(source, runtime.__name__, "exec")
except SyntaxError:
return None
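# Illustrative sketch (not part of the original module): symtable distinguishes
# names a module imports from names it assigns, which the _belongs_to_runtime()
# heuristic below relies on.
#   >>> table = symtable.symtable("import os\nx = 1\n", "<example>", "exec")
#   >>> table.lookup("os").is_imported(), table.lookup("x").is_assigned()
#   (True, True)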
@verify.register(nodes.MypyFile)
def verify_mypyfile(
stub: nodes.MypyFile, runtime: MaybeMissing[types.ModuleType], object_path: list[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if not isinstance(runtime, types.ModuleType):
yield Error(object_path, "is not a module", stub, runtime)
return
runtime_all_as_set: set[str] | None
if hasattr(runtime, "__all__"):
runtime_all_as_set = set(runtime.__all__)
if "__all__" in stub.names:
# Only verify the contents of the stub's __all__
# if the stub actually defines __all__
yield from _verify_exported_names(object_path, stub, runtime_all_as_set)
else:
runtime_all_as_set = None
# Check things in the stub
to_check = {
m
for m, o in stub.names.items()
if not o.module_hidden and (not is_probably_private(m) or hasattr(runtime, m))
}
def _belongs_to_runtime(r: types.ModuleType, attr: str) -> bool:
"""Heuristics to determine whether a name originates from another module."""
obj = getattr(r, attr)
if isinstance(obj, types.ModuleType):
return False
symbol_table = _module_symbol_table(r)
if symbol_table is not None:
try:
symbol = symbol_table.lookup(attr)
except KeyError:
pass
else:
if symbol.is_imported():
# symtable says we got this from another module
return False
# But we can't just return True here, because symtable doesn't know about symbols
# that come from `from module import *`
if symbol.is_assigned():
# symtable knows we assigned this symbol in the module
return True
# The __module__ attribute is unreliable for anything except functions and classes,
# but it's our best guess at this point
try:
obj_mod = obj.__module__
except Exception:
pass
else:
if isinstance(obj_mod, str):
return bool(obj_mod == r.__name__)
return True
runtime_public_contents = (
runtime_all_as_set
if runtime_all_as_set is not None
else {
m
for m in dir(runtime)
if not is_probably_private(m)
# Filter out objects that originate from other modules (best effort). Note that in the
# absence of __all__, we don't have a way to detect explicit / intentional re-exports
# at runtime
and _belongs_to_runtime(runtime, m)
}
)
# Check all things declared in module's __all__, falling back to our best guess
to_check.update(runtime_public_contents)
to_check.difference_update(IGNORED_MODULE_DUNDERS)
for entry in sorted(to_check):
stub_entry = stub.names[entry].node if entry in stub.names else MISSING
if isinstance(stub_entry, nodes.MypyFile):
# Don't recursively check exported modules, since that leads to infinite recursion
continue
assert stub_entry is not None
try:
runtime_entry = getattr(runtime, entry, MISSING)
except Exception:
# Catch all exceptions in case the runtime raises an unexpected exception
# from __getattr__ or similar.
continue
yield from verify(stub_entry, runtime_entry, object_path + [entry])
def _verify_final(
stub: nodes.TypeInfo, runtime: type[Any], object_path: list[str]
) -> Iterator[Error]:
try:
class SubClass(runtime): # type: ignore[misc]
pass
except TypeError:
# Enum classes are implicitly @final
if not stub.is_final and not issubclass(runtime, enum.Enum):
yield Error(
object_path,
"cannot be subclassed at runtime, but isn't marked with @final in the stub",
stub,
runtime,
stub_desc=repr(stub),
)
except Exception:
# The class probably wants its subclasses to do something special.
# Examples: ctypes.Array, ctypes._SimpleCData
pass
# Runtime class might be annotated with `@final`:
try:
runtime_final = getattr(runtime, "__final__", False)
except Exception:
runtime_final = False
if runtime_final and not stub.is_final:
yield Error(
object_path,
"has `__final__` attribute, but isn't marked with @final in the stub",
stub,
runtime,
stub_desc=repr(stub),
)
def _verify_metaclass(
stub: nodes.TypeInfo, runtime: type[Any], object_path: list[str], *, is_runtime_typeddict: bool
) -> Iterator[Error]:
# We exclude protocols, because of how complex their implementation is in different versions of
# python. Enums are also hard, as are runtime TypedDicts; ignoring.
# TODO: check that metaclasses are identical?
if not stub.is_protocol and not stub.is_enum and not is_runtime_typeddict:
runtime_metaclass = type(runtime)
if runtime_metaclass is not type and stub.metaclass_type is None:
# This means that runtime has a custom metaclass, but a stub does not.
yield Error(
object_path,
"is inconsistent, metaclass differs",
stub,
runtime,
stub_desc="N/A",
runtime_desc=f"{runtime_metaclass}",
)
elif (
runtime_metaclass is type
and stub.metaclass_type is not None
# We ignore extra `ABCMeta` metaclass on stubs, this might be typing hack.
# We also ignore `builtins.type` metaclass as an implementation detail in mypy.
and not mypy.types.is_named_instance(
stub.metaclass_type, ("abc.ABCMeta", "builtins.type")
)
):
# This means that our stub has a metaclass that is not present at runtime.
yield Error(
object_path,
"metaclass mismatch",
stub,
runtime,
stub_desc=f"{stub.metaclass_type.type.fullname}",
runtime_desc="N/A",
)
@verify.register(nodes.TypeInfo)
def verify_typeinfo(
stub: nodes.TypeInfo, runtime: MaybeMissing[type[Any]], object_path: list[str]
) -> Iterator[Error]:
if stub.is_type_check_only:
# This type only exists in stubs, we only check that the runtime part
# is missing. Other checks are not required.
if not isinstance(runtime, Missing):
yield Error(
object_path,
'is marked as "@type_check_only", but also exists at runtime',
stub,
runtime,
stub_desc=repr(stub),
)
return
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=repr(stub))
return
if not isinstance(runtime, type):
yield Error(object_path, "is not a type", stub, runtime, stub_desc=repr(stub))
return
yield from _verify_final(stub, runtime, object_path)
is_runtime_typeddict = stub.typeddict_type is not None and is_typeddict(runtime)
yield from _verify_metaclass(
stub, runtime, object_path, is_runtime_typeddict=is_runtime_typeddict
)
# Check everything already defined on the stub class itself (i.e. not inherited)
#
# Filter out non-identifier names, as these are (hopefully always?) whacky/fictional things
# (like __mypy-replace or __mypy-post_init, etc.) that don't exist at runtime,
# and exist purely for internal mypy reasons
to_check = {name for name in stub.names if name.isidentifier()}
# Check all public things on the runtime class
to_check.update(
m for m in vars(runtime) if not is_probably_private(m) and m not in IGNORABLE_CLASS_DUNDERS
)
# Special-case the __init__ method for Protocols and the __new__ method for TypedDicts
#
# TODO: On Python <3.11, __init__ methods on Protocol classes
# are silently discarded and replaced.
# However, this is not the case on Python 3.11+.
# Ideally, we'd figure out a good way of validating Protocol __init__ methods on 3.11+.
if stub.is_protocol:
to_check.discard("__init__")
if is_runtime_typeddict:
to_check.discard("__new__")
for entry in sorted(to_check):
mangled_entry = entry
if entry.startswith("__") and not entry.endswith("__"):
mangled_entry = f"_{stub.name.lstrip('_')}{entry}"
stub_to_verify = next((t.names[entry].node for t in stub.mro if entry in t.names), MISSING)
assert stub_to_verify is not None
try:
try:
runtime_attr = getattr(runtime, mangled_entry)
except AttributeError:
runtime_attr = inspect.getattr_static(runtime, mangled_entry, MISSING)
except Exception:
# Catch all exceptions in case the runtime raises an unexpected exception
# from __getattr__ or similar.
continue
# Do not error for an object missing from the stub
# If the runtime object is a types.WrapperDescriptorType object
# and has a non-special dunder name.
# The vast majority of these are false positives.
if not (
isinstance(stub_to_verify, Missing)
and isinstance(runtime_attr, types.WrapperDescriptorType)
and is_dunder(mangled_entry, exclude_special=True)
):
yield from verify(stub_to_verify, runtime_attr, object_path + [entry])
def _static_lookup_runtime(object_path: list[str]) -> MaybeMissing[Any]:
static_runtime = importlib.import_module(object_path[0])
for entry in object_path[1:]:
try:
static_runtime = inspect.getattr_static(static_runtime, entry)
except AttributeError:
# This can happen with mangled names, ignore for now.
# TODO: pass more information about ancestors of nodes/objects to verify, so we don't
# have to do this hacky lookup. Would be useful in several places.
return MISSING
return static_runtime
def _verify_static_class_methods(
stub: nodes.FuncBase, runtime: Any, static_runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[str]:
if stub.name in ("__new__", "__init_subclass__", "__class_getitem__"):
# Special cased by Python, so don't bother checking
return
if inspect.isbuiltin(runtime):
# The isinstance checks don't work reliably for builtins, e.g. datetime.datetime.now, so do
# something a little hacky that seems to work well
probably_class_method = isinstance(getattr(runtime, "__self__", None), type)
if probably_class_method and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not probably_class_method and stub.is_class:
yield "stub is a classmethod but runtime is not"
return
if static_runtime is MISSING:
return
if isinstance(static_runtime, classmethod) and not stub.is_class:
yield "runtime is a classmethod but stub is not"
if not isinstance(static_runtime, classmethod) and stub.is_class:
yield "stub is a classmethod but runtime is not"
if isinstance(static_runtime, staticmethod) and not stub.is_static:
yield "runtime is a staticmethod but stub is not"
if not isinstance(static_runtime, staticmethod) and stub.is_static:
yield "stub is a staticmethod but runtime is not"
def _verify_arg_name(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter, function_name: str
) -> Iterator[str]:
"""Checks whether argument names match."""
# Ignore exact names for most dunder methods
if is_dunder(function_name, exclude_special=True):
return
def strip_prefix(s: str, prefix: str) -> str:
return s[len(prefix) :] if s.startswith(prefix) else s
if strip_prefix(stub_arg.variable.name, "__") == runtime_arg.name:
return
nonspecific_names = {"object", "args"}
if runtime_arg.name in nonspecific_names:
return
def names_approx_match(a: str, b: str) -> bool:
a = a.strip("_")
b = b.strip("_")
return a.startswith(b) or b.startswith(a) or len(a) == 1 or len(b) == 1
# Be more permissive about names matching for positional-only arguments
if runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY and names_approx_match(
stub_arg.variable.name, runtime_arg.name
):
return
# This comes up with namedtuples, so ignore
if stub_arg.variable.name == "_self":
return
yield (
f'stub argument "{stub_arg.variable.name}" '
f'differs from runtime argument "{runtime_arg.name}"'
)
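# Illustrative note (not part of the original module): the comparison above is
# deliberately lax. A stub name "__x" is compared as "x", runtime names such as
# "object" or "args" match anything, and for positional-only parameters the
# nested names_approx_match() accepts prefixes (e.g. "obj" vs "object") and
# single-letter names, so only genuinely conflicting names are reported.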
def _verify_arg_default_value(
stub_arg: nodes.Argument, runtime_arg: inspect.Parameter
) -> Iterator[str]:
"""Checks whether argument default values are compatible."""
if runtime_arg.default != inspect.Parameter.empty:
if stub_arg.kind.is_required():
yield (
f'runtime argument "{runtime_arg.name}" '
"has a default value but stub argument does not"
)
else:
runtime_type = get_mypy_type_of_runtime_value(runtime_arg.default)
# Fallback to the type annotation type if var type is missing. The type annotation
# is an UnboundType, but I don't know enough to know what the pros and cons here are.
# UnboundTypes have ugly question marks following them, so default to var type.
# Note we do this same fallback when constructing signatures in from_overloadedfuncdef
stub_type = stub_arg.variable.type or stub_arg.type_annotation
if isinstance(stub_type, mypy.types.TypeVarType):
stub_type = stub_type.upper_bound
if (
runtime_type is not None
and stub_type is not None
# Avoid false positives for marker objects
and type(runtime_arg.default) is not object
# And ellipsis
and runtime_arg.default is not ...
and not is_subtype_helper(runtime_type, stub_type)
):
yield (
f'runtime argument "{runtime_arg.name}" '
f"has a default value of type {runtime_type}, "
f"which is incompatible with stub argument type {stub_type}"
)
if stub_arg.initializer is not None:
stub_default = evaluate_expression(stub_arg.initializer)
if (
stub_default is not UNKNOWN
and stub_default is not ...
and runtime_arg.default is not UNREPRESENTABLE
and (
stub_default != runtime_arg.default
# We want the types to match exactly, e.g. in case the stub has
# True and the runtime has 1 (or vice versa).
or type(stub_default) is not type(runtime_arg.default) # noqa: E721
)
):
yield (
f'runtime argument "{runtime_arg.name}" '
f"has a default value of {runtime_arg.default!r}, "
f"which is different from stub argument default {stub_default!r}"
)
else:
if stub_arg.kind.is_optional():
yield (
f'stub argument "{stub_arg.variable.name}" has a default value '
f"but runtime argument does not"
)
def maybe_strip_cls(name: str, args: list[nodes.Argument]) -> list[nodes.Argument]:
if args and name in ("__init_subclass__", "__class_getitem__"):
# These are implicitly classmethods. If the stub chooses not to have @classmethod, we
# should remove the cls argument
if args[0].variable.name == "cls":
return args[1:]
return args
class Signature(Generic[T]):
def __init__(self) -> None:
self.pos: list[T] = []
self.kwonly: dict[str, T] = {}
self.varpos: T | None = None
self.varkw: T | None = None
def __str__(self) -> str:
def get_name(arg: Any) -> str:
if isinstance(arg, inspect.Parameter):
return arg.name
if isinstance(arg, nodes.Argument):
return arg.variable.name
raise AssertionError
def get_type(arg: Any) -> str | None:
if isinstance(arg, inspect.Parameter):
return None
if isinstance(arg, nodes.Argument):
return str(arg.variable.type or arg.type_annotation)
raise AssertionError
def has_default(arg: Any) -> bool:
if isinstance(arg, inspect.Parameter):
return bool(arg.default != inspect.Parameter.empty)
if isinstance(arg, nodes.Argument):
return arg.kind.is_optional()
raise AssertionError
def get_desc(arg: Any) -> str:
arg_type = get_type(arg)
return (
get_name(arg)
+ (f": {arg_type}" if arg_type else "")
+ (" = ..." if has_default(arg) else "")
)
kw_only = sorted(self.kwonly.values(), key=lambda a: (has_default(a), get_name(a)))
ret = "def ("
ret += ", ".join(
[get_desc(arg) for arg in self.pos]
+ (["*" + get_name(self.varpos)] if self.varpos else (["*"] if self.kwonly else []))
+ [get_desc(arg) for arg in kw_only]
+ (["**" + get_name(self.varkw)] if self.varkw else [])
)
ret += ")"
return ret
@staticmethod
def from_funcitem(stub: nodes.FuncItem) -> Signature[nodes.Argument]:
stub_sig: Signature[nodes.Argument] = Signature()
stub_args = maybe_strip_cls(stub.name, stub.arguments)
for stub_arg in stub_args:
if stub_arg.kind.is_positional():
stub_sig.pos.append(stub_arg)
elif stub_arg.kind.is_named():
stub_sig.kwonly[stub_arg.variable.name] = stub_arg
elif stub_arg.kind == nodes.ARG_STAR:
stub_sig.varpos = stub_arg
elif stub_arg.kind == nodes.ARG_STAR2:
stub_sig.varkw = stub_arg
else:
raise AssertionError
return stub_sig
@staticmethod
def from_inspect_signature(signature: inspect.Signature) -> Signature[inspect.Parameter]:
runtime_sig: Signature[inspect.Parameter] = Signature()
for runtime_arg in signature.parameters.values():
if runtime_arg.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
runtime_sig.pos.append(runtime_arg)
elif runtime_arg.kind == inspect.Parameter.KEYWORD_ONLY:
runtime_sig.kwonly[runtime_arg.name] = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_POSITIONAL:
runtime_sig.varpos = runtime_arg
elif runtime_arg.kind == inspect.Parameter.VAR_KEYWORD:
runtime_sig.varkw = runtime_arg
else:
raise AssertionError
return runtime_sig
@staticmethod
def from_overloadedfuncdef(stub: nodes.OverloadedFuncDef) -> Signature[nodes.Argument]:
"""Returns a Signature from an OverloadedFuncDef.
If life were simple, to verify_overloadedfuncdef, we'd just verify_funcitem for each of its
items. Unfortunately, life isn't simple and overloads are pretty deceitful. So instead, we
try and combine the overload's items into a single signature that is compatible with any
lies it might try to tell.
"""
# For most dunder methods, just assume all args are positional-only
assume_positional_only = is_dunder(stub.name, exclude_special=True)
all_args: dict[str, list[tuple[nodes.Argument, int]]] = {}
for func in map(_resolve_funcitem_from_decorator, stub.items):
assert func is not None
args = maybe_strip_cls(stub.name, func.arguments)
for index, arg in enumerate(args):
# For positional-only args, we allow overloads to have different names for the same
# argument. To accomplish this, we just make up a fake index-based name.
name = (
f"__{index}"
if arg.variable.name.startswith("__")
or arg.pos_only
or assume_positional_only
or arg.variable.name.strip("_") == "self"
else arg.variable.name
)
all_args.setdefault(name, []).append((arg, index))
def get_position(arg_name: str) -> int:
# We just need this to return the positional args in the correct order.
return max(index for _, index in all_args[arg_name])
def get_type(arg_name: str) -> mypy.types.ProperType:
with mypy.state.state.strict_optional_set(True):
all_types = [
arg.variable.type or arg.type_annotation for arg, _ in all_args[arg_name]
]
return mypy.typeops.make_simplified_union([t for t in all_types if t])
def get_kind(arg_name: str) -> nodes.ArgKind:
kinds = {arg.kind for arg, _ in all_args[arg_name]}
if nodes.ARG_STAR in kinds:
return nodes.ARG_STAR
if nodes.ARG_STAR2 in kinds:
return nodes.ARG_STAR2
# The logic here is based on two tenets:
# 1) If an arg is ever optional (or unspecified), it is optional
# 2) If an arg is ever positional, it is positional
is_opt = (
len(all_args[arg_name]) < len(stub.items)
or nodes.ARG_OPT in kinds
or nodes.ARG_NAMED_OPT in kinds
)
is_pos = nodes.ARG_OPT in kinds or nodes.ARG_POS in kinds
if is_opt:
return nodes.ARG_OPT if is_pos else nodes.ARG_NAMED_OPT
return nodes.ARG_POS if is_pos else nodes.ARG_NAMED
sig: Signature[nodes.Argument] = Signature()
for arg_name in sorted(all_args, key=get_position):
# example_arg_name gives us a real name (in case we had a fake index-based name)
example_arg_name = all_args[arg_name][0][0].variable.name
arg = nodes.Argument(
nodes.Var(example_arg_name, get_type(arg_name)),
type_annotation=None,
initializer=None,
kind=get_kind(arg_name),
pos_only=all(arg.pos_only for arg, _ in all_args[arg_name]),
)
if arg.kind.is_positional():
sig.pos.append(arg)
elif arg.kind.is_named():
sig.kwonly[arg.variable.name] = arg
elif arg.kind == nodes.ARG_STAR:
sig.varpos = arg
elif arg.kind == nodes.ARG_STAR2:
sig.varkw = arg
else:
raise AssertionError
return sig
def _verify_signature(
stub: Signature[nodes.Argument], runtime: Signature[inspect.Parameter], function_name: str
) -> Iterator[str]:
# Check positional arguments match up
for stub_arg, runtime_arg in zip(stub.pos, runtime.pos):
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
if (
runtime_arg.kind == inspect.Parameter.POSITIONAL_ONLY
and not stub_arg.pos_only
and not stub_arg.variable.name.startswith("__")
and stub_arg.variable.name.strip("_") != "self"
and not is_dunder(function_name, exclude_special=True) # noisy for dunder methods
):
yield (
f'stub argument "{stub_arg.variable.name}" should be positional-only '
f'(rename with a leading double underscore, i.e. "__{runtime_arg.name}")'
)
if (
runtime_arg.kind != inspect.Parameter.POSITIONAL_ONLY
and (stub_arg.pos_only or stub_arg.variable.name.startswith("__"))
and stub_arg.variable.name.strip("_") != "self"
and not is_dunder(function_name, exclude_special=True) # noisy for dunder methods
):
yield (
f'stub argument "{stub_arg.variable.name}" should be positional or keyword '
"(remove leading double underscore)"
)
# Check unmatched positional args
if len(stub.pos) > len(runtime.pos):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through *args. Hence, a) if runtime accepts *args, we don't check whether the
# runtime has all of the stub's parameters, b) below, we don't enforce that the stub takes
# *args, since runtime logic may prevent arbitrary arguments from actually being accepted.
if runtime.varpos is None:
for stub_arg in stub.pos[len(runtime.pos) :]:
# If the variable is in runtime.kwonly, it's just mislabelled as not a
# keyword-only argument
if stub_arg.variable.name not in runtime.kwonly:
msg = f'runtime does not have argument "{stub_arg.variable.name}"'
if runtime.varkw is not None:
msg += ". Maybe you forgot to make it keyword-only in the stub?"
yield msg
else:
yield f'stub argument "{stub_arg.variable.name}" is not keyword-only'
if stub.varpos is not None:
yield f'runtime does not have *args argument "{stub.varpos.variable.name}"'
elif len(stub.pos) < len(runtime.pos):
for runtime_arg in runtime.pos[len(stub.pos) :]:
if runtime_arg.name not in stub.kwonly:
if not _is_private_parameter(runtime_arg):
yield f'stub does not have argument "{runtime_arg.name}"'
else:
yield f'runtime argument "{runtime_arg.name}" is not keyword-only'
# Checks involving *args
if len(stub.pos) <= len(runtime.pos) or runtime.varpos is None:
if stub.varpos is None and runtime.varpos is not None:
yield f'stub does not have *args argument "{runtime.varpos.name}"'
if stub.varpos is not None and runtime.varpos is None:
yield f'runtime does not have *args argument "{stub.varpos.variable.name}"'
# Check keyword-only args
for arg in sorted(set(stub.kwonly) & set(runtime.kwonly)):
stub_arg, runtime_arg = stub.kwonly[arg], runtime.kwonly[arg]
yield from _verify_arg_name(stub_arg, runtime_arg, function_name)
yield from _verify_arg_default_value(stub_arg, runtime_arg)
# Check unmatched keyword-only args
if runtime.varkw is None or not set(runtime.kwonly).issubset(set(stub.kwonly)):
# There are cases where the stub exhaustively lists out the extra parameters the function
# would take through **kwargs. Hence, a) if runtime accepts **kwargs (and the stub hasn't
# exhaustively listed out params), we don't check whether the runtime has all of the stub's
# parameters, b) below, we don't enforce that the stub takes **kwargs, since runtime logic
# may prevent arbitrary keyword arguments from actually being accepted.
for arg in sorted(set(stub.kwonly) - set(runtime.kwonly)):
if arg in {runtime_arg.name for runtime_arg in runtime.pos}:
# Don't report this if we've reported it before
if arg not in {runtime_arg.name for runtime_arg in runtime.pos[len(stub.pos) :]}:
yield f'runtime argument "{arg}" is not keyword-only'
else:
yield f'runtime does not have argument "{arg}"'
for arg in sorted(set(runtime.kwonly) - set(stub.kwonly)):
if arg in {stub_arg.variable.name for stub_arg in stub.pos}:
# Don't report this if we've reported it before
if not (
runtime.varpos is None
and arg in {stub_arg.variable.name for stub_arg in stub.pos[len(runtime.pos) :]}
):
yield f'stub argument "{arg}" is not keyword-only'
else:
if not _is_private_parameter(runtime.kwonly[arg]):
yield f'stub does not have argument "{arg}"'
# Checks involving **kwargs
if stub.varkw is None and runtime.varkw is not None:
# As mentioned above, don't enforce that the stub takes **kwargs.
# Also check against positional parameters, to avoid a nitpicky message when an argument
# isn't marked as keyword-only
stub_pos_names = {stub_arg.variable.name for stub_arg in stub.pos}
# Ideally we'd do a strict subset check, but in practice the errors from that aren't useful
if not set(runtime.kwonly).issubset(set(stub.kwonly) | stub_pos_names):
yield f'stub does not have **kwargs argument "{runtime.varkw.name}"'
if stub.varkw is not None and runtime.varkw is None:
yield f'runtime does not have **kwargs argument "{stub.varkw.variable.name}"'
def _is_private_parameter(arg: inspect.Parameter) -> bool:
return (
arg.name.startswith("_")
and not arg.name.startswith("__")
and arg.default is not inspect.Parameter.empty
)
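# Illustrative sketch (not part of the original module): a runtime-only
# parameter such as a keyword-only "_cache=None" is treated as private and is
# not required to appear in the stub.
#   >>> p = inspect.Parameter("_cache", inspect.Parameter.KEYWORD_ONLY, default=None)
#   >>> _is_private_parameter(p)
#   True
#   >>> _is_private_parameter(p.replace(name="cache"))
#   False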
@verify.register(nodes.FuncItem)
def verify_funcitem(
stub: nodes.FuncItem, runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if not is_probably_a_function(runtime):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
# Look the object up statically, to avoid binding by the descriptor protocol
static_runtime = _static_lookup_runtime(object_path)
if isinstance(stub, nodes.FuncDef):
for error_text in _verify_abstract_status(stub, runtime):
yield Error(object_path, error_text, stub, runtime)
for error_text in _verify_final_method(stub, runtime, static_runtime):
yield Error(object_path, error_text, stub, runtime)
for message in _verify_static_class_methods(stub, runtime, static_runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
signature = safe_inspect_signature(runtime)
runtime_is_coroutine = inspect.iscoroutinefunction(runtime)
if signature:
stub_sig = Signature.from_funcitem(stub)
runtime_sig = Signature.from_inspect_signature(signature)
runtime_sig_desc = describe_runtime_callable(signature, is_async=runtime_is_coroutine)
stub_desc = str(stub_sig)
else:
runtime_sig_desc, stub_desc = None, None
# Don't raise an error if the stub is a coroutine, but the runtime isn't.
# That results in false positives.
# See https://github.com/python/typeshed/issues/7344
if runtime_is_coroutine and not stub.is_coroutine:
yield Error(
object_path,
'is an "async def" function at runtime, but not in the stub',
stub,
runtime,
stub_desc=stub_desc,
runtime_desc=runtime_sig_desc,
)
if not signature:
return
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
runtime_desc=runtime_sig_desc,
)
@verify.register(Missing)
def verify_none(
stub: Missing, runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[Error]:
yield Error(object_path, "is not present in stub", stub, runtime)
@verify.register(nodes.Var)
def verify_var(
stub: nodes.Var, runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
# Don't always yield an error here, because we often can't find instance variables
if len(object_path) <= 2:
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if (
stub.is_initialized_in_class
and is_read_only_property(runtime)
and (stub.is_settable_property or not stub.is_property)
):
yield Error(object_path, "is read-only at runtime but not in the stub", stub, runtime)
runtime_type = get_mypy_type_of_runtime_value(runtime)
if (
runtime_type is not None
and stub.type is not None
and not is_subtype_helper(runtime_type, stub.type)
):
should_error = True
# Avoid errors when defining enums, since runtime_type is the enum itself, but we'd
# annotate it with the type of runtime.value
if isinstance(runtime, enum.Enum):
runtime_type = get_mypy_type_of_runtime_value(runtime.value)
if runtime_type is not None and is_subtype_helper(runtime_type, stub.type):
should_error = False
# We always allow setting the stub value to ...
proper_type = mypy.types.get_proper_type(stub.type)
if (
isinstance(proper_type, mypy.types.Instance)
and proper_type.type.fullname == "builtins.ellipsis"
):
should_error = False
if should_error:
yield Error(
object_path, f"variable differs from runtime type {runtime_type}", stub, runtime
)
@verify.register(nodes.OverloadedFuncDef)
def verify_overloadedfuncdef(
stub: nodes.OverloadedFuncDef, runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[Error]:
# TODO: support `@type_check_only` decorator
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.is_property:
# Any property with a setter is represented as an OverloadedFuncDef
if is_read_only_property(runtime):
yield Error(object_path, "is read-only at runtime but not in the stub", stub, runtime)
return
if not is_probably_a_function(runtime):
yield Error(object_path, "is not a function", stub, runtime)
if not callable(runtime):
return
# mypy doesn't allow overloads where one overload is abstract but another isn't,
# so it should be okay to just check whether the first overload is abstract or not.
#
# TODO: Mypy *does* allow properties where e.g. the getter is abstract but the setter is not;
# and any property with a setter is represented as an OverloadedFuncDef internally;
# not sure exactly what (if anything) we should do about that.
first_part = stub.items[0]
if isinstance(first_part, nodes.Decorator) and first_part.is_overload:
for msg in _verify_abstract_status(first_part.func, runtime):
yield Error(object_path, msg, stub, runtime)
# Look the object up statically, to avoid binding by the descriptor protocol
static_runtime = _static_lookup_runtime(object_path)
for message in _verify_static_class_methods(stub, runtime, static_runtime, object_path):
yield Error(object_path, "is inconsistent, " + message, stub, runtime)
# TODO: Should call _verify_final_method here,
# but overloaded final methods in stubs cause a stubtest crash: see #14950
signature = safe_inspect_signature(runtime)
if not signature:
return
stub_sig = Signature.from_overloadedfuncdef(stub)
runtime_sig = Signature.from_inspect_signature(signature)
for message in _verify_signature(stub_sig, runtime_sig, function_name=stub.name):
# TODO: This is a little hacky, but the addition here is super useful
if "has a default value of type" in message:
message += (
". This is often caused by overloads failing to account for explicitly passing "
"in the default value."
)
yield Error(
object_path,
"is inconsistent, " + message,
stub,
runtime,
stub_desc=(str(stub.type)) + f"\nInferred signature: {stub_sig}",
runtime_desc="def " + str(signature),
)
@verify.register(nodes.TypeVarExpr)
def verify_typevarexpr(
stub: nodes.TypeVarExpr, runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
# We seem to insert these typevars into NamedTuple stubs, but they
# don't exist at runtime. Just ignore!
if stub.name == "_NT":
return
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if not isinstance(runtime, TypeVar):
yield Error(object_path, "is not a TypeVar", stub, runtime)
return
@verify.register(nodes.ParamSpecExpr)
def verify_paramspecexpr(
stub: nodes.ParamSpecExpr, runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[Error]:
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
maybe_paramspec_types = (
getattr(typing, "ParamSpec", None),
getattr(typing_extensions, "ParamSpec", None),
)
paramspec_types = tuple(t for t in maybe_paramspec_types if t is not None)
if not paramspec_types or not isinstance(runtime, paramspec_types):
yield Error(object_path, "is not a ParamSpec", stub, runtime)
return
def _verify_readonly_property(stub: nodes.Decorator, runtime: Any) -> Iterator[str]:
assert stub.func.is_property
if isinstance(runtime, property):
yield from _verify_final_method(stub.func, runtime.fget, MISSING)
return
if isinstance(runtime, functools.cached_property):
yield from _verify_final_method(stub.func, runtime.func, MISSING)
return
if inspect.isdatadescriptor(runtime):
# It's enough like a property...
return
# Sometimes attributes pretend to be properties, for instance, to express that they
# are read only. So allowlist if runtime_type matches the return type of stub.
runtime_type = get_mypy_type_of_runtime_value(runtime)
func_type = (
stub.func.type.ret_type if isinstance(stub.func.type, mypy.types.CallableType) else None
)
if (
runtime_type is not None
and func_type is not None
and is_subtype_helper(runtime_type, func_type)
):
return
yield "is inconsistent, cannot reconcile @property on stub with runtime object"
def _verify_abstract_status(stub: nodes.FuncDef, runtime: Any) -> Iterator[str]:
stub_abstract = stub.abstract_status == nodes.IS_ABSTRACT
runtime_abstract = getattr(runtime, "__isabstractmethod__", False)
# The opposite can exist: some implementations omit `@abstractmethod` decorators
if runtime_abstract and not stub_abstract:
item_type = "property" if stub.is_property else "method"
yield f"is inconsistent, runtime {item_type} is abstract but stub is not"
def _verify_final_method(
stub: nodes.FuncDef, runtime: Any, static_runtime: MaybeMissing[Any]
) -> Iterator[str]:
if stub.is_final:
return
if getattr(runtime, "__final__", False) or (
static_runtime is not MISSING and getattr(static_runtime, "__final__", False)
):
yield "is decorated with @final at runtime, but not in the stub"
def _resolve_funcitem_from_decorator(dec: nodes.OverloadPart) -> nodes.FuncItem | None:
"""Returns a FuncItem that corresponds to the output of the decorator.
Returns None if we can't figure out what that would be. For convenience, this function also
accepts FuncItems.
"""
if isinstance(dec, nodes.FuncItem):
return dec
if dec.func.is_property:
return None
def apply_decorator_to_funcitem(
decorator: nodes.Expression, func: nodes.FuncItem
) -> nodes.FuncItem | None:
if (
isinstance(decorator, nodes.CallExpr)
and isinstance(decorator.callee, nodes.RefExpr)
and decorator.callee.fullname in mypy.types.DEPRECATED_TYPE_NAMES
):
return func
if not isinstance(decorator, nodes.RefExpr):
return None
if not decorator.fullname:
# Happens with namedtuple
return None
if (
decorator.fullname in ("builtins.staticmethod", "abc.abstractmethod")
or decorator.fullname in mypy.types.OVERLOAD_NAMES
or decorator.fullname in mypy.types.FINAL_DECORATOR_NAMES
):
return func
if decorator.fullname == "builtins.classmethod":
if func.arguments[0].variable.name not in ("cls", "mcs", "metacls"):
raise StubtestFailure(
f"unexpected class argument name {func.arguments[0].variable.name!r} "
f"in {dec.fullname}"
)
# FuncItem is written so that copy.copy() actually works, even when compiled
ret = copy.copy(func)
# Remove the cls argument, since it's not present in inspect.signature of classmethods
ret.arguments = ret.arguments[1:]
return ret
# Just give up on any other decorators. After excluding properties, we don't run into
# anything else when running on typeshed's stdlib.
return None
func: nodes.FuncItem = dec.func
for decorator in dec.original_decorators:
resulting_func = apply_decorator_to_funcitem(decorator, func)
if resulting_func is None:
return None
func = resulting_func
return func
@verify.register(nodes.Decorator)
def verify_decorator(
stub: nodes.Decorator, runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[Error]:
if stub.func.is_type_check_only:
# This function only exists in stubs, we only check that the runtime part
# is missing. Other checks are not required.
if not isinstance(runtime, Missing):
yield Error(
object_path,
'is marked as "@type_check_only", but also exists at runtime',
stub,
runtime,
stub_desc=repr(stub),
)
return
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime)
return
if stub.func.is_property:
for message in _verify_readonly_property(stub, runtime):
yield Error(object_path, message, stub, runtime)
for message in _verify_abstract_status(stub.func, runtime):
yield Error(object_path, message, stub, runtime)
return
func = _resolve_funcitem_from_decorator(stub)
if func is not None:
yield from verify(func, runtime, object_path)
@verify.register(nodes.TypeAlias)
def verify_typealias(
stub: nodes.TypeAlias, runtime: MaybeMissing[Any], object_path: list[str]
) -> Iterator[Error]:
stub_target = mypy.types.get_proper_type(stub.target)
stub_desc = f"Type alias for {stub_target}"
if isinstance(runtime, Missing):
yield Error(object_path, "is not present at runtime", stub, runtime, stub_desc=stub_desc)
return
runtime_origin = get_origin(runtime) or runtime
if isinstance(stub_target, mypy.types.Instance):
if not isinstance(runtime_origin, type):
yield Error(
object_path,
"is inconsistent, runtime is not a type",
stub,
runtime,
stub_desc=stub_desc,
)
return
stub_origin = stub_target.type
# Do our best to figure out the fullname of the runtime object...
runtime_name: object
try:
runtime_name = runtime_origin.__qualname__
except AttributeError:
runtime_name = getattr(runtime_origin, "__name__", MISSING)
if isinstance(runtime_name, str):
runtime_module: object = getattr(runtime_origin, "__module__", MISSING)
if isinstance(runtime_module, str):
if runtime_module == "collections.abc" or (
runtime_module == "re" and runtime_name in {"Match", "Pattern"}
):
runtime_module = "typing"
runtime_fullname = f"{runtime_module}.{runtime_name}"
if re.fullmatch(rf"_?{re.escape(stub_origin.fullname)}", runtime_fullname):
# Okay, we're probably fine.
return
# Okay, either we couldn't construct a fullname
# or the fullname of the stub didn't match the fullname of the runtime.
# Fallback to a full structural check of the runtime vis-a-vis the stub.
yield from verify(stub_origin, runtime_origin, object_path)
return
if isinstance(stub_target, mypy.types.UnionType):
# complain if runtime is not a Union or UnionType
if runtime_origin is not Union and (
not (sys.version_info >= (3, 10) and isinstance(runtime, types.UnionType))
):
yield Error(object_path, "is not a Union", stub, runtime, stub_desc=str(stub_target))
# could check Union contents here...
return
if isinstance(stub_target, mypy.types.TupleType):
if tuple not in getattr(runtime_origin, "__mro__", ()):
yield Error(
object_path, "is not a subclass of tuple", stub, runtime, stub_desc=stub_desc
)
# could check Tuple contents here...
return
if isinstance(stub_target, mypy.types.CallableType):
if runtime_origin is not collections.abc.Callable:
yield Error(
object_path, "is not a type alias for Callable", stub, runtime, stub_desc=stub_desc
)
# could check Callable contents here...
return
if isinstance(stub_target, mypy.types.AnyType):
return
yield Error(object_path, "is not a recognised type alias", stub, runtime, stub_desc=stub_desc)
# ====================
# Helpers
# ====================
IGNORED_MODULE_DUNDERS: typing_extensions.Final = frozenset(
{
"__file__",
"__doc__",
"__name__",
"__builtins__",
"__package__",
"__cached__",
"__loader__",
"__spec__",
"__annotations__",
"__path__", # mypy adds __path__ to packages, but C packages don't have it
"__getattr__", # resulting behaviour might be typed explicitly
# Created by `warnings.warn`, does not make much sense to have in stubs:
"__warningregistry__",
# TODO: remove the following from this list
"__author__",
"__version__",
"__copyright__",
}
)
IGNORABLE_CLASS_DUNDERS: typing_extensions.Final = frozenset(
{
# Special attributes
"__dict__",
"__annotations__",
"__text_signature__",
"__weakref__",
"__hash__",
"__getattr__", # resulting behaviour might be typed explicitly
"__setattr__", # defining this on a class can cause worse type checking
"__vectorcalloffset__", # undocumented implementation detail of the vectorcall protocol
"__firstlineno__",
"__static_attributes__",
# isinstance/issubclass hooks that type-checkers don't usually care about
"__instancecheck__",
"__subclasshook__",
"__subclasscheck__",
# python2 only magic methods:
"__cmp__",
"__nonzero__",
"__unicode__",
"__div__",
# cython methods
"__pyx_vtable__",
# Pickle methods
"__setstate__",
"__getstate__",
"__getnewargs__",
"__getinitargs__",
"__reduce_ex__",
"__reduce__",
# ctypes weirdness
"__ctype_be__",
"__ctype_le__",
"__ctypes_from_outparam__",
# mypy limitations
"__abstractmethods__", # Classes with metaclass=ABCMeta inherit this attribute
"__new_member__", # If an enum defines __new__, the method is renamed as __new_member__
"__dataclass_fields__", # Generated by dataclasses
"__dataclass_params__", # Generated by dataclasses
"__doc__", # mypy's semanal for namedtuples assumes this is str, not Optional[str]
# Added to all protocol classes on 3.12+ (or if using typing_extensions.Protocol)
"__protocol_attrs__",
"__callable_proto_members_only__",
"__non_callable_proto_members__",
# typing implementation details, consider removing some of these:
"__parameters__",
"__origin__",
"__args__",
"__orig_bases__",
"__final__", # Has a specialized check
# Consider removing __slots__?
"__slots__",
}
)
def is_probably_private(name: str) -> bool:
return name.startswith("_") and not is_dunder(name)
def is_probably_a_function(runtime: Any) -> bool:
return (
isinstance(runtime, (types.FunctionType, types.BuiltinFunctionType))
or isinstance(runtime, (types.MethodType, types.BuiltinMethodType))
or (inspect.ismethoddescriptor(runtime) and callable(runtime))
)
def is_read_only_property(runtime: object) -> bool:
return isinstance(runtime, property) and runtime.fset is None
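# Illustrative sketch (not part of mypy): a quick probe of the three predicates
# above on simple runtime objects. The ``_Example*``/``_example_*`` names are
# hypothetical and exist only for illustration.
class _ExamplePredicateProbe:
    @property
    def value(self) -> int:  # a read-only property: no setter is defined
        return 0
def _example_run_predicates() -> tuple[bool, bool, bool]:
    return (
        is_probably_private("_cache"),  # leading underscore, not a dunder -> True
        is_probably_a_function(len),  # builtin function -> True
        is_read_only_property(_ExamplePredicateProbe.value),  # property without fset -> True
    )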
def safe_inspect_signature(runtime: Any) -> inspect.Signature | None:
try:
try:
return inspect.signature(runtime)
except ValueError:
if (
hasattr(runtime, "__text_signature__")
and "<unrepresentable>" in runtime.__text_signature__
):
# Try to fix up the signature. Workaround for
# https://github.com/python/cpython/issues/87233
sig = runtime.__text_signature__.replace("<unrepresentable>", "...")
sig = inspect._signature_fromstr(inspect.Signature, runtime, sig) # type: ignore[attr-defined]
assert isinstance(sig, inspect.Signature)
new_params = [
(
parameter.replace(default=UNREPRESENTABLE)
if parameter.default is ...
else parameter
)
for parameter in sig.parameters.values()
]
return sig.replace(parameters=new_params)
else:
raise
except Exception:
# inspect.signature throws ValueError all the time
# catch RuntimeError because of https://bugs.python.org/issue39504
# catch TypeError because of https://github.com/python/typeshed/pull/5762
# catch AttributeError because of inspect.signature(_curses.window.border)
return None
def describe_runtime_callable(signature: inspect.Signature, *, is_async: bool) -> str:
return f'{"async " if is_async else ""}def {signature}'
def is_subtype_helper(left: mypy.types.Type, right: mypy.types.Type) -> bool:
"""Checks whether ``left`` is a subtype of ``right``."""
left = mypy.types.get_proper_type(left)
right = mypy.types.get_proper_type(right)
if (
isinstance(left, mypy.types.LiteralType)
and isinstance(left.value, int)
and left.value in (0, 1)
and mypy.types.is_named_instance(right, "builtins.bool")
):
# Pretend Literal[0, 1] is a subtype of bool to avoid unhelpful errors.
return True
if isinstance(right, mypy.types.TypedDictType) and mypy.types.is_named_instance(
left, "builtins.dict"
):
# Special case checks against TypedDicts
return True
with mypy.state.state.strict_optional_set(True):
return mypy.subtypes.is_subtype(left, right)
def get_mypy_type_of_runtime_value(runtime: Any) -> mypy.types.Type | None:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType),
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
signature = safe_inspect_signature(runtime)
if signature:
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
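                # NB: despite the name, this is True when the parameter has *no*
                # default (inspect.Parameter.empty is the "no default" sentinel).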
has_default = arg.default == inspect.Parameter.empty
if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
arg_kinds.append(nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
else:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
stub = get_stub(type(runtime).__module__)
if stub is None:
return None
type_name = type(runtime).__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
value: bool | int | str
if isinstance(runtime, enum.Enum) and isinstance(runtime.name, str):
value = runtime.name
elif isinstance(runtime, bytes):
value = bytes_to_human_readable_repr(runtime)
elif isinstance(runtime, (bool, int, str)):
value = runtime
else:
return fallback
return mypy.types.LiteralType(value=value, fallback=fallback)
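# Illustrative sketch (not part of mypy): ``None`` maps straight to ``NoneType``
# without any stubs having been built; most other values need ``build_stubs`` to
# have populated the stub cache first. Hypothetical helper, for illustration only.
def _example_type_of_none() -> mypy.types.Type | None:
    return get_mypy_type_of_runtime_value(None)  # -> mypy.types.NoneType()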
# ====================
# Build and entrypoint
# ====================
_all_stubs: dict[str, nodes.MypyFile] = {}
def build_stubs(modules: list[str], options: Options, find_submodules: bool = False) -> list[str]:
"""Uses mypy to construct stub objects for the given modules.
This sets global state that ``get_stub`` can access.
Returns all modules we might want to check. If ``find_submodules`` is False, this is equal
to ``modules``.
:param modules: List of modules to build stubs for.
:param options: Mypy options for finding and building stubs.
:param find_submodules: Whether to attempt to find submodules of the given modules as well.
"""
data_dir = mypy.build.default_data_dir()
search_path = mypy.modulefinder.compute_search_paths([], options, data_dir)
find_module_cache = mypy.modulefinder.FindModuleCache(
search_path, fscache=None, options=options
)
all_modules = []
sources = []
for module in modules:
all_modules.append(module)
if not find_submodules:
module_path = find_module_cache.find_module(module)
if not isinstance(module_path, str):
# test_module will yield an error later when it can't find stubs
continue
sources.append(mypy.modulefinder.BuildSource(module_path, module, None))
else:
found_sources = find_module_cache.find_modules_recursive(module)
sources.extend(found_sources)
# find submodules via mypy
all_modules.extend(s.module for s in found_sources if s.module not in all_modules)
# find submodules via pkgutil
try:
runtime = silent_import_module(module)
all_modules.extend(
m.name
for m in pkgutil.walk_packages(runtime.__path__, runtime.__name__ + ".")
if m.name not in all_modules
)
except KeyboardInterrupt:
raise
except BaseException:
pass
if sources:
try:
res = mypy.build.build(sources=sources, options=options)
except mypy.errors.CompileError as e:
raise StubtestFailure(f"failed mypy compile:\n{e}") from e
if res.errors:
raise StubtestFailure("mypy build errors:\n" + "\n".join(res.errors))
global _all_stubs
_all_stubs = res.files
return all_modules
def get_stub(module: str) -> nodes.MypyFile | None:
"""Returns a stub object for the given module, if we've built one."""
return _all_stubs.get(module)
def get_typeshed_stdlib_modules(
custom_typeshed_dir: str | None, version_info: tuple[int, int] | None = None
) -> set[str]:
"""Returns a list of stdlib modules in typeshed (for current Python version)."""
stdlib_py_versions = mypy.modulefinder.load_stdlib_py_versions(custom_typeshed_dir)
if version_info is None:
version_info = sys.version_info[0:2]
def exists_in_version(module: str) -> bool:
assert version_info is not None
parts = module.split(".")
for i in range(len(parts), 0, -1):
current_module = ".".join(parts[:i])
if current_module in stdlib_py_versions:
minver, maxver = stdlib_py_versions[current_module]
return version_info >= minver and (maxver is None or version_info <= maxver)
return False
if custom_typeshed_dir:
typeshed_dir = Path(custom_typeshed_dir)
else:
typeshed_dir = Path(mypy.build.default_data_dir()) / "typeshed"
stdlib_dir = typeshed_dir / "stdlib"
modules: set[str] = set()
for path in stdlib_dir.rglob("*.pyi"):
if path.stem == "__init__":
path = path.parent
module = ".".join(path.relative_to(stdlib_dir).parts[:-1] + (path.stem,))
if exists_in_version(module):
modules.add(module)
return modules
def get_importable_stdlib_modules() -> set[str]:
"""Return all importable stdlib modules at runtime."""
all_stdlib_modules: AbstractSet[str]
if sys.version_info >= (3, 10):
all_stdlib_modules = sys.stdlib_module_names
else:
all_stdlib_modules = set(sys.builtin_module_names)
modules_by_finder: defaultdict[importlib.machinery.FileFinder, set[str]] = defaultdict(set)
for m in pkgutil.iter_modules():
if isinstance(m.module_finder, importlib.machinery.FileFinder):
modules_by_finder[m.module_finder].add(m.name)
for finder, module_group in modules_by_finder.items():
if (
"site-packages" not in Path(finder.path).parts
# if "_queue" is present, it's most likely the module finder
# for stdlib extension modules;
# if "queue" is present, it's most likely the module finder
# for pure-Python stdlib modules.
# In either case, we'll want to add all the modules that the finder has to offer us.
# This is a bit hacky, but seems to work well in a cross-platform way.
and {"_queue", "queue"} & module_group
):
all_stdlib_modules.update(module_group)
importable_stdlib_modules: set[str] = set()
for module_name in all_stdlib_modules:
if module_name in ANNOYING_STDLIB_MODULES:
continue
try:
runtime = silent_import_module(module_name)
except ImportError:
continue
else:
importable_stdlib_modules.add(module_name)
try:
# some stdlib modules (e.g. `nt`) don't have __path__ set...
runtime_path = runtime.__path__
runtime_name = runtime.__name__
except AttributeError:
continue
for submodule in pkgutil.walk_packages(runtime_path, runtime_name + "."):
submodule_name = submodule.name
# There are many annoying *.__main__ stdlib modules,
# and including stubs for them isn't really that useful anyway:
            # tkinter.__main__ opens a tkinter window; unittest.__main__ raises SystemExit; etc.
#
# The idlelib.* submodules are similarly annoying in opening random tkinter windows,
# and we're unlikely to ever add stubs for idlelib in typeshed
# (see discussion in https://github.com/python/typeshed/pull/9193)
#
# test.* modules do weird things like raising exceptions in __del__ methods,
# leading to unraisable exceptions being logged to the terminal
# as a warning at the end of the stubtest run
if submodule_name.endswith(".__main__") or submodule_name.startswith(
("idlelib.", "test.")
):
continue
try:
silent_import_module(submodule_name)
except KeyboardInterrupt:
raise
# importing multiprocessing.popen_forkserver on Windows raises AttributeError...
# some submodules also appear to raise SystemExit as well on some Python versions
# (not sure exactly which)
except BaseException:
continue
else:
importable_stdlib_modules.add(submodule_name)
return importable_stdlib_modules
def get_allowlist_entries(allowlist_file: str) -> Iterator[str]:
def strip_comments(s: str) -> str:
try:
return s[: s.index("#")].strip()
except ValueError:
return s.strip()
with open(allowlist_file) as f:
for line in f:
entry = strip_comments(line)
if entry:
yield entry
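# Illustrative sketch (not part of mypy): allowlist files are plain text with one
# entry per line; ``#`` comments and blank lines are stripped. The temporary file
# below is purely for illustration.
def _example_read_allowlist() -> list[str]:
    import os
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".allowlist", delete=False) as f:
        f.write("os.path.exists  # a comment\n\nsys.maxsize\n")
        path = f.name
    try:
        return list(get_allowlist_entries(path))  # ["os.path.exists", "sys.maxsize"]
    finally:
        os.unlink(path)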
class _Arguments:
modules: list[str]
concise: bool
ignore_missing_stub: bool
ignore_positional_only: bool
allowlist: list[str]
generate_allowlist: bool
ignore_unused_allowlist: bool
mypy_config_file: str | None
custom_typeshed_dir: str | None
check_typeshed: bool
version: str
# typeshed added a stub for __main__, but that causes stubtest to check itself
ANNOYING_STDLIB_MODULES: typing_extensions.Final = frozenset(
{"antigravity", "this", "__main__", "_ios_support"}
)
def test_stubs(args: _Arguments, use_builtins_fixtures: bool = False) -> int:
"""This is stubtest! It's time to test the stubs!"""
# Load the allowlist. This is a series of strings corresponding to Error.object_desc
# Values in the dict will store whether we used the allowlist entry or not.
allowlist = {
entry: False
for allowlist_file in args.allowlist
for entry in get_allowlist_entries(allowlist_file)
}
allowlist_regexes = {entry: re.compile(entry) for entry in allowlist}
# If we need to generate an allowlist, we store Error.object_desc for each error here.
generated_allowlist = set()
modules = args.modules
if args.check_typeshed:
if args.modules:
print(
_style("error:", color="red", bold=True),
"cannot pass both --check-typeshed and a list of modules",
)
return 1
typeshed_modules = get_typeshed_stdlib_modules(args.custom_typeshed_dir)
runtime_modules = get_importable_stdlib_modules()
modules = sorted((typeshed_modules | runtime_modules) - ANNOYING_STDLIB_MODULES)
if not modules:
print(_style("error:", color="red", bold=True), "no modules to check")
return 1
options = Options()
options.incremental = False
options.custom_typeshed_dir = args.custom_typeshed_dir
if options.custom_typeshed_dir:
options.abs_custom_typeshed_dir = os.path.abspath(options.custom_typeshed_dir)
options.config_file = args.mypy_config_file
options.use_builtins_fixtures = use_builtins_fixtures
if options.config_file:
def set_strict_flags() -> None: # not needed yet
return
parse_config_file(options, set_strict_flags, options.config_file, sys.stdout, sys.stderr)
def error_callback(msg: str) -> typing.NoReturn:
print(_style("error:", color="red", bold=True), msg)
sys.exit(1)
def warning_callback(msg: str) -> None:
print(_style("warning:", color="yellow", bold=True), msg)
options.process_error_codes(error_callback=error_callback)
options.process_incomplete_features(
error_callback=error_callback, warning_callback=warning_callback
)
try:
modules = build_stubs(modules, options, find_submodules=not args.check_typeshed)
except StubtestFailure as stubtest_failure:
print(
_style("error:", color="red", bold=True),
f"not checking stubs due to {stubtest_failure}",
)
return 1
exit_code = 0
error_count = 0
for module in modules:
for error in test_module(module):
# Filter errors
if args.ignore_missing_stub and error.is_missing_stub():
continue
if args.ignore_positional_only and error.is_positional_only_related():
continue
if error.object_desc in allowlist:
allowlist[error.object_desc] = True
continue
is_allowlisted = False
for w in allowlist:
if allowlist_regexes[w].fullmatch(error.object_desc):
allowlist[w] = True
is_allowlisted = True
break
if is_allowlisted:
continue
# We have errors, so change exit code, and output whatever necessary
exit_code = 1
if args.generate_allowlist:
generated_allowlist.add(error.object_desc)
continue
print(error.get_description(concise=args.concise))
error_count += 1
# Print unused allowlist entries
if not args.ignore_unused_allowlist:
for w in allowlist:
# Don't consider an entry unused if it regex-matches the empty string
# This lets us allowlist errors that don't manifest at all on some systems
if not allowlist[w] and not allowlist_regexes[w].fullmatch(""):
exit_code = 1
error_count += 1
print(f"note: unused allowlist entry {w}")
# Print the generated allowlist
if args.generate_allowlist:
for e in sorted(generated_allowlist):
print(e)
exit_code = 0
elif not args.concise:
if error_count:
print(
_style(
f"Found {error_count} error{plural_s(error_count)}"
f" (checked {len(modules)} module{plural_s(modules)})",
color="red",
bold=True,
)
)
else:
print(
_style(
f"Success: no issues found in {len(modules)} module{plural_s(modules)}",
color="green",
bold=True,
)
)
return exit_code
def parse_options(args: list[str]) -> _Arguments:
parser = argparse.ArgumentParser(
description="Compares stubs to objects introspected from the runtime."
)
parser.add_argument("modules", nargs="*", help="Modules to test")
parser.add_argument(
"--concise",
action="store_true",
help="Makes stubtest's output more concise, one line per error",
)
parser.add_argument(
"--ignore-missing-stub",
action="store_true",
help="Ignore errors for stub missing things that are present at runtime",
)
parser.add_argument(
"--ignore-positional-only",
action="store_true",
help="Ignore errors for whether an argument should or shouldn't be positional-only",
)
parser.add_argument(
"--allowlist",
"--whitelist",
action="append",
metavar="FILE",
default=[],
help=(
"Use file as an allowlist. Can be passed multiple times to combine multiple "
"allowlists. Allowlists can be created with --generate-allowlist. Allowlists "
"support regular expressions."
),
)
parser.add_argument(
"--generate-allowlist",
"--generate-whitelist",
action="store_true",
help="Print an allowlist (to stdout) to be used with --allowlist",
)
parser.add_argument(
"--ignore-unused-allowlist",
"--ignore-unused-whitelist",
action="store_true",
help="Ignore unused allowlist entries",
)
parser.add_argument(
"--mypy-config-file",
metavar="FILE",
help=("Use specified mypy config file to determine mypy plugins and mypy path"),
)
parser.add_argument(
"--custom-typeshed-dir", metavar="DIR", help="Use the custom typeshed in DIR"
)
parser.add_argument(
"--check-typeshed", action="store_true", help="Check all stdlib modules in typeshed"
)
parser.add_argument(
"--version", action="version", version="%(prog)s " + mypy.version.__version__
)
return parser.parse_args(args, namespace=_Arguments())
def main() -> int:
mypy.util.check_python_version("stubtest")
return test_stubs(parse_options(sys.argv[1:]))
if __name__ == "__main__":
sys.exit(main())
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/stubtest.py
|
Python
|
NOASSERTION
| 83,603 |
"""Utilities for mypy.stubgen, mypy.stubgenc, and mypy.stubdoc modules."""
from __future__ import annotations
import os.path
import re
import sys
from abc import abstractmethod
from collections import defaultdict
from contextlib import contextmanager
from typing import Final, Iterable, Iterator, Mapping
from typing_extensions import overload
from mypy_extensions import mypyc_attr
import mypy.options
from mypy.modulefinder import ModuleNotFoundReason
from mypy.moduleinspect import InspectError, ModuleInspect
from mypy.stubdoc import ArgSig, FunctionSig
from mypy.types import AnyType, NoneType, Type, TypeList, TypeStrVisitor, UnboundType, UnionType
# Modules that may fail when imported, or that may have side effects (fully qualified).
NOT_IMPORTABLE_MODULES = ()
# Typing constructs to be replaced by their builtin equivalents.
TYPING_BUILTIN_REPLACEMENTS: Final = {
# From typing
"typing.Text": "builtins.str",
"typing.Tuple": "builtins.tuple",
"typing.List": "builtins.list",
"typing.Dict": "builtins.dict",
"typing.Set": "builtins.set",
"typing.FrozenSet": "builtins.frozenset",
"typing.Type": "builtins.type",
# From typing_extensions
"typing_extensions.Text": "builtins.str",
"typing_extensions.Tuple": "builtins.tuple",
"typing_extensions.List": "builtins.list",
"typing_extensions.Dict": "builtins.dict",
"typing_extensions.Set": "builtins.set",
"typing_extensions.FrozenSet": "builtins.frozenset",
"typing_extensions.Type": "builtins.type",
}
class CantImport(Exception):
def __init__(self, module: str, message: str) -> None:
self.module = module
self.message = message
def walk_packages(
inspect: ModuleInspect, packages: list[str], verbose: bool = False
) -> Iterator[str]:
"""Iterates through all packages and sub-packages in the given list.
This uses runtime imports (in another process) to find both Python and C modules.
For Python packages we simply pass the __path__ attribute to pkgutil.walk_packages() to
get the content of the package (all subpackages and modules). However, packages in C
    extensions do not have this attribute, so we have to roll our own logic: recursively
find all modules imported in the package that have matching names.
"""
for package_name in packages:
if package_name in NOT_IMPORTABLE_MODULES:
print(f"{package_name}: Skipped (blacklisted)")
continue
if verbose:
print(f"Trying to import {package_name!r} for runtime introspection")
try:
prop = inspect.get_package_properties(package_name)
except InspectError:
report_missing(package_name)
continue
yield prop.name
if prop.is_c_module:
# Recursively iterate through the subpackages
yield from walk_packages(inspect, prop.subpackages, verbose)
else:
yield from prop.subpackages
def find_module_path_using_sys_path(module: str, sys_path: list[str]) -> str | None:
relative_candidates = (
module.replace(".", "/") + ".py",
os.path.join(module.replace(".", "/"), "__init__.py"),
)
for base in sys_path:
for relative_path in relative_candidates:
path = os.path.join(base, relative_path)
if os.path.isfile(path):
return path
return None
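# Illustrative sketch (not part of mypy): locate a pure-Python stdlib module on the
# current ``sys.path``. The exact result depends on the interpreter's installation
# layout, so this only demonstrates the call shape; the helper name is hypothetical.
def _example_find_ast_module() -> str | None:
    # Typically resolves to ".../lib/python3.x/ast.py"; None if not found as source.
    return find_module_path_using_sys_path("ast", sys.path)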
def find_module_path_and_all_py3(
inspect: ModuleInspect, module: str, verbose: bool
) -> tuple[str | None, list[str] | None] | None:
"""Find module and determine __all__ for a Python 3 module.
Return None if the module is a C or pyc-only module.
Return (module_path, __all__) if it is a Python module.
Raise CantImport if import failed.
"""
if module in NOT_IMPORTABLE_MODULES:
raise CantImport(module, "")
# TODO: Support custom interpreters.
if verbose:
print(f"Trying to import {module!r} for runtime introspection")
try:
mod = inspect.get_package_properties(module)
except InspectError as e:
# Fall back to finding the module using sys.path.
path = find_module_path_using_sys_path(module, sys.path)
if path is None:
raise CantImport(module, str(e)) from e
return path, None
if mod.is_c_module:
return None
return mod.file, mod.all
@contextmanager
def generate_guarded(
mod: str, target: str, ignore_errors: bool = True, verbose: bool = False
) -> Iterator[None]:
"""Ignore or report errors during stub generation.
Optionally report success.
"""
if verbose:
print(f"Processing {mod}")
try:
yield
except Exception as e:
if not ignore_errors:
raise e
else:
# --ignore-errors was passed
print("Stub generation failed for", mod, file=sys.stderr)
else:
if verbose:
print(f"Created {target}")
def report_missing(mod: str, message: str | None = "", traceback: str = "") -> None:
if message:
message = " with error: " + message
print(f"{mod}: Failed to import, skipping{message}")
def fail_missing(mod: str, reason: ModuleNotFoundReason) -> None:
if reason is ModuleNotFoundReason.NOT_FOUND:
clarification = "(consider using --search-path)"
elif reason is ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS:
clarification = "(module likely exists, but is not PEP 561 compatible)"
else:
clarification = f"(unknown reason '{reason}')"
raise SystemExit(f"Can't find module '{mod}' {clarification}")
@overload
def remove_misplaced_type_comments(source: bytes) -> bytes: ...
@overload
def remove_misplaced_type_comments(source: str) -> str: ...
def remove_misplaced_type_comments(source: str | bytes) -> str | bytes:
"""Remove comments from source that could be understood as misplaced type comments.
Normal comments may look like misplaced type comments, and since they cause blocking
parse errors, we want to avoid them.
"""
if isinstance(source, bytes):
# This gives us a 1-1 character code mapping, so it's roundtrippable.
text = source.decode("latin1")
else:
text = source
# Remove something that looks like a variable type comment but that's by itself
# on a line, as it will often generate a parse error (unless it's # type: ignore).
text = re.sub(r'^[ \t]*# +type: +["\'a-zA-Z_].*$', "", text, flags=re.MULTILINE)
# Remove something that looks like a function type comment after docstring,
# which will result in a parse error.
text = re.sub(r'""" *\n[ \t\n]*# +type: +\(.*$', '"""\n', text, flags=re.MULTILINE)
text = re.sub(r"''' *\n[ \t\n]*# +type: +\(.*$", "'''\n", text, flags=re.MULTILINE)
# Remove something that looks like a badly formed function type comment.
text = re.sub(r"^[ \t]*# +type: +\([^()]+(\)[ \t]*)?$", "", text, flags=re.MULTILINE)
if isinstance(source, bytes):
return text.encode("latin1")
else:
return text
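# Illustrative sketch (not part of mypy): a bare variable type comment on its own
# line would be a blocking parse error, so it is blanked out while ordinary
# comments survive. Hypothetical helper, for illustration only.
def _example_strip_misplaced_comment() -> str:
    source = "x = 1  # a normal comment\n# type: List[int]\n"
    # Only the second line is removed.
    return remove_misplaced_type_comments(source)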
def common_dir_prefix(paths: list[str]) -> str:
if not paths:
return "."
cur = os.path.dirname(os.path.normpath(paths[0]))
for path in paths[1:]:
while True:
path = os.path.dirname(os.path.normpath(path))
if (cur + os.sep).startswith(path + os.sep):
cur = path
break
return cur or "."
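# Illustrative sketch (not part of mypy): the common directory prefix is computed
# purely lexically; no filesystem access happens. The paths here are hypothetical.
def _example_common_prefix() -> str:
    return common_dir_prefix(["pkg/sub/a.py", "pkg/b.py"])  # -> "pkg"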
class AnnotationPrinter(TypeStrVisitor):
"""Visitor used to print existing annotations in a file.
The main difference from TypeStrVisitor is a better treatment of
unbound types.
Notes:
* This visitor doesn't add imports necessary for annotations, this is done separately
by ImportTracker.
* It can print all kinds of types, but the generated strings may not be valid (notably
callable types) since it prints the same string that reveal_type() does.
* For Instance types it prints the fully qualified names.
"""
# TODO: Generate valid string representation for callable types.
# TODO: Use short names for Instances.
def __init__(
self,
stubgen: BaseStubGenerator,
known_modules: list[str] | None = None,
local_modules: list[str] | None = None,
) -> None:
super().__init__(options=mypy.options.Options())
self.stubgen = stubgen
self.known_modules = known_modules
self.local_modules = local_modules or ["builtins"]
def visit_any(self, t: AnyType) -> str:
s = super().visit_any(t)
self.stubgen.import_tracker.require_name(s)
return s
def visit_unbound_type(self, t: UnboundType) -> str:
s = t.name
fullname = self.stubgen.resolve_name(s)
if fullname == "typing.Union":
return " | ".join([item.accept(self) for item in t.args])
if fullname == "typing.Optional":
if len(t.args) == 1:
return f"{t.args[0].accept(self)} | None"
return self.stubgen.add_name("_typeshed.Incomplete")
if fullname in TYPING_BUILTIN_REPLACEMENTS:
s = self.stubgen.add_name(TYPING_BUILTIN_REPLACEMENTS[fullname], require=True)
if self.known_modules is not None and "." in s:
# see if this object is from any of the modules that we're currently processing.
# reverse sort so that subpackages come before parents: e.g. "foo.bar" before "foo".
for module_name in self.local_modules + sorted(self.known_modules, reverse=True):
if s.startswith(module_name + "."):
if module_name in self.local_modules:
s = s[len(module_name) + 1 :]
arg_module = module_name
break
else:
arg_module = s[: s.rindex(".")]
if arg_module not in self.local_modules:
self.stubgen.import_tracker.add_import(arg_module, require=True)
elif s == "NoneType":
# when called without analysis all types are unbound, so this won't hit
# visit_none_type().
s = "None"
else:
self.stubgen.import_tracker.require_name(s)
if t.args:
s += f"[{self.args_str(t.args)}]"
elif t.empty_tuple_index:
s += "[()]"
return s
def visit_none_type(self, t: NoneType) -> str:
return "None"
def visit_type_list(self, t: TypeList) -> str:
return f"[{self.list_str(t.items)}]"
def visit_union_type(self, t: UnionType) -> str:
return " | ".join([item.accept(self) for item in t.items])
def args_str(self, args: Iterable[Type]) -> str:
"""Convert an array of arguments to strings and join the results with commas.
The main difference from list_str is the preservation of quotes for string
arguments
"""
types = ["builtins.bytes", "builtins.str"]
res = []
for arg in args:
arg_str = arg.accept(self)
if isinstance(arg, UnboundType) and arg.original_str_fallback in types:
res.append(f"'{arg_str}'")
else:
res.append(arg_str)
return ", ".join(res)
class ClassInfo:
def __init__(
self, name: str, self_var: str, docstring: str | None = None, cls: type | None = None
) -> None:
self.name = name
self.self_var = self_var
self.docstring = docstring
self.cls = cls
class FunctionContext:
def __init__(
self,
module_name: str,
name: str,
docstring: str | None = None,
is_abstract: bool = False,
class_info: ClassInfo | None = None,
) -> None:
self.module_name = module_name
self.name = name
self.docstring = docstring
self.is_abstract = is_abstract
self.class_info = class_info
self._fullname: str | None = None
@property
def fullname(self) -> str:
if self._fullname is None:
if self.class_info:
self._fullname = f"{self.module_name}.{self.class_info.name}.{self.name}"
else:
self._fullname = f"{self.module_name}.{self.name}"
return self._fullname
def infer_method_ret_type(name: str) -> str | None:
"""Infer return types for known special methods"""
if name.startswith("__") and name.endswith("__"):
name = name[2:-2]
if name in ("float", "bool", "bytes", "int", "complex", "str"):
return name
# Note: __eq__ and co may return arbitrary types, but bool is good enough for stubgen.
elif name in ("eq", "ne", "lt", "le", "gt", "ge", "contains"):
return "bool"
elif name in ("len", "length_hint", "index", "hash", "sizeof", "trunc", "floor", "ceil"):
return "int"
elif name in ("format", "repr"):
return "str"
elif name in ("init", "setitem", "del", "delitem"):
return "None"
return None
def infer_method_arg_types(
name: str, self_var: str = "self", arg_names: list[str] | None = None
) -> list[ArgSig] | None:
"""Infer argument types for known special methods"""
args: list[ArgSig] | None = None
if name.startswith("__") and name.endswith("__"):
if arg_names and len(arg_names) >= 1 and arg_names[0] == "self":
arg_names = arg_names[1:]
name = name[2:-2]
if name == "exit":
if arg_names is None:
arg_names = ["type", "value", "traceback"]
if len(arg_names) == 3:
arg_types = [
"type[BaseException] | None",
"BaseException | None",
"types.TracebackType | None",
]
args = [
ArgSig(name=arg_name, type=arg_type)
for arg_name, arg_type in zip(arg_names, arg_types)
]
if args is not None:
return [ArgSig(name=self_var)] + args
return None
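# Illustrative sketch (not part of mypy): return and argument types for well-known
# dunders are hard-coded rather than introspected. Hypothetical helper, for
# illustration only.
def _example_infer_dunder_types() -> tuple[str | None, list[ArgSig] | None]:
    # "__len__" always returns int; "__exit__" gets the standard three arguments.
    return infer_method_ret_type("__len__"), infer_method_arg_types("__exit__")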
@mypyc_attr(allow_interpreted_subclasses=True)
class SignatureGenerator:
"""Abstract base class for extracting a list of FunctionSigs for each function."""
def remove_self_type(
self, inferred: list[FunctionSig] | None, self_var: str
) -> list[FunctionSig] | None:
"""Remove type annotation from self/cls argument"""
if inferred:
for signature in inferred:
if signature.args:
if signature.args[0].name == self_var:
signature.args[0].type = None
return inferred
@abstractmethod
def get_function_sig(
self, default_sig: FunctionSig, ctx: FunctionContext
) -> list[FunctionSig] | None:
"""Return a list of signatures for the given function.
If no signature can be found, return None. If all of the registered SignatureGenerators
for the stub generator return None, then the default_sig will be used.
"""
pass
@abstractmethod
def get_property_type(self, default_type: str | None, ctx: FunctionContext) -> str | None:
"""Return the type of the given property"""
pass
class ImportTracker:
"""Record necessary imports during stub generation."""
def __init__(self) -> None:
# module_for['foo'] has the module name where 'foo' was imported from, or None if
# 'foo' is a module imported directly;
# direct_imports['foo'] is the module path used when the name 'foo' was added to the
# namespace.
# reverse_alias['foo'] is the name that 'foo' had originally when imported with an
# alias; examples
# 'from pkg import mod' ==> module_for['mod'] == 'pkg'
# 'from pkg import mod as m' ==> module_for['m'] == 'pkg'
# ==> reverse_alias['m'] == 'mod'
# 'import pkg.mod as m' ==> module_for['m'] == None
# ==> reverse_alias['m'] == 'pkg.mod'
# 'import pkg.mod' ==> module_for['pkg'] == None
# ==> module_for['pkg.mod'] == None
# ==> direct_imports['pkg'] == 'pkg.mod'
# ==> direct_imports['pkg.mod'] == 'pkg.mod'
self.module_for: dict[str, str | None] = {}
self.direct_imports: dict[str, str] = {}
self.reverse_alias: dict[str, str] = {}
# required_names is the set of names that are actually used in a type annotation
self.required_names: set[str] = set()
# Names that should be reexported if they come from another module
self.reexports: set[str] = set()
def add_import_from(
self, module: str, names: list[tuple[str, str | None]], require: bool = False
) -> None:
for name, alias in names:
if alias:
# 'from {module} import {name} as {alias}'
self.module_for[alias] = module
self.reverse_alias[alias] = name
else:
# 'from {module} import {name}'
self.module_for[name] = module
self.reverse_alias.pop(name, None)
if require:
self.require_name(alias or name)
self.direct_imports.pop(alias or name, None)
def add_import(self, module: str, alias: str | None = None, require: bool = False) -> None:
if alias:
# 'import {module} as {alias}'
assert "." not in alias # invalid syntax
self.module_for[alias] = None
self.reverse_alias[alias] = module
if require:
self.required_names.add(alias)
else:
# 'import {module}'
name = module
if require:
self.required_names.add(name)
# add module and its parent packages
while name:
self.module_for[name] = None
self.direct_imports[name] = module
self.reverse_alias.pop(name, None)
name = name.rpartition(".")[0]
def require_name(self, name: str) -> None:
while name not in self.direct_imports and "." in name:
name = name.rsplit(".", 1)[0]
self.required_names.add(name)
def reexport(self, name: str) -> None:
"""Mark a given non qualified name as needed in __all__.
This means that in case it comes from a module, it should be
imported with an alias even if the alias is the same as the name.
"""
self.require_name(name)
self.reexports.add(name)
def import_lines(self) -> list[str]:
"""The list of required import lines (as strings with python code).
        In order for a module to be included in this output, an identifier must be both
'required' via require_name() and 'imported' via add_import_from()
or add_import()
"""
result = []
        # To summarize multiple names imported from the same module, we collect those
        # in the `module_map` dictionary, mapping a module path to the list of names that should
        # be imported from it. The names can also be aliases in the form 'original as alias'.
module_map: Mapping[str, list[str]] = defaultdict(list)
for name in sorted(
self.required_names,
key=lambda n: (self.reverse_alias[n], n) if n in self.reverse_alias else (n, ""),
):
# If we haven't seen this name in an import statement, ignore it
if name not in self.module_for:
continue
m = self.module_for[name]
if m is not None:
# This name was found in a from ... import ...
# Collect the name in the module_map
if name in self.reverse_alias:
name = f"{self.reverse_alias[name]} as {name}"
elif name in self.reexports:
name = f"{name} as {name}"
module_map[m].append(name)
else:
# This name was found in an import ...
# We can already generate the import line
if name in self.reverse_alias:
source = self.reverse_alias[name]
result.append(f"import {source} as {name}\n")
elif name in self.reexports:
assert "." not in name # Because reexports only has nonqualified names
result.append(f"import {name} as {name}\n")
else:
result.append(f"import {name}\n")
# Now generate all the from ... import ... lines collected in module_map
for module, names in sorted(module_map.items()):
result.append(f"from {module} import {', '.join(sorted(names))}\n")
return result
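# Illustrative sketch (not part of mypy): only names that are both imported *and*
# required end up in the emitted import lines. Hypothetical helper, for
# illustration only.
def _example_import_lines() -> list[str]:
    tracker = ImportTracker()
    tracker.add_import_from("typing", [("Any", None), ("Mapping", None)])
    tracker.add_import("os.path", alias="osp")
    tracker.require_name("Any")  # "Mapping" is never required, so it is dropped
    tracker.require_name("osp")
    # -> ["import os.path as osp\n", "from typing import Any\n"]
    return tracker.import_lines()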
@mypyc_attr(allow_interpreted_subclasses=True)
class BaseStubGenerator:
# These names should be omitted from generated stubs.
IGNORED_DUNDERS: Final = {
"__all__",
"__author__",
"__about__",
"__copyright__",
"__email__",
"__license__",
"__summary__",
"__title__",
"__uri__",
"__str__",
"__repr__",
"__getstate__",
"__setstate__",
"__slots__",
"__builtins__",
"__cached__",
"__file__",
"__name__",
"__package__",
"__path__",
"__spec__",
"__loader__",
}
TYPING_MODULE_NAMES: Final = ("typing", "typing_extensions")
# Special-cased names that are implicitly exported from the stub (from m import y as y).
EXTRA_EXPORTED: Final = {
"pyasn1_modules.rfc2437.univ",
"pyasn1_modules.rfc2459.char",
"pyasn1_modules.rfc2459.univ",
}
def __init__(
self,
_all_: list[str] | None = None,
include_private: bool = False,
export_less: bool = False,
include_docstrings: bool = False,
) -> None:
# Best known value of __all__.
self._all_ = _all_
self._include_private = include_private
self._include_docstrings = include_docstrings
# Disable implicit exports of package-internal imports?
self.export_less = export_less
self._import_lines: list[str] = []
self._output: list[str] = []
# Current indent level (indent is hardcoded to 4 spaces).
self._indent = ""
self._toplevel_names: list[str] = []
self.import_tracker = ImportTracker()
# Top-level members
self.defined_names: set[str] = set()
self.sig_generators = self.get_sig_generators()
# populated by visit_mypy_file
self.module_name: str = ""
# These are "soft" imports for objects which might appear in annotations but not have
# a corresponding import statement.
self.known_imports = {
"_typeshed": ["Incomplete"],
"typing": ["Any", "TypeVar", "NamedTuple", "TypedDict"],
"collections.abc": ["Generator"],
"typing_extensions": ["ParamSpec", "TypeVarTuple"],
}
def get_sig_generators(self) -> list[SignatureGenerator]:
return []
def resolve_name(self, name: str) -> str:
"""Return the full name resolving imports and import aliases."""
if "." not in name:
real_module = self.import_tracker.module_for.get(name)
real_short = self.import_tracker.reverse_alias.get(name, name)
if real_module is None and real_short not in self.defined_names:
real_module = "builtins" # not imported and not defined, must be a builtin
else:
name_module, real_short = name.split(".", 1)
real_module = self.import_tracker.reverse_alias.get(name_module, name_module)
resolved_name = real_short if real_module is None else f"{real_module}.{real_short}"
return resolved_name
def add_name(self, fullname: str, require: bool = True) -> str:
"""Add a name to be imported and return the name reference.
        The import will be internal to the stub (i.e. don't reexport).
"""
module, name = fullname.rsplit(".", 1)
alias = "_" + name if name in self.defined_names else None
while alias in self.defined_names:
alias = "_" + alias
if module != "builtins" or alias: # don't import from builtins unless needed
self.import_tracker.add_import_from(module, [(name, alias)], require=require)
return alias or name
def add_import_line(self, line: str) -> None:
"""Add a line of text to the import section, unless it's already there."""
if line not in self._import_lines:
self._import_lines.append(line)
def get_imports(self) -> str:
"""Return the import statements for the stub."""
imports = ""
if self._import_lines:
imports += "".join(self._import_lines)
imports += "".join(self.import_tracker.import_lines())
return imports
def output(self) -> str:
"""Return the text for the stub."""
pieces: list[str] = []
if imports := self.get_imports():
pieces.append(imports)
if dunder_all := self.get_dunder_all():
pieces.append(dunder_all)
if self._output:
pieces.append("".join(self._output))
return "\n".join(pieces)
def get_dunder_all(self) -> str:
"""Return the __all__ list for the stub."""
if self._all_:
# Note we emit all names in the runtime __all__ here, even if they
# don't actually exist. If that happens, the runtime has a bug, and
# it's not obvious what the correct behavior should be. We choose
# to reflect the runtime __all__ as closely as possible.
return f"__all__ = {self._all_!r}\n"
return ""
def add(self, string: str) -> None:
"""Add text to generated stub."""
self._output.append(string)
def is_top_level(self) -> bool:
"""Are we processing the top level of a file?"""
return self._indent == ""
def indent(self) -> None:
"""Add one level of indentation."""
self._indent += " "
def dedent(self) -> None:
"""Remove one level of indentation."""
self._indent = self._indent[:-4]
def record_name(self, name: str) -> None:
"""Mark a name as defined.
This only does anything if at the top level of a module.
"""
if self.is_top_level():
self._toplevel_names.append(name)
def is_recorded_name(self, name: str) -> bool:
"""Has this name been recorded previously?"""
return self.is_top_level() and name in self._toplevel_names
def set_defined_names(self, defined_names: set[str]) -> None:
self.defined_names = defined_names
# Names in __all__ are required
for name in self._all_ or ():
self.import_tracker.reexport(name)
for pkg, imports in self.known_imports.items():
for t in imports:
# require=False means that the import won't be added unless require_name() is called
# for the object during generation.
self.add_name(f"{pkg}.{t}", require=False)
def check_undefined_names(self) -> None:
undefined_names = [name for name in self._all_ or [] if name not in self._toplevel_names]
if undefined_names:
if self._output:
self.add("\n")
self.add("# Names in __all__ with no definition:\n")
for name in sorted(undefined_names):
self.add(f"# {name}\n")
def get_signatures(
self,
default_signature: FunctionSig,
sig_generators: list[SignatureGenerator],
func_ctx: FunctionContext,
) -> list[FunctionSig]:
for sig_gen in sig_generators:
inferred = sig_gen.get_function_sig(default_signature, func_ctx)
if inferred:
return inferred
return [default_signature]
def get_property_type(
self,
default_type: str | None,
sig_generators: list[SignatureGenerator],
func_ctx: FunctionContext,
) -> str | None:
for sig_gen in sig_generators:
inferred = sig_gen.get_property_type(default_type, func_ctx)
if inferred:
return inferred
return default_type
def format_func_def(
self,
sigs: list[FunctionSig],
is_coroutine: bool = False,
decorators: list[str] | None = None,
docstring: str | None = None,
) -> list[str]:
lines: list[str] = []
if decorators is None:
decorators = []
for signature in sigs:
# dump decorators, just before "def ..."
for deco in decorators:
lines.append(f"{self._indent}{deco}")
lines.append(
signature.format_sig(
indent=self._indent,
is_async=is_coroutine,
docstring=docstring if self._include_docstrings else None,
)
)
return lines
def print_annotation(
self,
t: Type,
known_modules: list[str] | None = None,
local_modules: list[str] | None = None,
) -> str:
printer = AnnotationPrinter(self, known_modules, local_modules)
return t.accept(printer)
def is_not_in_all(self, name: str) -> bool:
if self.is_private_name(name):
return False
if self._all_:
return self.is_top_level() and name not in self._all_
return False
def is_private_name(self, name: str, fullname: str | None = None) -> bool:
if self._include_private:
return False
if fullname in self.EXTRA_EXPORTED:
return False
if name == "_":
return False
if not name.startswith("_"):
return False
if self._all_ and name in self._all_:
return False
if name.startswith("__") and name.endswith("__"):
return name in self.IGNORED_DUNDERS
return True
def should_reexport(self, name: str, full_module: str, name_is_alias: bool) -> bool:
if (
not name_is_alias
and self.module_name
and (self.module_name + "." + name) in self.EXTRA_EXPORTED
):
# Special case certain names that should be exported, against our general rules.
return True
if name_is_alias:
return False
if self.export_less:
return False
if not self.module_name:
return False
is_private = self.is_private_name(name, full_module + "." + name)
if is_private:
return False
top_level = full_module.split(".")[0]
self_top_level = self.module_name.split(".", 1)[0]
if top_level not in (self_top_level, "_" + self_top_level):
# Export imports from the same package, since we can't reliably tell whether they
# are part of the public API.
return False
if self._all_:
return name in self._all_
return True
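# Illustrative sketch (not part of mypy): names pulled in via ``add_name`` are
# rendered by ``get_imports`` when the stub text is assembled. Hypothetical helper,
# for illustration only.
def _example_stub_generator_imports() -> str:
    gen = BaseStubGenerator()
    incomplete = gen.add_name("_typeshed.Incomplete")  # -> "Incomplete"
    gen.add(f"x: {incomplete}\n")
    return gen.get_imports()  # -> "from _typeshed import Incomplete\n"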
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/stubutil.py
|
Python
|
NOASSERTION
| 31,488 |
from __future__ import annotations
from contextlib import contextmanager
from typing import Any, Callable, Final, Iterator, List, TypeVar, cast
from typing_extensions import TypeAlias as _TypeAlias
import mypy.applytype
import mypy.constraints
import mypy.typeops
from mypy.erasetype import erase_type
from mypy.expandtype import (
expand_self_type,
expand_type,
expand_type_by_instance,
freshen_function_type_vars,
)
from mypy.maptype import map_instance_to_supertype
# Circular import; done in the function instead.
# import mypy.solve
from mypy.nodes import (
ARG_STAR,
ARG_STAR2,
CONTRAVARIANT,
COVARIANT,
INVARIANT,
VARIANCE_NOT_READY,
Decorator,
FuncBase,
OverloadedFuncDef,
TypeInfo,
Var,
)
from mypy.options import Options
from mypy.state import state
from mypy.types import (
MYPYC_NATIVE_INT_NAMES,
TUPLE_LIKE_INSTANCE_NAMES,
TYPED_NAMEDTUPLE_NAMES,
AnyType,
CallableType,
DeletedType,
ErasedType,
FormalArgument,
FunctionLike,
Instance,
LiteralType,
NoneType,
NormalizedCallableType,
Overloaded,
Parameters,
ParamSpecType,
PartialType,
ProperType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeType,
TypeVarTupleType,
TypeVarType,
TypeVisitor,
UnboundType,
UninhabitedType,
UnionType,
UnpackType,
find_unpack_in_list,
get_proper_type,
is_named_instance,
split_with_prefix_and_suffix,
)
from mypy.types_utils import flatten_types
from mypy.typestate import SubtypeKind, type_state
from mypy.typevars import fill_typevars, fill_typevars_with_any
# Flags for detected protocol members
IS_SETTABLE: Final = 1
IS_CLASSVAR: Final = 2
IS_CLASS_OR_STATIC: Final = 3
IS_VAR: Final = 4
TypeParameterChecker: _TypeAlias = Callable[[Type, Type, int, bool, "SubtypeContext"], bool]
class SubtypeContext:
def __init__(
self,
*,
# Non-proper subtype flags
ignore_type_params: bool = False,
ignore_pos_arg_names: bool = False,
ignore_declared_variance: bool = False,
# Supported for both proper and non-proper
always_covariant: bool = False,
ignore_promotions: bool = False,
# Proper subtype flags
erase_instances: bool = False,
keep_erased_types: bool = False,
options: Options | None = None,
) -> None:
self.ignore_type_params = ignore_type_params
self.ignore_pos_arg_names = ignore_pos_arg_names
self.ignore_declared_variance = ignore_declared_variance
self.always_covariant = always_covariant
self.ignore_promotions = ignore_promotions
self.erase_instances = erase_instances
self.keep_erased_types = keep_erased_types
self.options = options
def check_context(self, proper_subtype: bool) -> None:
# Historically proper and non-proper subtypes were defined using different helpers
        # and different visitors. Check that the given flags are a combination we support.
if proper_subtype:
assert not self.ignore_pos_arg_names and not self.ignore_declared_variance
else:
assert not self.erase_instances and not self.keep_erased_types
def is_subtype(
left: Type,
right: Type,
*,
subtype_context: SubtypeContext | None = None,
ignore_type_params: bool = False,
ignore_pos_arg_names: bool = False,
ignore_declared_variance: bool = False,
always_covariant: bool = False,
ignore_promotions: bool = False,
options: Options | None = None,
) -> bool:
"""Is 'left' subtype of 'right'?
Also consider Any to be a subtype of any type, and vice versa. This
recursively applies to components of composite types (List[int] is subtype
of List[Any], for example).
type_parameter_checker is used to check the type parameters (for example,
A with B in is_subtype(C[A], C[B]). The default checks for subtype relation
between the type arguments (e.g., A and B), taking the variance of the
type var into account.
"""
if subtype_context is None:
subtype_context = SubtypeContext(
ignore_type_params=ignore_type_params,
ignore_pos_arg_names=ignore_pos_arg_names,
ignore_declared_variance=ignore_declared_variance,
always_covariant=always_covariant,
ignore_promotions=ignore_promotions,
options=options,
)
else:
assert not any(
{
ignore_type_params,
ignore_pos_arg_names,
ignore_declared_variance,
always_covariant,
ignore_promotions,
options,
}
), "Don't pass both context and individual flags"
if type_state.is_assumed_subtype(left, right):
return True
if mypy.typeops.is_recursive_pair(left, right):
# This case requires special care because it may cause infinite recursion.
# Our view on recursive types is known under a fancy name of iso-recursive mu-types.
# Roughly this means that a recursive type is defined as an alias where right hand side
# can refer to the type as a whole, for example:
# A = Union[int, Tuple[A, ...]]
# and an alias unrolled once represents the *same type*, in our case all these represent
# the same type:
# A
# Union[int, Tuple[A, ...]]
# Union[int, Tuple[Union[int, Tuple[A, ...]], ...]]
# The algorithm for subtyping is then essentially under the assumption that left <: right,
# check that get_proper_type(left) <: get_proper_type(right). On the example above,
# If we start with:
# A = Union[int, Tuple[A, ...]]
# B = Union[int, Tuple[B, ...]]
        # When checking if A <: B we push the pair (A, B) onto the 'assuming' stack; then,
        # when after a few steps we come back to the initial call is_subtype(A, B), we
        # immediately return True.
with pop_on_exit(type_state.get_assumptions(is_proper=False), left, right):
return _is_subtype(left, right, subtype_context, proper_subtype=False)
return _is_subtype(left, right, subtype_context, proper_subtype=False)
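# Illustrative sketch (not part of mypy): in non-proper subtyping ``Any`` on the
# right-hand side is compatible with everything, so the check short-circuits.
# Hypothetical helper, for illustration only.
def _example_any_absorbs_none() -> bool:
    return is_subtype(NoneType(), AnyType(TypeOfAny.special_form))  # -> True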
def is_proper_subtype(
left: Type,
right: Type,
*,
subtype_context: SubtypeContext | None = None,
ignore_promotions: bool = False,
erase_instances: bool = False,
keep_erased_types: bool = False,
) -> bool:
"""Is left a proper subtype of right?
For proper subtypes, there's no need to rely on compatibility due to
Any types. Every usable type is a proper subtype of itself.
If erase_instances is True, erase left instance *after* mapping it to supertype
(this is useful for runtime isinstance() checks). If keep_erased_types is True,
do not consider ErasedType a subtype of all types (used by type inference against unions).
"""
if subtype_context is None:
subtype_context = SubtypeContext(
ignore_promotions=ignore_promotions,
erase_instances=erase_instances,
keep_erased_types=keep_erased_types,
)
else:
assert not any(
{ignore_promotions, erase_instances, keep_erased_types}
), "Don't pass both context and individual flags"
if type_state.is_assumed_proper_subtype(left, right):
return True
if mypy.typeops.is_recursive_pair(left, right):
# Same as for non-proper subtype, see detailed comment there for explanation.
with pop_on_exit(type_state.get_assumptions(is_proper=True), left, right):
return _is_subtype(left, right, subtype_context, proper_subtype=True)
return _is_subtype(left, right, subtype_context, proper_subtype=True)
def is_equivalent(
a: Type,
b: Type,
*,
ignore_type_params: bool = False,
ignore_pos_arg_names: bool = False,
options: Options | None = None,
subtype_context: SubtypeContext | None = None,
) -> bool:
return is_subtype(
a,
b,
ignore_type_params=ignore_type_params,
ignore_pos_arg_names=ignore_pos_arg_names,
options=options,
subtype_context=subtype_context,
) and is_subtype(
b,
a,
ignore_type_params=ignore_type_params,
ignore_pos_arg_names=ignore_pos_arg_names,
options=options,
subtype_context=subtype_context,
)
def is_same_type(
a: Type, b: Type, ignore_promotions: bool = True, subtype_context: SubtypeContext | None = None
) -> bool:
"""Are these types proper subtypes of each other?
This means types may have different representation (e.g. an alias, or
a non-simplified union) but are semantically exchangeable in all contexts.
"""
# First, use fast path for some common types. This is performance-critical.
if (
type(a) is Instance
and type(b) is Instance
and a.type == b.type
and len(a.args) == len(b.args)
and a.last_known_value is b.last_known_value
):
return all(is_same_type(x, y) for x, y in zip(a.args, b.args))
elif isinstance(a, TypeVarType) and isinstance(b, TypeVarType) and a.id == b.id:
return True
# Note that using ignore_promotions=True (default) makes types like int and int64
# considered not the same type (which is the case at runtime).
# Also Union[bool, int] (if it wasn't simplified before) will be different
# from plain int, etc.
return is_proper_subtype(
a, b, ignore_promotions=ignore_promotions, subtype_context=subtype_context
) and is_proper_subtype(
b, a, ignore_promotions=ignore_promotions, subtype_context=subtype_context
)
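# Illustrative sketch (not part of mypy): ``is_same_type`` is mutual proper
# subtyping, so structurally identical simple types compare as the same type.
# Hypothetical helper, for illustration only.
def _example_none_is_same_as_none() -> bool:
    return is_same_type(NoneType(), NoneType())  # -> True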
# This is a common entry point for subtyping checks (both proper and non-proper).
# Never call this private function directly, use the public versions.
def _is_subtype(
left: Type, right: Type, subtype_context: SubtypeContext, proper_subtype: bool
) -> bool:
subtype_context.check_context(proper_subtype)
orig_right = right
orig_left = left
left = get_proper_type(left)
right = get_proper_type(right)
# Note: Unpack type should not be a subtype of Any, since it may represent
# multiple types. This should always go through the visitor, to check arity.
if (
not proper_subtype
and isinstance(right, (AnyType, UnboundType, ErasedType))
and not isinstance(left, UnpackType)
):
# TODO: should we consider all types proper subtypes of UnboundType and/or
# ErasedType as we do for non-proper subtyping.
return True
if isinstance(right, UnionType) and not isinstance(left, UnionType):
# Normally, when 'left' is not itself a union, the only way
# 'left' can be a subtype of the union 'right' is if it is a
# subtype of one of the items making up the union.
if proper_subtype:
is_subtype_of_item = any(
is_proper_subtype(orig_left, item, subtype_context=subtype_context)
for item in right.items
)
else:
is_subtype_of_item = any(
is_subtype(orig_left, item, subtype_context=subtype_context)
for item in right.items
)
# Recombine rhs literal types, to make an enum type a subtype
# of a union of all enum items as literal types. Only do it if
# the previous check didn't succeed, since recombining can be
# expensive.
# `bool` is a special case, because `bool` is `Literal[True, False]`.
if (
not is_subtype_of_item
and isinstance(left, Instance)
and (left.type.is_enum or left.type.fullname == "builtins.bool")
):
right = UnionType(mypy.typeops.try_contracting_literals_in_union(right.items))
if proper_subtype:
is_subtype_of_item = any(
is_proper_subtype(orig_left, item, subtype_context=subtype_context)
for item in right.items
)
else:
is_subtype_of_item = any(
is_subtype(orig_left, item, subtype_context=subtype_context)
for item in right.items
)
# However, if 'left' is a type variable T, T might also have
# an upper bound which is itself a union. This case will be
# handled below by the SubtypeVisitor. We have to check both
# possibilities, to handle both cases like T <: Union[T, U]
# and cases like T <: B where B is the upper bound of T and is
# a union. (See #2314.)
if not isinstance(left, TypeVarType):
return is_subtype_of_item
elif is_subtype_of_item:
return True
# otherwise, fall through
return left.accept(SubtypeVisitor(orig_right, subtype_context, proper_subtype))
def check_type_parameter(
left: Type, right: Type, variance: int, proper_subtype: bool, subtype_context: SubtypeContext
) -> bool:
# It is safe to consider empty collection literals and similar as covariant, since
    # such a type can't be stored in a variable; see checker.is_valid_inferred_type().
if variance == INVARIANT:
p_left = get_proper_type(left)
if isinstance(p_left, UninhabitedType) and p_left.ambiguous:
variance = COVARIANT
# If variance hasn't been inferred yet, we are lenient and default to
# covariance. This shouldn't happen often, but it's very difficult to
# avoid these cases altogether.
if variance == COVARIANT or variance == VARIANCE_NOT_READY:
if proper_subtype:
return is_proper_subtype(left, right, subtype_context=subtype_context)
else:
return is_subtype(left, right, subtype_context=subtype_context)
elif variance == CONTRAVARIANT:
if proper_subtype:
return is_proper_subtype(right, left, subtype_context=subtype_context)
else:
return is_subtype(right, left, subtype_context=subtype_context)
else:
if proper_subtype:
# We pass ignore_promotions=False because it is a default for subtype checks.
# The actual value will be taken from the subtype_context, and it is whatever
# the original caller passed.
return is_same_type(
left, right, ignore_promotions=False, subtype_context=subtype_context
)
else:
return is_equivalent(left, right, subtype_context=subtype_context)
class SubtypeVisitor(TypeVisitor[bool]):
def __init__(self, right: Type, subtype_context: SubtypeContext, proper_subtype: bool) -> None:
self.right = get_proper_type(right)
self.orig_right = right
self.proper_subtype = proper_subtype
self.subtype_context = subtype_context
self.options = subtype_context.options
self._subtype_kind = SubtypeVisitor.build_subtype_kind(subtype_context, proper_subtype)
@staticmethod
def build_subtype_kind(subtype_context: SubtypeContext, proper_subtype: bool) -> SubtypeKind:
return (
state.strict_optional,
proper_subtype,
subtype_context.ignore_type_params,
subtype_context.ignore_pos_arg_names,
subtype_context.ignore_declared_variance,
subtype_context.always_covariant,
subtype_context.ignore_promotions,
subtype_context.erase_instances,
subtype_context.keep_erased_types,
)
def _is_subtype(self, left: Type, right: Type) -> bool:
if self.proper_subtype:
return is_proper_subtype(left, right, subtype_context=self.subtype_context)
return is_subtype(left, right, subtype_context=self.subtype_context)
# visit_x(left) means: is left (which is an instance of X) a subtype of right?
def visit_unbound_type(self, left: UnboundType) -> bool:
# This can be called if there is a bad type annotation. The result probably
# doesn't matter much but by returning True we simplify these bad types away
# from unions, which could filter out some bogus messages.
return True
def visit_any(self, left: AnyType) -> bool:
return isinstance(self.right, AnyType) if self.proper_subtype else True
def visit_none_type(self, left: NoneType) -> bool:
if state.strict_optional:
if isinstance(self.right, NoneType) or is_named_instance(
self.right, "builtins.object"
):
return True
if isinstance(self.right, Instance) and self.right.type.is_protocol:
members = self.right.type.protocol_members
# None is compatible with Hashable (and other similar protocols). This is
# slightly sloppy since we don't check the signature of "__hash__".
# None is also compatible with `SupportsStr` protocol.
return not members or all(member in ("__hash__", "__str__") for member in members)
return False
else:
return True
def visit_uninhabited_type(self, left: UninhabitedType) -> bool:
return True
def visit_erased_type(self, left: ErasedType) -> bool:
# This may be encountered during type inference. The result probably doesn't
# matter much.
# TODO: it actually does matter, figure out more principled logic about this.
return not self.subtype_context.keep_erased_types
def visit_deleted_type(self, left: DeletedType) -> bool:
return True
def visit_instance(self, left: Instance) -> bool:
if left.type.fallback_to_any and not self.proper_subtype:
# NOTE: `None` is a *non-subclassable* singleton, therefore no class
            # can be a subtype of it, even with an `Any` fallback.
# This special case is needed to treat descriptors in classes with
# dynamic base classes correctly, see #5456.
return not isinstance(self.right, NoneType)
right = self.right
if isinstance(right, TupleType) and right.partial_fallback.type.is_enum:
return self._is_subtype(left, mypy.typeops.tuple_fallback(right))
if isinstance(right, TupleType):
if len(right.items) == 1:
# Non-normalized Tuple type (may be left after semantic analysis
# because semanal_typearg visitor is not a type translator).
item = right.items[0]
if isinstance(item, UnpackType):
unpacked = get_proper_type(item.type)
if isinstance(unpacked, Instance):
return self._is_subtype(left, unpacked)
if left.type.has_base(right.partial_fallback.type.fullname):
if not self.proper_subtype:
# Special case to consider Foo[*tuple[Any, ...]] (i.e. bare Foo) a
                    # subtype of Foo[<whatever>], when Foo is a user-defined variadic tuple type.
mapped = map_instance_to_supertype(left, right.partial_fallback.type)
for arg in map(get_proper_type, mapped.args):
if isinstance(arg, UnpackType):
unpacked = get_proper_type(arg.type)
if not isinstance(unpacked, Instance):
break
assert unpacked.type.fullname == "builtins.tuple"
if not isinstance(get_proper_type(unpacked.args[0]), AnyType):
break
elif not isinstance(arg, AnyType):
break
else:
return True
return False
if isinstance(right, TypeVarTupleType):
# tuple[Any, ...] is like Any in the world of tuples (see special case above).
if left.type.has_base("builtins.tuple"):
mapped = map_instance_to_supertype(left, right.tuple_fallback.type)
if isinstance(get_proper_type(mapped.args[0]), AnyType):
return not self.proper_subtype
if isinstance(right, Instance):
if type_state.is_cached_subtype_check(self._subtype_kind, left, right):
return True
if type_state.is_cached_negative_subtype_check(self._subtype_kind, left, right):
return False
if not self.subtype_context.ignore_promotions:
for base in left.type.mro:
if base._promote and any(
self._is_subtype(p, self.right) for p in base._promote
):
type_state.record_subtype_cache_entry(self._subtype_kind, left, right)
return True
# Special case: Low-level integer types are compatible with 'int'. We can't
# use promotions, since 'int' is already promoted to low-level integer types,
# and we can't have circular promotions.
if left.type.alt_promote and left.type.alt_promote.type is right.type:
return True
rname = right.type.fullname
# Always try a nominal check if possible,
# there might be errors that a user wants to silence *once*.
# NamedTuples are a special case, because `NamedTuple` is not listed
# in `TypeInfo.mro`, so when `(a: NamedTuple) -> None` is used,
            # we need to check for the `is_named_tuple` property.
if (
left.type.has_base(rname)
or rname == "builtins.object"
or (
rname in TYPED_NAMEDTUPLE_NAMES
and any(l.is_named_tuple for l in left.type.mro)
)
) and not self.subtype_context.ignore_declared_variance:
# Map left type to corresponding right instances.
t = map_instance_to_supertype(left, right.type)
if self.subtype_context.erase_instances:
erased = erase_type(t)
assert isinstance(erased, Instance)
t = erased
nominal = True
if right.type.has_type_var_tuple_type:
# For variadic instances we simply find the correct type argument mappings,
# all the heavy lifting is done by the tuple subtyping.
assert right.type.type_var_tuple_prefix is not None
assert right.type.type_var_tuple_suffix is not None
prefix = right.type.type_var_tuple_prefix
suffix = right.type.type_var_tuple_suffix
tvt = right.type.defn.type_vars[prefix]
assert isinstance(tvt, TypeVarTupleType)
fallback = tvt.tuple_fallback
left_prefix, left_middle, left_suffix = split_with_prefix_and_suffix(
t.args, prefix, suffix
)
right_prefix, right_middle, right_suffix = split_with_prefix_and_suffix(
right.args, prefix, suffix
)
left_args = (
left_prefix + (TupleType(list(left_middle), fallback),) + left_suffix
)
right_args = (
right_prefix + (TupleType(list(right_middle), fallback),) + right_suffix
)
if not self.proper_subtype and t.args:
for arg in map(get_proper_type, t.args):
if isinstance(arg, UnpackType):
unpacked = get_proper_type(arg.type)
if not isinstance(unpacked, Instance):
break
assert unpacked.type.fullname == "builtins.tuple"
if not isinstance(get_proper_type(unpacked.args[0]), AnyType):
break
elif not isinstance(arg, AnyType):
break
else:
return True
if len(left_args) != len(right_args):
return False
type_params = zip(left_args, right_args, right.type.defn.type_vars)
else:
type_params = zip(t.args, right.args, right.type.defn.type_vars)
if not self.subtype_context.ignore_type_params:
tried_infer = False
for lefta, righta, tvar in type_params:
if isinstance(tvar, TypeVarType):
if tvar.variance == VARIANCE_NOT_READY and not tried_infer:
infer_class_variances(right.type)
tried_infer = True
if (
self.subtype_context.always_covariant
and tvar.variance == INVARIANT
):
variance = COVARIANT
else:
variance = tvar.variance
if not check_type_parameter(
lefta, righta, variance, self.proper_subtype, self.subtype_context
):
nominal = False
else:
# TODO: everywhere else ParamSpecs are handled as invariant.
if not check_type_parameter(
lefta, righta, COVARIANT, self.proper_subtype, self.subtype_context
):
nominal = False
if nominal:
type_state.record_subtype_cache_entry(self._subtype_kind, left, right)
else:
type_state.record_negative_subtype_cache_entry(self._subtype_kind, left, right)
return nominal
if right.type.is_protocol and is_protocol_implementation(
left, right, proper_subtype=self.proper_subtype, options=self.options
):
return True
# We record negative cache entry here, and not in the protocol check like we do for
# positive cache, to avoid accidentally adding a type that is not a structural
# subtype, but is a nominal subtype (involving type: ignore override).
type_state.record_negative_subtype_cache_entry(self._subtype_kind, left, right)
return False
if isinstance(right, TypeType):
item = right.item
if isinstance(item, TupleType):
item = mypy.typeops.tuple_fallback(item)
# TODO: this is a bit arbitrary, we should only skip Any-related cases.
if not self.proper_subtype:
if is_named_instance(left, "builtins.type"):
return self._is_subtype(TypeType(AnyType(TypeOfAny.special_form)), right)
if left.type.is_metaclass():
if isinstance(item, AnyType):
return True
if isinstance(item, Instance):
return is_named_instance(item, "builtins.object")
if isinstance(right, LiteralType) and left.last_known_value is not None:
return self._is_subtype(left.last_known_value, right)
if isinstance(right, CallableType):
# Special case: Instance can be a subtype of Callable.
call = find_member("__call__", left, left, is_operator=True)
if call:
return self._is_subtype(call, right)
return False
else:
return False
def visit_type_var(self, left: TypeVarType) -> bool:
right = self.right
if isinstance(right, TypeVarType) and left.id == right.id:
return True
if left.values and self._is_subtype(UnionType.make_union(left.values), right):
return True
return self._is_subtype(left.upper_bound, self.right)
def visit_param_spec(self, left: ParamSpecType) -> bool:
right = self.right
if (
isinstance(right, ParamSpecType)
and right.id == left.id
and right.flavor == left.flavor
):
return self._is_subtype(left.prefix, right.prefix)
if isinstance(right, Parameters) and are_trivial_parameters(right):
return True
return self._is_subtype(left.upper_bound, self.right)
def visit_type_var_tuple(self, left: TypeVarTupleType) -> bool:
right = self.right
if isinstance(right, TypeVarTupleType) and right.id == left.id:
return left.min_len >= right.min_len
return self._is_subtype(left.upper_bound, self.right)
def visit_unpack_type(self, left: UnpackType) -> bool:
# TODO: Ideally we should not need this (since it is not a real type).
# Instead callers (upper level types) should handle it when it appears in type list.
if isinstance(self.right, UnpackType):
return self._is_subtype(left.type, self.right.type)
if isinstance(self.right, Instance) and self.right.type.fullname == "builtins.object":
return True
return False
def visit_parameters(self, left: Parameters) -> bool:
if isinstance(self.right, Parameters):
return are_parameters_compatible(
left,
self.right,
is_compat=self._is_subtype,
                # TODO: this should pass the current value, but then a couple of tests fail.
is_proper_subtype=False,
ignore_pos_arg_names=self.subtype_context.ignore_pos_arg_names,
)
elif isinstance(self.right, Instance):
return self.right.type.fullname == "builtins.object"
else:
return False
def visit_callable_type(self, left: CallableType) -> bool:
right = self.right
if isinstance(right, CallableType):
if left.type_guard is not None and right.type_guard is not None:
if not self._is_subtype(left.type_guard, right.type_guard):
return False
elif left.type_is is not None and right.type_is is not None:
# For TypeIs we have to check both ways; it is unsafe to pass
# a TypeIs[Child] when a TypeIs[Parent] is expected, because
# if the narrower returns False, we assume that the narrowed value is
# *not* a Parent.
if not self._is_subtype(left.type_is, right.type_is) or not self._is_subtype(
right.type_is, left.type_is
):
return False
elif right.type_guard is not None and left.type_guard is None:
                # This means that one function has `TypeGuard` and the other does not.
# They are not compatible. See https://github.com/python/mypy/issues/11307
return False
elif right.type_is is not None and left.type_is is None:
# Similarly, if one function has `TypeIs` and the other does not,
# they are not compatible.
return False
return is_callable_compatible(
left,
right,
is_compat=self._is_subtype,
is_proper_subtype=self.proper_subtype,
ignore_pos_arg_names=self.subtype_context.ignore_pos_arg_names,
strict_concatenate=(
(self.options.extra_checks or self.options.strict_concatenate)
if self.options
else False
),
)
elif isinstance(right, Overloaded):
return all(self._is_subtype(left, item) for item in right.items)
elif isinstance(right, Instance):
if right.type.is_protocol and "__call__" in right.type.protocol_members:
# OK, a callable can implement a protocol with a `__call__` member.
# TODO: we should probably explicitly exclude self-types in this case.
call = find_member("__call__", right, left, is_operator=True)
assert call is not None
if self._is_subtype(left, call):
if len(right.type.protocol_members) == 1:
return True
if is_protocol_implementation(left.fallback, right, skip=["__call__"]):
return True
if right.type.is_protocol and left.is_type_obj():
ret_type = get_proper_type(left.ret_type)
if isinstance(ret_type, TupleType):
ret_type = mypy.typeops.tuple_fallback(ret_type)
if isinstance(ret_type, Instance) and is_protocol_implementation(
ret_type, right, proper_subtype=self.proper_subtype, class_obj=True
):
return True
return self._is_subtype(left.fallback, right)
elif isinstance(right, TypeType):
# This is unsound, we don't check the __init__ signature.
return left.is_type_obj() and self._is_subtype(left.ret_type, right.item)
else:
return False
def visit_tuple_type(self, left: TupleType) -> bool:
right = self.right
if isinstance(right, Instance):
if is_named_instance(right, "typing.Sized"):
return True
elif is_named_instance(right, TUPLE_LIKE_INSTANCE_NAMES):
if right.args:
iter_type = right.args[0]
else:
if self.proper_subtype:
return False
iter_type = AnyType(TypeOfAny.special_form)
if is_named_instance(right, "builtins.tuple") and isinstance(
get_proper_type(iter_type), AnyType
):
# TODO: We shouldn't need this special case. This is currently needed
# for isinstance(x, tuple), though it's unclear why.
return True
for li in left.items:
if isinstance(li, UnpackType):
unpack = get_proper_type(li.type)
if isinstance(unpack, TypeVarTupleType):
unpack = get_proper_type(unpack.upper_bound)
assert (
isinstance(unpack, Instance)
and unpack.type.fullname == "builtins.tuple"
)
li = unpack.args[0]
if not self._is_subtype(li, iter_type):
return False
return True
elif self._is_subtype(left.partial_fallback, right) and self._is_subtype(
mypy.typeops.tuple_fallback(left), right
):
return True
return False
elif isinstance(right, TupleType):
# If right has a variadic unpack this needs special handling. If there is a TypeVarTuple
# unpack, item count must coincide. If the left has variadic unpack but right
# doesn't have one, we will fall through to False down the line.
if self.variadic_tuple_subtype(left, right):
return True
if len(left.items) != len(right.items):
return False
if any(not self._is_subtype(l, r) for l, r in zip(left.items, right.items)):
return False
if is_named_instance(right.partial_fallback, "builtins.tuple"):
# No need to verify fallback. This is useful since the calculated fallback
# may be inconsistent due to how we calculate joins between unions vs.
# non-unions. For example, join(int, str) == object, whereas
# join(Union[int, C], Union[str, C]) == Union[int, str, C].
return True
if is_named_instance(left.partial_fallback, "builtins.tuple"):
# Again, no need to verify. At this point we know the right fallback
# is a subclass of tuple, so if left is plain tuple, it cannot be a subtype.
return False
# At this point we know both fallbacks are non-tuple.
return self._is_subtype(left.partial_fallback, right.partial_fallback)
else:
return False
def variadic_tuple_subtype(self, left: TupleType, right: TupleType) -> bool:
"""Check subtyping between two potentially variadic tuples.
Most non-trivial cases here are due to variadic unpacks like *tuple[X, ...],
we handle such unpacks as infinite unions Tuple[()] | Tuple[X] | Tuple[X, X] | ...
Note: the cases where right is fixed or has *Ts unpack should be handled
by the caller.
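        For example, under this infinite-union view one would expect (illustrative):
            Tuple[int, int] <: Tuple[int, *tuple[int, ...]]  (the length-2 member matches)
            Tuple[int, *tuple[int, ...]] <: Tuple[*tuple[int, ...]], but not the reverse,
            because Tuple[*tuple[int, ...]] also covers the empty tuple.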
"""
right_unpack_index = find_unpack_in_list(right.items)
if right_unpack_index is None:
# This case should be handled by the caller.
return False
right_unpack = right.items[right_unpack_index]
assert isinstance(right_unpack, UnpackType)
right_unpacked = get_proper_type(right_unpack.type)
if not isinstance(right_unpacked, Instance):
# This case should be handled by the caller.
return False
assert right_unpacked.type.fullname == "builtins.tuple"
right_item = right_unpacked.args[0]
right_prefix = right_unpack_index
right_suffix = len(right.items) - right_prefix - 1
left_unpack_index = find_unpack_in_list(left.items)
if left_unpack_index is None:
# Simple case: left is fixed, simply find correct mapping to the right
# (effectively selecting item with matching length from an infinite union).
if len(left.items) < right_prefix + right_suffix:
return False
prefix, middle, suffix = split_with_prefix_and_suffix(
tuple(left.items), right_prefix, right_suffix
)
if not all(
self._is_subtype(li, ri) for li, ri in zip(prefix, right.items[:right_prefix])
):
return False
if right_suffix and not all(
self._is_subtype(li, ri) for li, ri in zip(suffix, right.items[-right_suffix:])
):
return False
return all(self._is_subtype(li, right_item) for li in middle)
else:
if len(left.items) < len(right.items):
# There are some items on the left that will never have a matching length
# on the right.
return False
left_unpack = left.items[left_unpack_index]
assert isinstance(left_unpack, UnpackType)
left_unpacked = get_proper_type(left_unpack.type)
if not isinstance(left_unpacked, Instance):
# *Ts unpacks can't be split.
return False
assert left_unpacked.type.fullname == "builtins.tuple"
left_item = left_unpacked.args[0]
            # The trickiest case, with two variadic unpacks, is handled similarly to union
            # subtyping: *each* item on the left must be a subtype of *some* item on the right.
            # For this we first check the "asymptotic case", i.e. that both unpacks are subtypes,
            # and then check subtyping for all finite overlaps.
if not self._is_subtype(left_item, right_item):
return False
left_prefix = left_unpack_index
left_suffix = len(left.items) - left_prefix - 1
max_overlap = max(0, right_prefix - left_prefix, right_suffix - left_suffix)
for overlap in range(max_overlap + 1):
repr_items = left.items[:left_prefix] + [left_item] * overlap
if left_suffix:
repr_items += left.items[-left_suffix:]
left_repr = left.copy_modified(items=repr_items)
if not self._is_subtype(left_repr, right):
return False
return True
def visit_typeddict_type(self, left: TypedDictType) -> bool:
right = self.right
if isinstance(right, Instance):
return self._is_subtype(left.fallback, right)
elif isinstance(right, TypedDictType):
if left == right:
return True # Fast path
if not left.names_are_wider_than(right):
return False
for name, l, r in left.zip(right):
# TODO: should we pass on the full subtype_context here and below?
right_readonly = name in right.readonly_keys
if not right_readonly:
if self.proper_subtype:
check = is_same_type(l, r)
else:
check = is_equivalent(
l,
r,
ignore_type_params=self.subtype_context.ignore_type_params,
options=self.options,
)
else:
# Read-only items behave covariantly
check = self._is_subtype(l, r)
if not check:
return False
# Non-required key is not compatible with a required key since
# indexing may fail unexpectedly if a required key is missing.
# Required key is not compatible with a non-required key since
                # the former doesn't support 'del' but the latter should support
# it.
#
# NOTE: 'del' support is currently not implemented (#3550). We
# don't want to have to change subtyping after 'del' support
# lands so here we are anticipating that change.
if (name in left.required_keys) != (name in right.required_keys):
return False
# Readonly fields check:
#
# A = TypedDict('A', {'x': ReadOnly[int]})
# B = TypedDict('B', {'x': int})
# def reset_x(b: B) -> None:
# b['x'] = 0
#
# So, `A` cannot be a subtype of `B`, while `B` can be a subtype of `A`,
# because you can use `B` everywhere you use `A`, but not the other way around.
if name in left.readonly_keys and name not in right.readonly_keys:
return False
# (NOTE: Fallbacks don't matter.)
return True
else:
return False
def visit_literal_type(self, left: LiteralType) -> bool:
if isinstance(self.right, LiteralType):
return left == self.right
else:
return self._is_subtype(left.fallback, self.right)
def visit_overloaded(self, left: Overloaded) -> bool:
right = self.right
if isinstance(right, Instance):
if right.type.is_protocol and "__call__" in right.type.protocol_members:
# same as for CallableType
call = find_member("__call__", right, left, is_operator=True)
assert call is not None
if self._is_subtype(left, call):
if len(right.type.protocol_members) == 1:
return True
if is_protocol_implementation(left.fallback, right, skip=["__call__"]):
return True
return self._is_subtype(left.fallback, right)
elif isinstance(right, CallableType):
for item in left.items:
if self._is_subtype(item, right):
return True
return False
elif isinstance(right, Overloaded):
if left == self.right:
# When it is the same overload, then the types are equal.
return True
# Ensure each overload on the right side (the supertype) is accounted for.
previous_match_left_index = -1
matched_overloads = set()
for right_item in right.items:
found_match = False
for left_index, left_item in enumerate(left.items):
subtype_match = self._is_subtype(left_item, right_item)
# Order matters: we need to make sure that the index of
# this item is at least the index of the previous one.
if subtype_match and previous_match_left_index <= left_index:
previous_match_left_index = left_index
found_match = True
matched_overloads.add(left_index)
break
else:
# If this one overlaps with the supertype in any way, but it wasn't
# an exact match, then it's a potential error.
strict_concat = (
(self.options.extra_checks or self.options.strict_concatenate)
if self.options
else False
)
if left_index not in matched_overloads and (
is_callable_compatible(
left_item,
right_item,
is_compat=self._is_subtype,
is_proper_subtype=self.proper_subtype,
ignore_return=True,
ignore_pos_arg_names=self.subtype_context.ignore_pos_arg_names,
strict_concatenate=strict_concat,
)
or is_callable_compatible(
right_item,
left_item,
is_compat=self._is_subtype,
is_proper_subtype=self.proper_subtype,
ignore_return=True,
ignore_pos_arg_names=self.subtype_context.ignore_pos_arg_names,
strict_concatenate=strict_concat,
)
):
return False
if not found_match:
return False
return True
elif isinstance(right, UnboundType):
return True
elif isinstance(right, TypeType):
# All the items must have the same type object status, so
# it's sufficient to query only (any) one of them.
# This is unsound, we don't check all the __init__ signatures.
return left.is_type_obj() and self._is_subtype(left.items[0], right)
else:
return False
def visit_union_type(self, left: UnionType) -> bool:
if isinstance(self.right, Instance):
literal_types: set[Instance] = set()
# avoid redundant check for union of literals
for item in left.relevant_items():
p_item = get_proper_type(item)
lit_type = mypy.typeops.simple_literal_type(p_item)
if lit_type is not None:
if lit_type in literal_types:
continue
literal_types.add(lit_type)
item = lit_type
if not self._is_subtype(item, self.orig_right):
return False
return True
elif isinstance(self.right, UnionType):
# prune literals early to avoid nasty quadratic behavior which would otherwise arise when checking
# subtype relationships between slightly different narrowings of an Enum
# we achieve O(N+M) instead of O(N*M)
fast_check: set[ProperType] = set()
for item in flatten_types(self.right.relevant_items()):
p_item = get_proper_type(item)
fast_check.add(p_item)
if isinstance(p_item, Instance) and p_item.last_known_value is not None:
fast_check.add(p_item.last_known_value)
for item in left.relevant_items():
p_item = get_proper_type(item)
if p_item in fast_check:
continue
lit_type = mypy.typeops.simple_literal_type(p_item)
if lit_type in fast_check:
continue
if not self._is_subtype(item, self.orig_right):
return False
return True
return all(self._is_subtype(item, self.orig_right) for item in left.items)
def visit_partial_type(self, left: PartialType) -> bool:
# This is indeterminate as we don't really know the complete type yet.
if self.proper_subtype:
# TODO: What's the right thing to do here?
return False
if left.type is None:
# Special case, partial `None`. This might happen when defining
# class-level attributes with explicit `None`.
# We can still recover from this.
# https://github.com/python/mypy/issues/11105
return self.visit_none_type(NoneType())
raise RuntimeError(f'Partial type "{left}" cannot be checked with "issubtype()"')
def visit_type_type(self, left: TypeType) -> bool:
right = self.right
if isinstance(right, TypeType):
return self._is_subtype(left.item, right.item)
if isinstance(right, CallableType):
if self.proper_subtype and not right.is_type_obj():
# We can't accept `Type[X]` as a *proper* subtype of Callable[P, X]
# since this will break transitivity of subtyping.
return False
# This is unsound, we don't check the __init__ signature.
return self._is_subtype(left.item, right.ret_type)
if isinstance(right, Instance):
if right.type.fullname in ["builtins.object", "builtins.type"]:
# TODO: Strictly speaking, the type builtins.type is considered equivalent to
# Type[Any]. However, this would break the is_proper_subtype check in
# conditional_types for cases like isinstance(x, type) when the type
# of x is Type[int]. It's unclear what's the right way to address this.
return True
item = left.item
if isinstance(item, TypeVarType):
item = get_proper_type(item.upper_bound)
if isinstance(item, Instance):
if right.type.is_protocol and is_protocol_implementation(
item, right, proper_subtype=self.proper_subtype, class_obj=True
):
return True
metaclass = item.type.metaclass_type
return metaclass is not None and self._is_subtype(metaclass, right)
return False
def visit_type_alias_type(self, left: TypeAliasType) -> bool:
assert False, f"This should be never called, got {left}"
T = TypeVar("T", bound=Type)
@contextmanager
def pop_on_exit(stack: list[tuple[T, T]], left: T, right: T) -> Iterator[None]:
stack.append((left, right))
yield
stack.pop()
def is_protocol_implementation(
left: Instance,
right: Instance,
proper_subtype: bool = False,
class_obj: bool = False,
skip: list[str] | None = None,
options: Options | None = None,
) -> bool:
"""Check whether 'left' implements the protocol 'right'.
If 'proper_subtype' is True, then check for a proper subtype.
Treat recursive protocols by using the 'assuming' structural subtype matrix
(in sparse representation, i.e. as a list of pairs (subtype, supertype)),
see also comment in nodes.TypeInfo. When we enter a check for classes
    (A, P), defined as follows::
class P(Protocol):
def f(self) -> P: ...
class A:
def f(self) -> A: ...
this results in A being a subtype of P without infinite recursion.
On every false result, we pop the assumption, thus avoiding an infinite recursion
as well.
"""
assert right.type.is_protocol
if skip is None:
skip = []
# We need to record this check to generate protocol fine-grained dependencies.
type_state.record_protocol_subtype_check(left.type, right.type)
# nominal subtyping currently ignores '__init__' and '__new__' signatures
members_not_to_check = {"__init__", "__new__"}
members_not_to_check.update(skip)
# Trivial check that circumvents the bug described in issue 9771:
if left.type.is_protocol:
members_right = set(right.type.protocol_members) - members_not_to_check
members_left = set(left.type.protocol_members) - members_not_to_check
if not members_right.issubset(members_left):
return False
assuming = right.type.assuming_proper if proper_subtype else right.type.assuming
for l, r in reversed(assuming):
if l == left and r == right:
return True
with pop_on_exit(assuming, left, right):
for member in right.type.protocol_members:
if member in members_not_to_check:
continue
ignore_names = member != "__call__" # __call__ can be passed kwargs
# The third argument below indicates to what self type is bound.
# We always bind self to the subtype. (Similarly to nominal types).
supertype = get_proper_type(find_member(member, right, left))
assert supertype is not None
subtype = mypy.typeops.get_protocol_member(left, member, class_obj)
# Useful for debugging:
# print(member, 'of', left, 'has type', subtype)
# print(member, 'of', right, 'has type', supertype)
if not subtype:
return False
if isinstance(subtype, PartialType):
subtype = (
NoneType()
if subtype.type is None
else Instance(
subtype.type,
[AnyType(TypeOfAny.unannotated)] * len(subtype.type.type_vars),
)
)
if not proper_subtype:
# Nominal check currently ignores arg names
# NOTE: If we ever change this, be sure to also change the call to
# SubtypeVisitor.build_subtype_kind(...) down below.
is_compat = is_subtype(
subtype, supertype, ignore_pos_arg_names=ignore_names, options=options
)
else:
is_compat = is_proper_subtype(subtype, supertype)
if not is_compat:
return False
if isinstance(subtype, NoneType) and isinstance(supertype, CallableType):
# We want __hash__ = None idiom to work even without --strict-optional
return False
subflags = get_member_flags(member, left, class_obj=class_obj)
superflags = get_member_flags(member, right)
if IS_SETTABLE in superflags:
# Check opposite direction for settable attributes.
if not is_subtype(supertype, subtype, options=options):
return False
if not class_obj:
if IS_SETTABLE not in superflags:
if IS_CLASSVAR in superflags and IS_CLASSVAR not in subflags:
return False
elif (IS_CLASSVAR in subflags) != (IS_CLASSVAR in superflags):
return False
else:
if IS_VAR in superflags and IS_CLASSVAR not in subflags:
# Only class variables are allowed for class object access.
return False
if IS_CLASSVAR in superflags:
# This can be never matched by a class object.
return False
if IS_SETTABLE in superflags and IS_SETTABLE not in subflags:
return False
# This rule is copied from nominal check in checker.py
if IS_CLASS_OR_STATIC in superflags and IS_CLASS_OR_STATIC not in subflags:
return False
if not proper_subtype:
# Nominal check currently ignores arg names, but __call__ is special for protocols
ignore_names = right.type.protocol_members != ["__call__"]
else:
ignore_names = False
subtype_kind = SubtypeVisitor.build_subtype_kind(
subtype_context=SubtypeContext(ignore_pos_arg_names=ignore_names),
proper_subtype=proper_subtype,
)
type_state.record_subtype_cache_entry(subtype_kind, left, right)
return True
def find_member(
name: str, itype: Instance, subtype: Type, is_operator: bool = False, class_obj: bool = False
) -> Type | None:
"""Find the type of member by 'name' in 'itype's TypeInfo.
Find the member type after applying type arguments from 'itype', and binding
'self' to 'subtype'. Return None if member was not found.
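    For example (illustrative), given roughly::
        class C(Generic[T]):
            x: T
            def meth(self) -> T: ...
    find_member("x", C[int], C[int]) would be expected to return int, and
    find_member("meth", C[int], C[int]) a callable whose return type is int.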
"""
# TODO: this code shares some logic with checkmember.analyze_member_access,
# consider refactoring.
info = itype.type
method = info.get_method(name)
if method:
if isinstance(method, Decorator):
return find_node_type(method.var, itype, subtype, class_obj=class_obj)
if method.is_property:
assert isinstance(method, OverloadedFuncDef)
dec = method.items[0]
assert isinstance(dec, Decorator)
return find_node_type(dec.var, itype, subtype, class_obj=class_obj)
return find_node_type(method, itype, subtype, class_obj=class_obj)
else:
# don't have such method, maybe variable or decorator?
node = info.get(name)
v = node.node if node else None
if isinstance(v, Var):
return find_node_type(v, itype, subtype, class_obj=class_obj)
if (
not v
and name not in ["__getattr__", "__setattr__", "__getattribute__"]
and not is_operator
and not class_obj
and itype.extra_attrs is None # skip ModuleType.__getattr__
):
for method_name in ("__getattribute__", "__getattr__"):
# Normally, mypy assumes that instances that define __getattr__ have all
# attributes with the corresponding return type. If this will produce
# many false negatives, then this could be prohibited for
# structural subtyping.
method = info.get_method(method_name)
if method and method.info.fullname != "builtins.object":
if isinstance(method, Decorator):
getattr_type = get_proper_type(find_node_type(method.var, itype, subtype))
else:
getattr_type = get_proper_type(find_node_type(method, itype, subtype))
if isinstance(getattr_type, CallableType):
return getattr_type.ret_type
return getattr_type
if itype.type.fallback_to_any or class_obj and itype.type.meta_fallback_to_any:
return AnyType(TypeOfAny.special_form)
if isinstance(v, TypeInfo):
# PEP 544 doesn't specify anything about such use cases. So we just try
# to do something meaningful (at least we should not crash).
return TypeType(fill_typevars_with_any(v))
if itype.extra_attrs and name in itype.extra_attrs.attrs:
return itype.extra_attrs.attrs[name]
return None
def get_member_flags(name: str, itype: Instance, class_obj: bool = False) -> set[int]:
"""Detect whether a member 'name' is settable, whether it is an
instance or class variable, and whether it is class or static method.
    The flags are defined as follows:
* IS_SETTABLE: whether this attribute can be set, not set for methods and
non-settable properties;
* IS_CLASSVAR: set if the variable is annotated as 'x: ClassVar[t]';
* IS_CLASS_OR_STATIC: set for methods decorated with @classmethod or
with @staticmethod.
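    For example (illustrative): a @staticmethod yields {IS_CLASS_OR_STATIC}; a plain
    annotated attribute 'x: int' yields {IS_VAR, IS_SETTABLE}; 'x: ClassVar[int]'
    additionally gets IS_CLASSVAR; a read-only property yields just {IS_VAR}.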
"""
info = itype.type
method = info.get_method(name)
setattr_meth = info.get_method("__setattr__")
if method:
if isinstance(method, Decorator):
if method.var.is_staticmethod or method.var.is_classmethod:
return {IS_CLASS_OR_STATIC}
elif method.var.is_property:
return {IS_VAR}
elif method.is_property: # this could be settable property
assert isinstance(method, OverloadedFuncDef)
dec = method.items[0]
assert isinstance(dec, Decorator)
if dec.var.is_settable_property or setattr_meth:
return {IS_VAR, IS_SETTABLE}
else:
return {IS_VAR}
return set() # Just a regular method
node = info.get(name)
if not node:
if setattr_meth:
return {IS_SETTABLE}
if itype.extra_attrs and name in itype.extra_attrs.attrs:
flags = set()
if name not in itype.extra_attrs.immutable:
flags.add(IS_SETTABLE)
return flags
return set()
v = node.node
# just a variable
if isinstance(v, Var):
if v.is_property:
return {IS_VAR}
flags = {IS_VAR}
if not v.is_final:
flags.add(IS_SETTABLE)
if v.is_classvar:
flags.add(IS_CLASSVAR)
if class_obj and v.is_inferred:
flags.add(IS_CLASSVAR)
return flags
return set()
def find_node_type(
node: Var | FuncBase, itype: Instance, subtype: Type, class_obj: bool = False
) -> Type:
"""Find type of a variable or method 'node' (maybe also a decorated method).
Apply type arguments from 'itype', and bind 'self' to 'subtype'.
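    For instance (illustrative), for 'def meth(self) -> T' defined in a generic class C
    with type variable T, find_node_type(meth, C[int], C[int]) would be expected to
    produce a callable with 'self' already bound and a return type of int.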
"""
from mypy.typeops import bind_self
if isinstance(node, FuncBase):
typ: Type | None = mypy.typeops.function_type(
node, fallback=Instance(itype.type.mro[-1], [])
)
else:
typ = node.type
if typ is not None:
typ = expand_self_type(node, typ, subtype)
p_typ = get_proper_type(typ)
if typ is None:
return AnyType(TypeOfAny.from_error)
# We don't need to bind 'self' for static methods, since there is no 'self'.
if isinstance(node, FuncBase) or (
isinstance(p_typ, FunctionLike)
and node.is_initialized_in_class
and not node.is_staticmethod
):
assert isinstance(p_typ, FunctionLike)
if class_obj and not (
node.is_class if isinstance(node, FuncBase) else node.is_classmethod
):
# Don't bind instance methods on class objects.
signature = p_typ
else:
signature = bind_self(
p_typ, subtype, is_classmethod=isinstance(node, Var) and node.is_classmethod
)
if node.is_property and not class_obj:
assert isinstance(signature, CallableType)
typ = signature.ret_type
else:
typ = signature
itype = map_instance_to_supertype(itype, node.info)
typ = expand_type_by_instance(typ, itype)
return typ
def non_method_protocol_members(tp: TypeInfo) -> list[str]:
"""Find all non-callable members of a protocol."""
assert tp.is_protocol
result: list[str] = []
anytype = AnyType(TypeOfAny.special_form)
instance = Instance(tp, [anytype] * len(tp.defn.type_vars))
for member in tp.protocol_members:
typ = get_proper_type(find_member(member, instance, instance))
if not isinstance(typ, (Overloaded, CallableType)):
result.append(member)
return result
def is_callable_compatible(
left: CallableType,
right: CallableType,
*,
is_compat: Callable[[Type, Type], bool],
is_proper_subtype: bool,
is_compat_return: Callable[[Type, Type], bool] | None = None,
ignore_return: bool = False,
ignore_pos_arg_names: bool = False,
check_args_covariantly: bool = False,
allow_partial_overlap: bool = False,
strict_concatenate: bool = False,
) -> bool:
"""Is the left compatible with the right, using the provided compatibility check?
is_compat:
The check we want to run against the parameters.
is_compat_return:
The check we want to run against the return type.
If None, use the 'is_compat' check.
check_args_covariantly:
If true, check if the left's args is compatible with the right's
instead of the other way around (contravariantly).
This function is mostly used to check if the left is a subtype of the right which
is why the default is to check the args contravariantly. However, it's occasionally
useful to check the args using some other check, so we leave the variance
configurable.
For example, when checking the validity of overloads, it's useful to see if
the first overload alternative has more precise arguments than the second.
We would want to check the arguments covariantly in that case.
Note! The following two function calls are NOT equivalent:
is_callable_compatible(f, g, is_compat=is_subtype, check_args_covariantly=False)
is_callable_compatible(g, f, is_compat=is_subtype, check_args_covariantly=True)
The two calls are similar in that they both check the function arguments in
the same direction: they both run `is_subtype(argument_from_g, argument_from_f)`.
However, the two calls differ in which direction they check things like
keyword arguments. For example, suppose f and g are defined like so:
def f(x: int, *y: int) -> int: ...
def g(x: int) -> int: ...
In this case, the first call will succeed and the second will fail: f is a
valid stand-in for g but not vice-versa.
allow_partial_overlap:
By default this function returns True if and only if *all* calls to left are
also calls to right (with respect to the provided 'is_compat' function).
If this parameter is set to 'True', we return True if *there exists at least one*
call to left that's also a call to right.
In other words, we perform an existential check instead of a universal one;
we require left to only overlap with right instead of being a subset.
        For example, suppose we set 'is_compat' to some subtype check and compare the following:
f(x: float, y: str = "...", *args: bool) -> str
g(*args: int) -> str
This function would normally return 'False': f is not a subtype of g.
However, we would return True if this parameter is set to 'True': the two
calls are compatible if the user runs "f_or_g(3)". In the context of that
specific call, the two functions effectively have signatures of:
f2(float) -> str
g2(int) -> str
Here, f2 is a valid subtype of g2 so we return True.
Specifically, if this parameter is set this function will:
- Ignore optional arguments on either the left or right that have no
corresponding match.
- No longer mandate optional arguments on either side are also optional
on the other.
- No longer mandate that if right has a *arg or **kwarg that left must also
have the same.
Note: when this argument is set to True, this function becomes "symmetric" --
the following calls are equivalent:
is_callable_compatible(f, g,
is_compat=some_check,
check_args_covariantly=False,
allow_partial_overlap=True)
is_callable_compatible(g, f,
is_compat=some_check,
check_args_covariantly=True,
allow_partial_overlap=True)
If the 'some_check' function is also symmetric, the two calls would be equivalent
whether or not we check the args covariantly.
"""
# Normalize both types before comparing them.
left = left.with_unpacked_kwargs().with_normalized_var_args()
right = right.with_unpacked_kwargs().with_normalized_var_args()
if is_compat_return is None:
is_compat_return = is_compat
# If either function is implicitly typed, ignore positional arg names too
if left.implicit or right.implicit:
ignore_pos_arg_names = True
# Non-type cannot be a subtype of type.
if right.is_type_obj() and not left.is_type_obj() and not allow_partial_overlap:
return False
# A callable L is a subtype of a generic callable R if L is a
# subtype of every type obtained from R by substituting types for
# the variables of R. We can check this by simply leaving the
# generic variables of R as type variables, effectively varying
# over all possible values.
# It's okay even if these variables share ids with generic
# type variables of L, because generating and solving
# constraints for the variables of L to make L a subtype of R
# (below) treats type variables on the two sides as independent.
if left.variables:
# Apply generic type variables away in left via type inference.
unified = unify_generic_callable(left, right, ignore_return=ignore_return)
if unified is None:
return False
left = unified
# Check return types.
if not ignore_return and not is_compat_return(left.ret_type, right.ret_type):
return False
if check_args_covariantly:
is_compat = flip_compat_check(is_compat)
if not strict_concatenate and (left.from_concatenate or right.from_concatenate):
strict_concatenate_check = False
else:
strict_concatenate_check = True
return are_parameters_compatible(
left,
right,
is_compat=is_compat,
is_proper_subtype=is_proper_subtype,
ignore_pos_arg_names=ignore_pos_arg_names,
allow_partial_overlap=allow_partial_overlap,
strict_concatenate_check=strict_concatenate_check,
)
def are_trivial_parameters(param: Parameters | NormalizedCallableType) -> bool:
param_star = param.var_arg()
param_star2 = param.kw_arg()
return (
param.arg_kinds == [ARG_STAR, ARG_STAR2]
and param_star is not None
and isinstance(get_proper_type(param_star.typ), AnyType)
and param_star2 is not None
and isinstance(get_proper_type(param_star2.typ), AnyType)
)
def is_trivial_suffix(param: Parameters | NormalizedCallableType) -> bool:
param_star = param.var_arg()
param_star2 = param.kw_arg()
return (
param.arg_kinds[-2:] == [ARG_STAR, ARG_STAR2]
and param_star is not None
and isinstance(get_proper_type(param_star.typ), AnyType)
and param_star2 is not None
and isinstance(get_proper_type(param_star2.typ), AnyType)
)
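# Roughly speaking (illustrative): are_trivial_parameters() matches exactly
# "(*args: Any, **kwargs: Any)", while is_trivial_suffix() also accepts signatures
# such as "(x: int, *args: Any, **kwargs: Any)", i.e. anything that merely *ends*
# with the fully permissive star arguments.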
def are_parameters_compatible(
left: Parameters | NormalizedCallableType,
right: Parameters | NormalizedCallableType,
*,
is_compat: Callable[[Type, Type], bool],
is_proper_subtype: bool,
ignore_pos_arg_names: bool = False,
allow_partial_overlap: bool = False,
strict_concatenate_check: bool = False,
) -> bool:
"""Helper function for is_callable_compatible, used for Parameter compatibility"""
if right.is_ellipsis_args and not is_proper_subtype:
return True
left_star = left.var_arg()
left_star2 = left.kw_arg()
right_star = right.var_arg()
right_star2 = right.kw_arg()
# Treat "def _(*a: Any, **kw: Any) -> X" similarly to "Callable[..., X]"
if are_trivial_parameters(right) and not is_proper_subtype:
return True
trivial_suffix = is_trivial_suffix(right) and not is_proper_subtype
if (
right.arg_kinds == [ARG_STAR]
and isinstance(get_proper_type(right.arg_types[0]), AnyType)
and not is_proper_subtype
):
# Similar to how (*Any, **Any) is considered a supertype of all callables, we consider
# (*Any) a supertype of all callables with positional arguments. This is needed in
# particular because we often refuse to try type inference if actual type is not
# a subtype of erased template type.
if all(k.is_positional() for k in left.arg_kinds) and ignore_pos_arg_names:
return True
# Match up corresponding arguments and check them for compatibility. In
# every pair (argL, argR) of corresponding arguments from L and R, argL must
# be "more general" than argR if L is to be a subtype of R.
# Arguments are corresponding if they either share a name, share a position,
# or both. If L's corresponding argument is ambiguous, L is not a subtype of R.
# If left has one corresponding argument by name and another by position,
# consider them to be one "merged" argument (and not ambiguous) if they're
# both optional, they're name-only and position-only respectively, and they
# have the same type. This rule allows functions with (*args, **kwargs) to
# properly stand in for the full domain of formal arguments that they're
# used for in practice.
# Every argument in R must have a corresponding argument in L, and every
# required argument in L must have a corresponding argument in R.
# Phase 1: Confirm every argument in R has a corresponding argument in L.
# Phase 1a: If left and right can both accept an infinite number of args,
# their types must be compatible.
#
# Furthermore, if we're checking for compatibility in all cases,
# we confirm that if R accepts an infinite number of arguments,
# L must accept the same.
def _incompatible(left_arg: FormalArgument | None, right_arg: FormalArgument | None) -> bool:
if right_arg is None:
return False
if left_arg is None:
return not allow_partial_overlap and not trivial_suffix
return not is_compat(right_arg.typ, left_arg.typ)
if _incompatible(left_star, right_star) or _incompatible(left_star2, right_star2):
return False
# Phase 1b: Check non-star args: for every arg right can accept, left must
# also accept. The only exception is if we are allowing partial
# overlaps: in that case, we ignore optional args on the right.
for right_arg in right.formal_arguments():
left_arg = mypy.typeops.callable_corresponding_argument(left, right_arg)
if left_arg is None:
if allow_partial_overlap and not right_arg.required:
continue
return False
if not are_args_compatible(
left_arg,
right_arg,
is_compat,
ignore_pos_arg_names=ignore_pos_arg_names,
allow_partial_overlap=allow_partial_overlap,
allow_imprecise_kinds=right.imprecise_arg_kinds,
):
return False
# Phase 1c: Check var args. Right has an infinite series of optional positional
# arguments. Get all further positional args of left, and make sure
# they're more general than the corresponding member in right.
# TODO: are we handling UnpackType correctly here?
if right_star is not None and not trivial_suffix:
# Synthesize an anonymous formal argument for the right
right_by_position = right.try_synthesizing_arg_from_vararg(None)
assert right_by_position is not None
i = right_star.pos
assert i is not None
while i < len(left.arg_kinds) and left.arg_kinds[i].is_positional():
if allow_partial_overlap and left.arg_kinds[i].is_optional():
break
left_by_position = left.argument_by_position(i)
assert left_by_position is not None
if not are_args_compatible(
left_by_position,
right_by_position,
is_compat,
ignore_pos_arg_names=ignore_pos_arg_names,
allow_partial_overlap=allow_partial_overlap,
):
return False
i += 1
# Phase 1d: Check kw args. Right has an infinite series of optional named
# arguments. Get all further named args of left, and make sure
# they're more general than the corresponding member in right.
if right_star2 is not None and not trivial_suffix:
right_names = {name for name in right.arg_names if name is not None}
left_only_names = set()
for name, kind in zip(left.arg_names, left.arg_kinds):
if (
name is None
or kind.is_star()
or name in right_names
or not strict_concatenate_check
):
continue
left_only_names.add(name)
# Synthesize an anonymous formal argument for the right
right_by_name = right.try_synthesizing_arg_from_kwarg(None)
assert right_by_name is not None
for name in left_only_names:
left_by_name = left.argument_by_name(name)
assert left_by_name is not None
if allow_partial_overlap and not left_by_name.required:
continue
if not are_args_compatible(
left_by_name,
right_by_name,
is_compat,
ignore_pos_arg_names=ignore_pos_arg_names,
allow_partial_overlap=allow_partial_overlap,
):
return False
# Phase 2: Left must not impose additional restrictions.
# (Every required argument in L must have a corresponding argument in R)
# Note: we already checked the *arg and **kwarg arguments in phase 1a.
for left_arg in left.formal_arguments():
right_by_name = (
right.argument_by_name(left_arg.name) if left_arg.name is not None else None
)
right_by_pos = (
right.argument_by_position(left_arg.pos) if left_arg.pos is not None else None
)
# If the left hand argument corresponds to two right-hand arguments,
# neither of them can be required.
if (
right_by_name is not None
and right_by_pos is not None
and right_by_name != right_by_pos
and (right_by_pos.required or right_by_name.required)
and strict_concatenate_check
and not right.imprecise_arg_kinds
):
return False
# All *required* left-hand arguments must have a corresponding
# right-hand argument. Optional args do not matter.
if left_arg.required and right_by_pos is None and right_by_name is None:
return False
return True
def are_args_compatible(
left: FormalArgument,
right: FormalArgument,
is_compat: Callable[[Type, Type], bool],
*,
ignore_pos_arg_names: bool,
allow_partial_overlap: bool,
allow_imprecise_kinds: bool = False,
) -> bool:
if left.required and right.required:
# If both arguments are required allow_partial_overlap has no effect.
allow_partial_overlap = False
def is_different(
left_item: object | None, right_item: object | None, allow_overlap: bool
) -> bool:
"""Checks if the left and right items are different.
If the right item is unspecified (e.g. if the right callable doesn't care
about what name or position its arg has), we default to returning False.
If we're allowing partial overlap, we also default to returning False
if the left callable also doesn't care."""
if right_item is None:
return False
if allow_overlap and left_item is None:
return False
return left_item != right_item
# If right has a specific name it wants this argument to be, left must
# have the same.
if is_different(left.name, right.name, allow_partial_overlap):
# But pay attention to whether we're ignoring positional arg names
if not ignore_pos_arg_names or right.pos is None:
return False
# If right is at a specific position, left must have the same.
# TODO: partial overlap logic is flawed for positions.
# We disable it to avoid false positives at a cost of few false negatives.
if is_different(left.pos, right.pos, allow_overlap=False) and not allow_imprecise_kinds:
return False
# If right's argument is optional, left's must also be
# (unless we're relaxing the checks to allow potential
# rather than definite compatibility).
if not allow_partial_overlap and not right.required and left.required:
return False
# If we're allowing partial overlaps and neither arg is required,
# the types don't actually need to be the same
if allow_partial_overlap and not left.required and not right.required:
return True
# Left must have a more general type
return is_compat(right.typ, left.typ)
def flip_compat_check(is_compat: Callable[[Type, Type], bool]) -> Callable[[Type, Type], bool]:
def new_is_compat(left: Type, right: Type) -> bool:
return is_compat(right, left)
return new_is_compat
def unify_generic_callable(
type: NormalizedCallableType,
target: NormalizedCallableType,
ignore_return: bool,
return_constraint_direction: int | None = None,
) -> NormalizedCallableType | None:
"""Try to unify a generic callable type with another callable type.
Return unified CallableType if successful; otherwise, return None.
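    For example (illustrative), unifying the generic callable 'def [T] (T) -> T'
    against the target '(int) -> int' should infer T = int and produce the callable
    '(int) -> int'; if no consistent solution for the type variables exists, None
    is returned.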
"""
import mypy.solve
if set(type.type_var_ids()) & {v.id for v in mypy.typeops.get_all_type_vars(target)}:
# Overload overlap check does nasty things like unifying in opposite direction.
# This can easily create type variable clashes, so we need to refresh.
type = freshen_function_type_vars(type)
if return_constraint_direction is None:
return_constraint_direction = mypy.constraints.SUBTYPE_OF
constraints: list[mypy.constraints.Constraint] = []
# There is some special logic for inference in callables, so better use them
# as wholes instead of picking separate arguments.
cs = mypy.constraints.infer_constraints(
type.copy_modified(ret_type=UninhabitedType()),
target.copy_modified(ret_type=UninhabitedType()),
mypy.constraints.SUBTYPE_OF,
skip_neg_op=True,
)
constraints.extend(cs)
if not ignore_return:
c = mypy.constraints.infer_constraints(
type.ret_type, target.ret_type, return_constraint_direction
)
constraints.extend(c)
inferred_vars, _ = mypy.solve.solve_constraints(
type.variables, constraints, allow_polymorphic=True
)
if None in inferred_vars:
return None
non_none_inferred_vars = cast(List[Type], inferred_vars)
had_errors = False
def report(*args: Any) -> None:
nonlocal had_errors
had_errors = True
# This function may be called by the solver, so we need to allow erased types here.
# We anyway allow checking subtyping between other types containing <Erased>
# (probably also because solver needs subtyping). See also comment in
# ExpandTypeVisitor.visit_erased_type().
applied = mypy.applytype.apply_generic_arguments(
type, non_none_inferred_vars, report, context=target
)
if had_errors:
return None
return cast(NormalizedCallableType, applied)
def try_restrict_literal_union(t: UnionType, s: Type) -> list[Type] | None:
"""Return the items of t, excluding any occurrence of s, if and only if
- t only contains simple literals
- s is a simple literal
Otherwise, returns None
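    For example (illustrative): with t = Literal["a"] | Literal["b"] and
    s = Literal["a"], the result would be [Literal["b"]]; with s = str
    (not a simple literal), the result is None.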
"""
ps = get_proper_type(s)
if not mypy.typeops.is_simple_literal(ps):
return None
new_items: list[Type] = []
for i in t.relevant_items():
pi = get_proper_type(i)
if not mypy.typeops.is_simple_literal(pi):
return None
if pi != ps:
new_items.append(i)
return new_items
def restrict_subtype_away(t: Type, s: Type) -> Type:
"""Return t minus s for runtime type assertions.
If we can't determine a precise result, return a supertype of the
ideal result (just t is a valid result).
This is used for type inference of runtime type checks such as
isinstance(). Currently, this just removes elements of a union type.
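    For example (illustrative), restricting 'int | str' away from 'int' (as in the
    else-branch of 'isinstance(x, int)') would be expected to leave 'str', while
    restricting 'bool' away from 'int' yields an uninhabited type.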
"""
p_t = get_proper_type(t)
if isinstance(p_t, UnionType):
new_items = try_restrict_literal_union(p_t, s)
if new_items is None:
new_items = [
restrict_subtype_away(item, s)
for item in p_t.relevant_items()
if (isinstance(get_proper_type(item), AnyType) or not covers_at_runtime(item, s))
]
return UnionType.make_union(new_items)
elif isinstance(p_t, TypeVarType):
return p_t.copy_modified(upper_bound=restrict_subtype_away(p_t.upper_bound, s))
elif covers_at_runtime(t, s):
return UninhabitedType()
else:
return t
def covers_at_runtime(item: Type, supertype: Type) -> bool:
"""Will isinstance(item, supertype) always return True at runtime?"""
item = get_proper_type(item)
supertype = get_proper_type(supertype)
# Since runtime type checks will ignore type arguments, erase the types.
supertype = erase_type(supertype)
if is_proper_subtype(
erase_type(item), supertype, ignore_promotions=True, erase_instances=True
):
return True
if isinstance(supertype, Instance):
if supertype.type.is_protocol:
# TODO: Implement more robust support for runtime isinstance() checks, see issue #3827.
if is_proper_subtype(item, supertype, ignore_promotions=True):
return True
if isinstance(item, TypedDictType):
# Special case useful for selecting TypedDicts from unions using isinstance(x, dict).
if supertype.type.fullname == "builtins.dict":
return True
elif isinstance(item, TypeVarType):
if is_proper_subtype(item.upper_bound, supertype, ignore_promotions=True):
return True
elif isinstance(item, Instance) and supertype.type.fullname == "builtins.int":
# "int" covers all native int types
if item.type.fullname in MYPYC_NATIVE_INT_NAMES:
return True
# TODO: Add more special cases.
return False
def is_more_precise(left: Type, right: Type, *, ignore_promotions: bool = False) -> bool:
"""Check if left is a more precise type than right.
    If left is a proper subtype of right, then left is also more precise than
right. Also, if right is Any, left is more precise than right, for
any left.
"""
# TODO Should List[int] be more precise than List[Any]?
right = get_proper_type(right)
if isinstance(right, AnyType):
return True
return is_proper_subtype(left, right, ignore_promotions=ignore_promotions)
def all_non_object_members(info: TypeInfo) -> set[str]:
members = set(info.names)
for base in info.mro[1:-1]:
members.update(base.names)
return members
def infer_variance(info: TypeInfo, i: int) -> bool:
"""Infer the variance of the ith type variable of a generic class.
Return True if successful. This can fail if some inferred types aren't ready.
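    Very roughly (illustrative): a type variable used only in return positions tends
    to be inferred as covariant, one used only in argument positions as contravariant,
    and one used in a settable attribute as invariant.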
"""
object_type = Instance(info.mro[-1], [])
for variance in COVARIANT, CONTRAVARIANT, INVARIANT:
tv = info.defn.type_vars[i]
assert isinstance(tv, TypeVarType)
if tv.variance != VARIANCE_NOT_READY:
continue
tv.variance = variance
co = True
contra = True
tvar = info.defn.type_vars[i]
self_type = fill_typevars(info)
for member in all_non_object_members(info):
# __mypy-replace is an implementation detail of the dataclass plugin
if member in ("__init__", "__new__", "__mypy-replace"):
continue
if isinstance(self_type, TupleType):
self_type = mypy.typeops.tuple_fallback(self_type)
flags = get_member_flags(member, self_type)
settable = IS_SETTABLE in flags
node = info[member].node
if isinstance(node, Var):
if node.type is None:
tv.variance = VARIANCE_NOT_READY
return False
if has_underscore_prefix(member):
# Special case to avoid false positives (and to pass conformance tests)
settable = False
typ = find_member(member, self_type, self_type)
if typ:
# It's okay for a method in a generic class with a contravariant type
# variable to return a generic instance of the class, if it doesn't involve
# variance (i.e. values of type variables are propagated). Our normal rules
# would disallow this. Replace such return types with 'Any' to allow this.
#
# This could probably be more lenient (e.g. allow self type be nested, don't
# require all type arguments to be identical to self_type), but this will
# hopefully cover the vast majority of such cases, including Self.
typ = erase_return_self_types(typ, self_type)
typ2 = expand_type(typ, {tvar.id: object_type})
if not is_subtype(typ, typ2):
co = False
if not is_subtype(typ2, typ):
contra = False
if settable:
co = False
# Infer variance from base classes, in case they have explicit variances
for base in info.bases:
base2 = expand_type(base, {tvar.id: object_type})
if not is_subtype(base, base2):
co = False
if not is_subtype(base2, base):
contra = False
if co:
v = COVARIANT
elif contra:
v = CONTRAVARIANT
else:
v = INVARIANT
if v == variance:
break
tv.variance = VARIANCE_NOT_READY
return True
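# Illustrative example (hypothetical, kept as comments because the new type-parameter
# syntax needs Python 3.12+): infer_variance() targets classes declared with PEP 695
# type parameters, whose variance is never written out explicitly:
#
#     class Producer[T]:                       # inferred covariant
#         def get(self) -> T: ...
#
#     class Consumer[T]:                       # inferred contravariant
#         def put(self, item: T) -> None: ...
#
#     class Cell[T]:                           # settable attribute -> invariant
#         value: T
#
# A type variable used only in return positions comes out covariant, only in
# parameter positions contravariant, and settable members force invariance.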
def has_underscore_prefix(name: str) -> bool:
return name.startswith("_") and not (name.startswith("__") and name.endswith("__"))
def infer_class_variances(info: TypeInfo) -> bool:
if not info.defn.type_args:
return True
tvs = info.defn.type_vars
success = True
for i, tv in enumerate(tvs):
if isinstance(tv, TypeVarType) and tv.variance == VARIANCE_NOT_READY:
if not infer_variance(info, i):
success = False
return success
def erase_return_self_types(typ: Type, self_type: Instance) -> Type:
"""If a typ is function-like and returns self_type, replace return type with Any."""
proper_type = get_proper_type(typ)
if isinstance(proper_type, CallableType):
ret = get_proper_type(proper_type.ret_type)
if isinstance(ret, Instance) and ret == self_type:
return proper_type.copy_modified(ret_type=AnyType(TypeOfAny.implementation_artifact))
elif isinstance(proper_type, Overloaded):
return Overloaded(
[
cast(CallableType, erase_return_self_types(it, self_type))
for it in proper_type.items
]
)
return typ
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/subtypes.py
|
Python
|
NOASSERTION
| 92,670 |
"""Mechanisms for inferring function types based on callsites.
Currently works by collecting all argument types at callsites,
synthesizing a list of possible function types from that, trying them
all, and picking the one with the fewest errors that we think is the
"best".
Can return JSON that pyannotate can use to apply the annotations to code.
There are a bunch of TODOs here:
* Maybe want a way to surface the choices not selected??
* We can generate an exponential number of type suggestions, and probably want
a way to not always need to check them all.
* Our heuristics for what types to try are primitive and not yet
   validated by real practice.
* More!
Other things:
* This is super brute force. Could we integrate with the typechecker
more to understand more about what is going on?
* Like something with tracking constraints/unification variables?
* No understanding of type variables at *all*
"""
from __future__ import annotations
import itertools
import json
import os
from contextlib import contextmanager
from typing import Callable, Iterator, NamedTuple, TypeVar, cast
from typing_extensions import TypedDict
from mypy.argmap import map_actuals_to_formals
from mypy.build import Graph, State
from mypy.checkexpr import has_any_type
from mypy.find_sources import InvalidSourceList, SourceFinder
from mypy.join import join_type_list
from mypy.meet import meet_type_list
from mypy.modulefinder import PYTHON_EXTENSIONS
from mypy.nodes import (
ARG_STAR,
ARG_STAR2,
ArgKind,
CallExpr,
Decorator,
Expression,
FuncDef,
MypyFile,
RefExpr,
ReturnStmt,
SymbolNode,
SymbolTable,
TypeInfo,
reverse_builtin_aliases,
)
from mypy.options import Options
from mypy.plugin import FunctionContext, MethodContext, Plugin
from mypy.server.update import FineGrainedBuildManager
from mypy.state import state
from mypy.traverser import TraverserVisitor
from mypy.typeops import make_simplified_union
from mypy.types import (
AnyType,
CallableType,
FunctionLike,
Instance,
NoneType,
ProperType,
TupleType,
Type,
TypeAliasType,
TypedDictType,
TypeOfAny,
TypeStrVisitor,
TypeTranslator,
TypeVarType,
UninhabitedType,
UnionType,
get_proper_type,
)
from mypy.types_utils import is_overlapping_none, remove_optional
from mypy.util import split_target
class PyAnnotateSignature(TypedDict):
return_type: str
arg_types: list[str]
class Callsite(NamedTuple):
path: str
line: int
arg_kinds: list[list[ArgKind]]
callee_arg_names: list[str | None]
arg_names: list[list[str | None]]
arg_types: list[list[Type]]
class SuggestionPlugin(Plugin):
"""Plugin that records all calls to a given target."""
def __init__(self, target: str) -> None:
if target.endswith((".__new__", ".__init__")):
target = target.rsplit(".", 1)[0]
self.target = target
# List of call sites found by dmypy suggest:
# (path, line, <arg kinds>, <arg names>, <arg types>)
self.mystery_hits: list[Callsite] = []
def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:
if fullname == self.target:
return self.log
else:
return None
def get_method_hook(self, fullname: str) -> Callable[[MethodContext], Type] | None:
if fullname == self.target:
return self.log
else:
return None
def log(self, ctx: FunctionContext | MethodContext) -> Type:
self.mystery_hits.append(
Callsite(
ctx.api.path,
ctx.context.line,
ctx.arg_kinds,
ctx.callee_arg_names,
ctx.arg_names,
ctx.arg_types,
)
)
return ctx.default_return_type
# NOTE: We could make this a bunch faster by implementing a StatementVisitor that skips
# traversing into expressions
class ReturnFinder(TraverserVisitor):
"""Visitor for finding all types returned from a function."""
def __init__(self, typemap: dict[Expression, Type]) -> None:
self.typemap = typemap
self.return_types: list[Type] = []
def visit_return_stmt(self, o: ReturnStmt) -> None:
if o.expr is not None and o.expr in self.typemap:
self.return_types.append(self.typemap[o.expr])
def visit_func_def(self, o: FuncDef) -> None:
# Skip nested functions
pass
def get_return_types(typemap: dict[Expression, Type], func: FuncDef) -> list[Type]:
"""Find all the types returned by return statements in func."""
finder = ReturnFinder(typemap)
func.body.accept(finder)
return finder.return_types
class ArgUseFinder(TraverserVisitor):
"""Visitor for finding all the types of arguments that each arg is passed to.
    This is extremely simple-minded but might be effective anyway.
"""
def __init__(self, func: FuncDef, typemap: dict[Expression, Type]) -> None:
self.typemap = typemap
self.arg_types: dict[SymbolNode, list[Type]] = {arg.variable: [] for arg in func.arguments}
def visit_call_expr(self, o: CallExpr) -> None:
if not any(isinstance(e, RefExpr) and e.node in self.arg_types for e in o.args):
return
typ = get_proper_type(self.typemap.get(o.callee))
if not isinstance(typ, CallableType):
return
formal_to_actual = map_actuals_to_formals(
o.arg_kinds,
o.arg_names,
typ.arg_kinds,
typ.arg_names,
lambda n: AnyType(TypeOfAny.special_form),
)
for i, args in enumerate(formal_to_actual):
for arg_idx in args:
arg = o.args[arg_idx]
if isinstance(arg, RefExpr) and arg.node in self.arg_types:
self.arg_types[arg.node].append(typ.arg_types[i])
def get_arg_uses(typemap: dict[Expression, Type], func: FuncDef) -> list[list[Type]]:
"""Find all the types of arguments that each arg is passed to.
For example, given
def foo(x: int) -> None: ...
def bar(x: str) -> None: ...
def test(x, y):
foo(x)
bar(y)
this will return [[int], [str]].
"""
finder = ArgUseFinder(func, typemap)
func.body.accept(finder)
return [finder.arg_types[arg.variable] for arg in func.arguments]
class SuggestionFailure(Exception):
pass
def is_explicit_any(typ: AnyType) -> bool:
# Originally I wanted to count as explicit anything derived from an explicit any, but that
# seemed too strict in some testing.
# return (typ.type_of_any == TypeOfAny.explicit
# or (typ.source_any is not None and typ.source_any.type_of_any == TypeOfAny.explicit))
# Important question: what should we do with source_any stuff? Does that count?
# And actually should explicit anys count at all?? Maybe not!
return typ.type_of_any == TypeOfAny.explicit
def is_implicit_any(typ: Type) -> bool:
typ = get_proper_type(typ)
return isinstance(typ, AnyType) and not is_explicit_any(typ)
class SuggestionEngine:
"""Engine for finding call sites and suggesting signatures."""
def __init__(
self,
fgmanager: FineGrainedBuildManager,
*,
json: bool,
no_errors: bool = False,
no_any: bool = False,
flex_any: float | None = None,
use_fixme: str | None = None,
max_guesses: int | None = None,
) -> None:
self.fgmanager = fgmanager
self.manager = fgmanager.manager
self.plugin = self.manager.plugin
self.graph = fgmanager.graph
self.finder = SourceFinder(self.manager.fscache, self.manager.options)
self.give_json = json
self.no_errors = no_errors
self.flex_any = flex_any
if no_any:
self.flex_any = 1.0
self.max_guesses = max_guesses or 64
self.use_fixme = use_fixme
def suggest(self, function: str) -> str:
"""Suggest an inferred type for function."""
mod, func_name, node = self.find_node(function)
with self.restore_after(mod):
with self.with_export_types():
suggestion = self.get_suggestion(mod, node)
if self.give_json:
return self.json_suggestion(mod, func_name, node, suggestion)
else:
return self.format_signature(suggestion)
def suggest_callsites(self, function: str) -> str:
"""Find a list of call sites of function."""
mod, _, node = self.find_node(function)
with self.restore_after(mod):
callsites, _ = self.get_callsites(node)
return "\n".join(
dedup(
[
f"{path}:{line}: {self.format_args(arg_kinds, arg_names, arg_types)}"
for path, line, arg_kinds, _, arg_names, arg_types in callsites
]
)
)
@contextmanager
def restore_after(self, module: str) -> Iterator[None]:
"""Context manager that reloads a module after executing the body.
This should undo any damage done to the module state while mucking around.
"""
try:
yield
finally:
self.reload(self.graph[module])
@contextmanager
def with_export_types(self) -> Iterator[None]:
"""Context manager that enables the export_types flag in the body.
This causes type information to be exported into the manager's all_types variable.
"""
old = self.manager.options.export_types
self.manager.options.export_types = True
try:
yield
finally:
self.manager.options.export_types = old
def get_trivial_type(self, fdef: FuncDef) -> CallableType:
"""Generate a trivial callable type from a func def, with all Anys"""
# The Anys are marked as being from the suggestion engine
# since they need some special treatment (specifically,
# constraint generation ignores them.)
return CallableType(
[AnyType(TypeOfAny.suggestion_engine) for _ in fdef.arg_kinds],
fdef.arg_kinds,
fdef.arg_names,
AnyType(TypeOfAny.suggestion_engine),
self.named_type("builtins.function"),
)
def get_starting_type(self, fdef: FuncDef) -> CallableType:
if isinstance(fdef.type, CallableType):
return make_suggestion_anys(fdef.type)
else:
return self.get_trivial_type(fdef)
def get_args(
self,
is_method: bool,
base: CallableType,
defaults: list[Type | None],
callsites: list[Callsite],
uses: list[list[Type]],
) -> list[list[Type]]:
"""Produce a list of type suggestions for each argument type."""
types: list[list[Type]] = []
for i in range(len(base.arg_kinds)):
# Make self args Any but this will get overridden somewhere in the checker
if i == 0 and is_method:
types.append([AnyType(TypeOfAny.suggestion_engine)])
continue
all_arg_types = []
for call in callsites:
for typ in call.arg_types[i - is_method]:
# Collect all the types except for implicit anys
if not is_implicit_any(typ):
all_arg_types.append(typ)
all_use_types = []
for typ in uses[i]:
# Collect all the types except for implicit anys
if not is_implicit_any(typ):
all_use_types.append(typ)
# Add in any default argument types
default = defaults[i]
if default:
all_arg_types.append(default)
if all_use_types:
all_use_types.append(default)
arg_types = []
if all_arg_types and all(
isinstance(get_proper_type(tp), NoneType) for tp in all_arg_types
):
arg_types.append(
UnionType.make_union([all_arg_types[0], AnyType(TypeOfAny.explicit)])
)
elif all_arg_types:
arg_types.extend(generate_type_combinations(all_arg_types))
else:
arg_types.append(AnyType(TypeOfAny.explicit))
if all_use_types:
# This is a meet because the type needs to be compatible with all the uses
arg_types.append(meet_type_list(all_use_types))
types.append(arg_types)
return types
def get_default_arg_types(self, fdef: FuncDef) -> list[Type | None]:
return [
self.manager.all_types[arg.initializer] if arg.initializer else None
for arg in fdef.arguments
]
def get_guesses(
self,
is_method: bool,
base: CallableType,
defaults: list[Type | None],
callsites: list[Callsite],
uses: list[list[Type]],
) -> list[CallableType]:
"""Compute a list of guesses for a function's type.
This focuses just on the argument types, and doesn't change the provided return type.
"""
options = self.get_args(is_method, base, defaults, callsites, uses)
# Take the first `max_guesses` guesses.
product = itertools.islice(itertools.product(*options), 0, self.max_guesses)
return [refine_callable(base, base.copy_modified(arg_types=list(x))) for x in product]
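    # Worked example (hypothetical values): if get_args() offers [int, str] for one
    # argument and [bytes] for another, itertools.product enumerates the guesses
    #     (int, bytes), (str, bytes)
    # and islice caps the enumeration at max_guesses (64 by default), so the search
    # stays bounded even when many arguments have many candidates.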
def get_callsites(self, func: FuncDef) -> tuple[list[Callsite], list[str]]:
"""Find all call sites of a function."""
new_type = self.get_starting_type(func)
collector_plugin = SuggestionPlugin(func.fullname)
self.plugin._plugins.insert(0, collector_plugin)
try:
errors = self.try_type(func, new_type)
finally:
self.plugin._plugins.pop(0)
return collector_plugin.mystery_hits, errors
def filter_options(
self, guesses: list[CallableType], is_method: bool, ignore_return: bool
) -> list[CallableType]:
"""Apply any configured filters to the possible guesses.
        Currently the only option is filtering based on Any prevalence."""
return [
t
for t in guesses
if self.flex_any is None
or any_score_callable(t, is_method, ignore_return) >= self.flex_any
]
def find_best(self, func: FuncDef, guesses: list[CallableType]) -> tuple[CallableType, int]:
"""From a list of possible function types, find the best one.
For best, we want the fewest errors, then the best "score" from score_callable.
"""
if not guesses:
raise SuggestionFailure("No guesses that match criteria!")
errors = {guess: self.try_type(func, guess) for guess in guesses}
best = min(guesses, key=lambda s: (count_errors(errors[s]), self.score_callable(s)))
return best, count_errors(errors[best])
def get_guesses_from_parent(self, node: FuncDef) -> list[CallableType]:
"""Try to get a guess of a method type from a parent class."""
if not node.info:
return []
for parent in node.info.mro[1:]:
pnode = parent.names.get(node.name)
if pnode and isinstance(pnode.node, (FuncDef, Decorator)):
typ = get_proper_type(pnode.node.type)
                # FIXME: Doesn't work right with generic types
if isinstance(typ, CallableType) and len(typ.arg_types) == len(node.arguments):
# Return the first thing we find, since it probably doesn't make sense
# to grab things further up in the chain if an earlier parent has it.
return [typ]
return []
def get_suggestion(self, mod: str, node: FuncDef) -> PyAnnotateSignature:
"""Compute a suggestion for a function.
        Return the suggested signature for the function.
"""
graph = self.graph
callsites, orig_errors = self.get_callsites(node)
uses = get_arg_uses(self.manager.all_types, node)
if self.no_errors and orig_errors:
raise SuggestionFailure("Function does not typecheck.")
is_method = bool(node.info) and not node.is_static
with state.strict_optional_set(graph[mod].options.strict_optional):
guesses = self.get_guesses(
is_method,
self.get_starting_type(node),
self.get_default_arg_types(node),
callsites,
uses,
)
guesses += self.get_guesses_from_parent(node)
guesses = self.filter_options(guesses, is_method, ignore_return=True)
best, _ = self.find_best(node, guesses)
# Now try to find the return type!
self.try_type(node, best)
returns = get_return_types(self.manager.all_types, node)
with state.strict_optional_set(graph[mod].options.strict_optional):
if returns:
ret_types = generate_type_combinations(returns)
else:
ret_types = [NoneType()]
guesses = [best.copy_modified(ret_type=refine_type(best.ret_type, t)) for t in ret_types]
guesses = self.filter_options(guesses, is_method, ignore_return=False)
best, errors = self.find_best(node, guesses)
if self.no_errors and errors:
raise SuggestionFailure("No annotation without errors")
return self.pyannotate_signature(mod, is_method, best)
def format_args(
self,
arg_kinds: list[list[ArgKind]],
arg_names: list[list[str | None]],
arg_types: list[list[Type]],
) -> str:
args: list[str] = []
for i in range(len(arg_types)):
for kind, name, typ in zip(arg_kinds[i], arg_names[i], arg_types[i]):
arg = self.format_type(None, typ)
if kind == ARG_STAR:
arg = "*" + arg
elif kind == ARG_STAR2:
arg = "**" + arg
elif kind.is_named():
if name:
arg = f"{name}={arg}"
args.append(arg)
return f"({', '.join(args)})"
def find_node(self, key: str) -> tuple[str, str, FuncDef]:
"""From a target name, return module/target names and the func def.
The 'key' argument can be in one of two formats:
* As the function full name, e.g., package.module.Cls.method
        * As the function location, given as file and line separated by a colon,
e.g., path/to/file.py:42
"""
# TODO: Also return OverloadedFuncDef -- currently these are ignored.
node: SymbolNode | None = None
if ":" in key:
if key.count(":") > 1:
raise SuggestionFailure(
"Malformed location for function: {}. Must be either"
" package.module.Class.method or path/to/file.py:line".format(key)
)
file, line = key.split(":")
if not line.isdigit():
raise SuggestionFailure(f"Line number must be a number. Got {line}")
line_number = int(line)
modname, node = self.find_node_by_file_and_line(file, line_number)
tail = node.fullname[len(modname) + 1 :] # add one to account for '.'
else:
target = split_target(self.fgmanager.graph, key)
if not target:
raise SuggestionFailure(f"Cannot find module for {key}")
modname, tail = target
node = self.find_node_by_module_and_name(modname, tail)
if isinstance(node, Decorator):
node = self.extract_from_decorator(node)
if not node:
raise SuggestionFailure(f"Object {key} is a decorator we can't handle")
if not isinstance(node, FuncDef):
raise SuggestionFailure(f"Object {key} is not a function")
return modname, tail, node
def find_node_by_module_and_name(self, modname: str, tail: str) -> SymbolNode | None:
"""Find symbol node by module id and qualified name.
Raise SuggestionFailure if can't find one.
"""
tree = self.ensure_loaded(self.fgmanager.graph[modname])
# N.B. This is reimplemented from update's lookup_target
# basically just to produce better error messages.
names: SymbolTable = tree.names
# Look through any classes
components = tail.split(".")
for i, component in enumerate(components[:-1]):
if component not in names:
raise SuggestionFailure(
"Unknown class {}.{}".format(modname, ".".join(components[: i + 1]))
)
node: SymbolNode | None = names[component].node
if not isinstance(node, TypeInfo):
raise SuggestionFailure(
"Object {}.{} is not a class".format(modname, ".".join(components[: i + 1]))
)
names = node.names
# Look for the actual function/method
funcname = components[-1]
if funcname not in names:
key = modname + "." + tail
raise SuggestionFailure(
"Unknown {} {}".format("method" if len(components) > 1 else "function", key)
)
return names[funcname].node
def find_node_by_file_and_line(self, file: str, line: int) -> tuple[str, SymbolNode]:
"""Find symbol node by path to file and line number.
        Find the closest function declared *before or on* the line number.
Return module id and the node found. Raise SuggestionFailure if can't find one.
"""
if not any(file.endswith(ext) for ext in PYTHON_EXTENSIONS):
raise SuggestionFailure("Source file is not a Python file")
try:
modname, _ = self.finder.crawl_up(os.path.normpath(file))
except InvalidSourceList as e:
raise SuggestionFailure("Invalid source file name: " + file) from e
if modname not in self.graph:
raise SuggestionFailure("Unknown module: " + modname)
        # Force a reload so that any recent edits to this file are reflected in the line numbers.
tree = self.ensure_loaded(self.fgmanager.graph[modname], force=True)
node: SymbolNode | None = None
closest_line: int | None = None
# TODO: Handle nested functions.
for _, sym, _ in tree.local_definitions():
if isinstance(sym.node, (FuncDef, Decorator)):
sym_line = sym.node.line
# TODO: add support for OverloadedFuncDef.
else:
continue
# We want the closest function above the specified line
if sym_line <= line and (closest_line is None or sym_line > closest_line):
closest_line = sym_line
node = sym.node
if not node:
raise SuggestionFailure(f"Cannot find a function at line {line}")
return modname, node
def extract_from_decorator(self, node: Decorator) -> FuncDef | None:
for dec in node.decorators:
typ = None
if isinstance(dec, RefExpr) and isinstance(dec.node, FuncDef):
typ = dec.node.type
elif (
isinstance(dec, CallExpr)
and isinstance(dec.callee, RefExpr)
and isinstance(dec.callee.node, FuncDef)
and isinstance(dec.callee.node.type, CallableType)
):
typ = get_proper_type(dec.callee.node.type.ret_type)
if not isinstance(typ, FunctionLike):
return None
for ct in typ.items:
if not (
len(ct.arg_types) == 1
and isinstance(ct.arg_types[0], TypeVarType)
and ct.arg_types[0] == ct.ret_type
):
return None
return node.func
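    # Illustrative note (hypothetical names): extract_from_decorator() only sees
    # through identity-typed decorators, i.e. those typed like
    #     F = TypeVar("F", bound=Callable[..., Any])
    #     def keep_signature(func: F) -> F: ...
    # Decorators that rewrite the signature (e.g. typed to return Callable[..., int])
    # make this method return None, and find_node() then reports the target as a
    # decorator we can't handle.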
def try_type(self, func: FuncDef, typ: ProperType) -> list[str]:
"""Recheck a function while assuming it has type typ.
Return all error messages.
"""
old = func.unanalyzed_type
# During reprocessing, unanalyzed_type gets copied to type (by aststrip).
# We set type to None to ensure that the type always changes during
# reprocessing.
func.type = None
func.unanalyzed_type = typ
try:
res = self.fgmanager.trigger(func.fullname)
# if res:
# print('===', typ)
# print('\n'.join(res))
return res
finally:
func.unanalyzed_type = old
def reload(self, state: State) -> list[str]:
"""Recheck the module given by state."""
assert state.path is not None
self.fgmanager.flush_cache()
return self.fgmanager.update([(state.id, state.path)], [])
def ensure_loaded(self, state: State, force: bool = False) -> MypyFile:
"""Make sure that the module represented by state is fully loaded."""
if not state.tree or state.tree.is_cache_skeleton or force:
self.reload(state)
assert state.tree is not None
return state.tree
def named_type(self, s: str) -> Instance:
return self.manager.semantic_analyzer.named_type(s)
def json_suggestion(
self, mod: str, func_name: str, node: FuncDef, suggestion: PyAnnotateSignature
) -> str:
"""Produce a json blob for a suggestion suitable for application by pyannotate."""
# pyannotate irritatingly drops class names for class and static methods
if node.is_class or node.is_static:
func_name = func_name.split(".", 1)[-1]
# pyannotate works with either paths relative to where the
# module is rooted or with absolute paths. We produce absolute
# paths because it is simpler.
path = os.path.abspath(self.graph[mod].xpath)
obj = {
"signature": suggestion,
"line": node.line,
"path": path,
"func_name": func_name,
"samples": 0,
}
return json.dumps([obj], sort_keys=True)
def pyannotate_signature(
self, cur_module: str | None, is_method: bool, typ: CallableType
) -> PyAnnotateSignature:
"""Format a callable type as a pyannotate dict"""
start = int(is_method)
return {
"arg_types": [self.format_type(cur_module, t) for t in typ.arg_types[start:]],
"return_type": self.format_type(cur_module, typ.ret_type),
}
def format_signature(self, sig: PyAnnotateSignature) -> str:
"""Format a callable type in a way suitable as an annotation... kind of"""
return f"({', '.join(sig['arg_types'])}) -> {sig['return_type']}"
def format_type(self, cur_module: str | None, typ: Type) -> str:
if self.use_fixme and isinstance(get_proper_type(typ), AnyType):
return self.use_fixme
return typ.accept(TypeFormatter(cur_module, self.graph, self.manager.options))
def score_type(self, t: Type, arg_pos: bool) -> int:
"""Generate a score for a type that we use to pick which type to use.
Lower is better, prefer non-union/non-any types. Don't penalize optionals.
"""
t = get_proper_type(t)
if isinstance(t, AnyType):
return 20
if arg_pos and isinstance(t, NoneType):
return 20
if isinstance(t, UnionType):
if any(isinstance(get_proper_type(x), AnyType) for x in t.items):
return 20
if any(has_any_type(x) for x in t.items):
return 15
if not is_overlapping_none(t):
return 10
if isinstance(t, CallableType) and (has_any_type(t) or is_tricky_callable(t)):
return 10
return 0
def score_callable(self, t: CallableType) -> int:
return sum(self.score_type(x, arg_pos=True) for x in t.arg_types) + self.score_type(
t.ret_type, arg_pos=False
)
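# Worked example (hypothetical guesses): between (x: Any) -> int and (x: int) -> int
# producing the same number of errors, find_best() prefers the latter --
# score_callable() gives 20 + 0 for the Any guess versus 0 + 0 for the int guess,
# and lower scores win.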
def any_score_type(ut: Type, arg_pos: bool) -> float:
"""Generate a very made up number representing the Anyness of a type.
Higher is better, 1.0 is max
"""
t = get_proper_type(ut)
if isinstance(t, AnyType) and t.type_of_any != TypeOfAny.suggestion_engine:
return 0
if isinstance(t, NoneType) and arg_pos:
return 0.5
if isinstance(t, UnionType):
if any(isinstance(get_proper_type(x), AnyType) for x in t.items):
return 0.5
if any(has_any_type(x) for x in t.items):
return 0.25
if isinstance(t, CallableType) and is_tricky_callable(t):
return 0.5
if has_any_type(t):
return 0.5
return 1.0
def any_score_callable(t: CallableType, is_method: bool, ignore_return: bool) -> float:
# Ignore the first argument of methods
scores = [any_score_type(x, arg_pos=True) for x in t.arg_types[int(is_method) :]]
# Return type counts twice (since it spreads type information), unless it is
# None in which case it does not count at all. (Though it *does* still count
# if there are no arguments.)
if not isinstance(get_proper_type(t.ret_type), NoneType) or not scores:
ret = 1.0 if ignore_return else any_score_type(t.ret_type, arg_pos=False)
scores += [ret, ret]
return sum(scores) / len(scores)
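# Worked example (hypothetical signature): for a method (self, x: int, y: Any) -> None
# the self argument is skipped, the per-argument scores are [1.0, 0.0] (assuming the
# Any is not one of the engine's own placeholder Anys), and the None return adds
# nothing, so the callable scores 0.5. With --no-any (flex_any = 1.0),
# filter_options() would reject such a guess.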
def is_tricky_callable(t: CallableType) -> bool:
"""Is t a callable that we need to put a ... in for syntax reasons?"""
return t.is_ellipsis_args or any(k.is_star() or k.is_named() for k in t.arg_kinds)
class TypeFormatter(TypeStrVisitor):
"""Visitor used to format types"""
# TODO: Probably a lot
def __init__(self, module: str | None, graph: Graph, options: Options) -> None:
super().__init__(options=options)
self.module = module
self.graph = graph
def visit_any(self, t: AnyType) -> str:
if t.missing_import_name:
return t.missing_import_name
else:
return "Any"
def visit_instance(self, t: Instance) -> str:
s = t.type.fullname or t.type.name or None
if s is None:
return "<???>"
if s in reverse_builtin_aliases:
s = reverse_builtin_aliases[s]
mod_obj = split_target(self.graph, s)
assert mod_obj
mod, obj = mod_obj
# If a class is imported into the current module, rewrite the reference
# to point to the current module. This helps the annotation tool avoid
# inserting redundant imports when a type has been reexported.
if self.module:
parts = obj.split(".") # need to split the object part if it is a nested class
tree = self.graph[self.module].tree
if tree and parts[0] in tree.names:
mod = self.module
if (mod, obj) == ("builtins", "tuple"):
mod, obj = "typing", "Tuple[" + t.args[0].accept(self) + ", ...]"
elif t.args:
obj += f"[{self.list_str(t.args)}]"
if mod_obj == ("builtins", "unicode"):
return "Text"
elif mod == "builtins":
return obj
else:
delim = "." if "." not in obj else ":"
return mod + delim + obj
def visit_tuple_type(self, t: TupleType) -> str:
if t.partial_fallback and t.partial_fallback.type:
fallback_name = t.partial_fallback.type.fullname
if fallback_name != "builtins.tuple":
return t.partial_fallback.accept(self)
s = self.list_str(t.items)
return f"Tuple[{s}]"
def visit_uninhabited_type(self, t: UninhabitedType) -> str:
return "Any"
def visit_typeddict_type(self, t: TypedDictType) -> str:
return t.fallback.accept(self)
def visit_union_type(self, t: UnionType) -> str:
if len(t.items) == 2 and is_overlapping_none(t):
return f"Optional[{remove_optional(t).accept(self)}]"
else:
return super().visit_union_type(t)
def visit_callable_type(self, t: CallableType) -> str:
# TODO: use extended callables?
if is_tricky_callable(t):
arg_str = "..."
else:
# Note: for default arguments, we just assume that they
# are required. This isn't right, but neither is the
            # other thing, and I suspect this will produce better
# results than falling back to `...`
args = [typ.accept(self) for typ in t.arg_types]
arg_str = f"[{', '.join(args)}]"
return f"Callable[{arg_str}, {t.ret_type.accept(self)}]"
TType = TypeVar("TType", bound=Type)
def make_suggestion_anys(t: TType) -> TType:
"""Make all anys in the type as coming from the suggestion engine.
This keeps those Anys from influencing constraint generation,
which allows us to do better when refining types.
"""
return cast(TType, t.accept(MakeSuggestionAny()))
class MakeSuggestionAny(TypeTranslator):
def visit_any(self, t: AnyType) -> Type:
if not t.missing_import_name:
return t.copy_modified(type_of_any=TypeOfAny.suggestion_engine)
else:
return t
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
return t.copy_modified(args=[a.accept(self) for a in t.args])
def generate_type_combinations(types: list[Type]) -> list[Type]:
"""Generate possible combinations of a list of types.
mypy essentially supports two different ways to do this: joining the types
and unioning the types. We try both.
"""
joined_type = join_type_list(types)
union_type = make_simplified_union(types)
if joined_type == union_type:
return [joined_type]
else:
return [joined_type, union_type]
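# Worked example (hypothetical values): for collected types [int, str] the join is
# object while the simplified union is Union[int, str]; both are offered as candidate
# annotations, and find_best() keeps whichever re-checks with fewer errors (ties
# broken by the lower score_callable()).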
def count_errors(msgs: list[str]) -> int:
return len([x for x in msgs if " error: " in x])
def refine_type(ti: Type, si: Type) -> Type:
"""Refine `ti` by replacing Anys in it with information taken from `si`
This basically works by, when the types have the same structure,
traversing both of them in parallel and replacing Any on the left
with whatever the type on the right is. If the types don't have the
same structure (or aren't supported), the left type is chosen.
For example:
refine(Any, T) = T, for all T
refine(float, int) = float
refine(List[Any], List[int]) = List[int]
refine(Dict[int, Any], Dict[Any, int]) = Dict[int, int]
refine(Tuple[int, Any], Tuple[Any, int]) = Tuple[int, int]
refine(Callable[[Any], Any], Callable[[int], int]) = Callable[[int], int]
refine(Callable[..., int], Callable[[int, float], Any]) = Callable[[int, float], int]
refine(Optional[Any], int) = Optional[int]
refine(Optional[Any], Optional[int]) = Optional[int]
refine(Optional[Any], Union[int, str]) = Optional[Union[int, str]]
refine(Optional[List[Any]], List[int]) = List[int]
"""
t = get_proper_type(ti)
s = get_proper_type(si)
if isinstance(t, AnyType):
        # If s is also an Any, we only keep t when t is a missing_import Any
return t if isinstance(s, AnyType) and t.missing_import_name else s
if isinstance(t, Instance) and isinstance(s, Instance) and t.type == s.type:
return t.copy_modified(args=[refine_type(ta, sa) for ta, sa in zip(t.args, s.args)])
if (
isinstance(t, TupleType)
and isinstance(s, TupleType)
and t.partial_fallback == s.partial_fallback
and len(t.items) == len(s.items)
):
return t.copy_modified(items=[refine_type(ta, sa) for ta, sa in zip(t.items, s.items)])
if isinstance(t, CallableType) and isinstance(s, CallableType):
return refine_callable(t, s)
if isinstance(t, UnionType):
return refine_union(t, s)
# TODO: Refining of builtins.tuple, Type?
return t
def refine_union(t: UnionType, s: ProperType) -> Type:
"""Refine a union type based on another type.
This is done by refining every component of the union against the
right hand side type (or every component of its union if it is
one). If an element of the union is successfully refined, we drop it
from the union in favor of the refined versions.
"""
# Don't try to do any union refining if the types are already the
# same. This prevents things like refining Optional[Any] against
# itself and producing None.
if t == s:
return t
rhs_items = s.items if isinstance(s, UnionType) else [s]
new_items = []
for lhs in t.items:
refined = False
for rhs in rhs_items:
new = refine_type(lhs, rhs)
if new != lhs:
new_items.append(new)
refined = True
if not refined:
new_items.append(lhs)
# Turn strict optional on when simplifying the union since we
# don't want to drop Nones.
with state.strict_optional_set(True):
return make_simplified_union(new_items)
def refine_callable(t: CallableType, s: CallableType) -> CallableType:
"""Refine a callable based on another.
See comments for refine_type.
"""
if t.fallback != s.fallback:
return t
if t.is_ellipsis_args and not is_tricky_callable(s):
return s.copy_modified(ret_type=refine_type(t.ret_type, s.ret_type))
if is_tricky_callable(t) or t.arg_kinds != s.arg_kinds:
return t
return t.copy_modified(
arg_types=[refine_type(ta, sa) for ta, sa in zip(t.arg_types, s.arg_types)],
ret_type=refine_type(t.ret_type, s.ret_type),
)
T = TypeVar("T")
def dedup(old: list[T]) -> list[T]:
new: list[T] = []
for x in old:
if x not in new:
new.append(x)
return new
|
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/suggestions.py
|
Python
|
NOASSERTION
| 38,070 |
algorandfoundation/puya
|
src/puyapy/_vendor/mypy/test/__init__.py
|
Python
|
NOASSERTION
| 0 |